author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /security
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig  264
-rw-r--r--  security/Kconfig.hardening  358
-rw-r--r--  security/Makefile  29
-rw-r--r--  security/apparmor/.gitignore  4
-rw-r--r--  security/apparmor/Kconfig  123
-rw-r--r--  security/apparmor/Makefile  113
-rw-r--r--  security/apparmor/apparmorfs.c  2682
-rw-r--r--  security/apparmor/audit.c  247
-rw-r--r--  security/apparmor/capability.c  158
-rw-r--r--  security/apparmor/crypto.c  123
-rw-r--r--  security/apparmor/domain.c  1458
-rw-r--r--  security/apparmor/file.c  711
-rw-r--r--  security/apparmor/include/apparmor.h  46
-rw-r--r--  security/apparmor/include/apparmorfs.h  134
-rw-r--r--  security/apparmor/include/audit.h  193
-rw-r--r--  security/apparmor/include/capability.h  46
-rw-r--r--  security/apparmor/include/cred.h  188
-rw-r--r--  security/apparmor/include/crypto.h  37
-rw-r--r--  security/apparmor/include/domain.h  39
-rw-r--r--  security/apparmor/include/file.h  240
-rw-r--r--  security/apparmor/include/ipc.h  18
-rw-r--r--  security/apparmor/include/label.h  467
-rw-r--r--  security/apparmor/include/lib.h  298
-rw-r--r--  security/apparmor/include/match.h  196
-rw-r--r--  security/apparmor/include/mount.h  50
-rw-r--r--  security/apparmor/include/net.h  112
-rw-r--r--  security/apparmor/include/path.h  32
-rw-r--r--  security/apparmor/include/perms.h  156
-rw-r--r--  security/apparmor/include/policy.h  315
-rw-r--r--  security/apparmor/include/policy_ns.h  165
-rw-r--r--  security/apparmor/include/policy_unpack.h  179
-rw-r--r--  security/apparmor/include/procattr.h  17
-rw-r--r--  security/apparmor/include/resource.h  46
-rw-r--r--  security/apparmor/include/secid.h  37
-rw-r--r--  security/apparmor/include/sig_names.h  101
-rw-r--r--  security/apparmor/include/task.h  98
-rw-r--r--  security/apparmor/ipc.c  108
-rw-r--r--  security/apparmor/label.c  2163
-rw-r--r--  security/apparmor/lib.c  542
-rw-r--r--  security/apparmor/lsm.c  1929
-rw-r--r--  security/apparmor/match.c  792
-rw-r--r--  security/apparmor/mount.c  740
-rw-r--r--  security/apparmor/net.c  257
-rw-r--r--  security/apparmor/nulldfa.in  107
-rw-r--r--  security/apparmor/path.c  217
-rw-r--r--  security/apparmor/policy.c  1209
-rw-r--r--  security/apparmor/policy_ns.c  435
-rw-r--r--  security/apparmor/policy_unpack.c  1237
-rw-r--r--  security/apparmor/policy_unpack_test.c  612
-rw-r--r--  security/apparmor/procattr.c  136
-rw-r--r--  security/apparmor/resource.c  187
-rw-r--r--  security/apparmor/secid.c  149
-rw-r--r--  security/apparmor/stacksplitdfa.in  114
-rw-r--r--  security/apparmor/task.c  293
-rw-r--r--  security/bpf/Makefile  5
-rw-r--r--  security/bpf/hooks.c  34
-rw-r--r--  security/commoncap.c  1485
-rw-r--r--  security/device_cgroup.c  877
-rw-r--r--  security/inode.c  350
-rw-r--r--  security/integrity/Kconfig  115
-rw-r--r--  security/integrity/Makefile  22
-rw-r--r--  security/integrity/digsig.c  221
-rw-r--r--  security/integrity/digsig_asymmetric.c  157
-rw-r--r--  security/integrity/evm/Kconfig  74
-rw-r--r--  security/integrity/evm/Makefile  8
-rw-r--r--  security/integrity/evm/evm.h  65
-rw-r--r--  security/integrity/evm/evm_crypto.c  428
-rw-r--r--  security/integrity/evm/evm_main.c  919
-rw-r--r--  security/integrity/evm/evm_posix_acl.c  23
-rw-r--r--  security/integrity/evm/evm_secfs.c  334
-rw-r--r--  security/integrity/iint.c  261
-rw-r--r--  security/integrity/ima/Kconfig  322
-rw-r--r--  security/integrity/ima/Makefile  20
-rw-r--r--  security/integrity/ima/ima.h  453
-rw-r--r--  security/integrity/ima/ima_api.c  455
-rw-r--r--  security/integrity/ima/ima_appraise.c  788
-rw-r--r--  security/integrity/ima/ima_asymmetric_keys.c  66
-rw-r--r--  security/integrity/ima/ima_crypto.c  882
-rw-r--r--  security/integrity/ima/ima_efi.c  75
-rw-r--r--  security/integrity/ima/ima_fs.c  522
-rw-r--r--  security/integrity/ima/ima_init.c  158
-rw-r--r--  security/integrity/ima/ima_kexec.c  166
-rw-r--r--  security/integrity/ima/ima_main.c  1100
-rw-r--r--  security/integrity/ima/ima_modsig.c  151
-rw-r--r--  security/integrity/ima/ima_mok.c  49
-rw-r--r--  security/integrity/ima/ima_policy.c  2307
-rw-r--r--  security/integrity/ima/ima_queue.c  241
-rw-r--r--  security/integrity/ima/ima_queue_keys.c  177
-rw-r--r--  security/integrity/ima/ima_template.c  536
-rw-r--r--  security/integrity/ima/ima_template_lib.c  746
-rw-r--r--  security/integrity/ima/ima_template_lib.h  69
-rw-r--r--  security/integrity/integrity.h  335
-rw-r--r--  security/integrity/integrity_audit.c  69
-rw-r--r--  security/integrity/platform_certs/efi_parser.c  108
-rw-r--r--  security/integrity/platform_certs/keyring_handler.c  85
-rw-r--r--  security/integrity/platform_certs/keyring_handler.h  45
-rw-r--r--  security/integrity/platform_certs/load_ipl_s390.c  36
-rw-r--r--  security/integrity/platform_certs/load_powerpc.c  96
-rw-r--r--  security/integrity/platform_certs/load_uefi.c  238
-rw-r--r--  security/integrity/platform_certs/machine_keyring.c  77
-rw-r--r--  security/integrity/platform_certs/platform_keyring.c  58
-rw-r--r--  security/keys/Kconfig  135
-rw-r--r--  security/keys/Makefile  32
-rw-r--r--  security/keys/big_key.c  290
-rw-r--r--  security/keys/compat.c  132
-rw-r--r--  security/keys/compat_dh.c  36
-rw-r--r--  security/keys/dh.c  333
-rw-r--r--  security/keys/encrypted-keys/Makefile  11
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.c  77
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.h  27
-rw-r--r--  security/keys/encrypted-keys/encrypted.c  1043
-rw-r--r--  security/keys/encrypted-keys/encrypted.h  67
-rw-r--r--  security/keys/encrypted-keys/masterkey_trusted.c  43
-rw-r--r--  security/keys/gc.c  380
-rw-r--r--  security/keys/internal.h  383
-rw-r--r--  security/keys/key.c  1215
-rw-r--r--  security/keys/keyctl.c  2026
-rw-r--r--  security/keys/keyctl_pkey.c  327
-rw-r--r--  security/keys/keyring.c  1794
-rw-r--r--  security/keys/permission.c  123
-rw-r--r--  security/keys/persistent.c  167
-rw-r--r--  security/keys/proc.c  323
-rw-r--r--  security/keys/process_keys.c  965
-rw-r--r--  security/keys/request_key.c  821
-rw-r--r--  security/keys/request_key_auth.c  283
-rw-r--r--  security/keys/sysctl.c  70
-rw-r--r--  security/keys/trusted-keys/Kconfig  38
-rw-r--r--  security/keys/trusted-keys/Makefile  16
-rw-r--r--  security/keys/trusted-keys/tpm2key.asn1  11
-rw-r--r--  security/keys/trusted-keys/trusted_caam.c  80
-rw-r--r--  security/keys/trusted-keys/trusted_core.c  394
-rw-r--r--  security/keys/trusted-keys/trusted_tee.c  289
-rw-r--r--  security/keys/trusted-keys/trusted_tpm1.c  1074
-rw-r--r--  security/keys/trusted-keys/trusted_tpm2.c  550
-rw-r--r--  security/keys/user_defined.c  207
-rw-r--r--  security/landlock/Kconfig  21
-rw-r--r--  security/landlock/Makefile  4
-rw-r--r--  security/landlock/common.h  20
-rw-r--r--  security/landlock/cred.c  46
-rw-r--r--  security/landlock/cred.h  58
-rw-r--r--  security/landlock/fs.c  1205
-rw-r--r--  security/landlock/fs.h  71
-rw-r--r--  security/landlock/limits.h  27
-rw-r--r--  security/landlock/object.c  67
-rw-r--r--  security/landlock/object.h  91
-rw-r--r--  security/landlock/ptrace.c  120
-rw-r--r--  security/landlock/ptrace.h  14
-rw-r--r--  security/landlock/ruleset.c  475
-rw-r--r--  security/landlock/ruleset.h  180
-rw-r--r--  security/landlock/setup.c  40
-rw-r--r--  security/landlock/setup.h  18
-rw-r--r--  security/landlock/syscalls.c  456
-rw-r--r--  security/loadpin/Kconfig  41
-rw-r--r--  security/loadpin/Makefile  2
-rw-r--r--  security/loadpin/loadpin.c  435
-rw-r--r--  security/lockdown/Kconfig  47
-rw-r--r--  security/lockdown/Makefile  1
-rw-r--r--  security/lockdown/lockdown.c  167
-rw-r--r--  security/lsm_audit.c  463
-rw-r--r--  security/min_addr.c  53
-rw-r--r--  security/safesetid/Kconfig  15
-rw-r--r--  security/safesetid/Makefile  7
-rw-r--r--  security/safesetid/lsm.c  285
-rw-r--r--  security/safesetid/lsm.h  73
-rw-r--r--  security/safesetid/securityfs.c  345
-rw-r--r--  security/security.c  2707
-rw-r--r--  security/selinux/.gitignore  3
-rw-r--r--  security/selinux/Kconfig  117
-rw-r--r--  security/selinux/Makefile  34
-rw-r--r--  security/selinux/avc.c  1222
-rw-r--r--  security/selinux/hooks.c  7570
-rw-r--r--  security/selinux/ibpkey.c  237
-rw-r--r--  security/selinux/ima.c  125
-rw-r--r--  security/selinux/include/audit.h  60
-rw-r--r--  security/selinux/include/avc.h  188
-rw-r--r--  security/selinux/include/avc_ss.h  24
-rw-r--r--  security/selinux/include/classmap.h  264
-rw-r--r--  security/selinux/include/conditional.h  23
-rw-r--r--  security/selinux/include/ibpkey.h  34
-rw-r--r--  security/selinux/include/ima.h  30
-rw-r--r--  security/selinux/include/initial_sid_to_string.h  32
-rw-r--r--  security/selinux/include/netif.h  24
-rw-r--r--  security/selinux/include/netlabel.h  150
-rw-r--r--  security/selinux/include/netnode.h  26
-rw-r--r--  security/selinux/include/netport.h  25
-rw-r--r--  security/selinux/include/objsec.h  197
-rw-r--r--  security/selinux/include/policycap.h  21
-rw-r--r--  security/selinux/include/policycap_names.h  19
-rw-r--r--  security/selinux/include/security.h  467
-rw-r--r--  security/selinux/include/xfrm.h  94
-rw-r--r--  security/selinux/netif.c  280
-rw-r--r--  security/selinux/netlabel.c  615
-rw-r--r--  security/selinux/netlink.c  121
-rw-r--r--  security/selinux/netnode.c  305
-rw-r--r--  security/selinux/netport.c  238
-rw-r--r--  security/selinux/nlmsgtab.c  218
-rw-r--r--  security/selinux/selinuxfs.c  2261
-rw-r--r--  security/selinux/ss/avtab.c  679
-rw-r--r--  security/selinux/ss/avtab.h  120
-rw-r--r--  security/selinux/ss/conditional.c  758
-rw-r--r--  security/selinux/ss/conditional.h  85
-rw-r--r--  security/selinux/ss/constraint.h  63
-rw-r--r--  security/selinux/ss/context.c  32
-rw-r--r--  security/selinux/ss/context.h  199
-rw-r--r--  security/selinux/ss/ebitmap.c  564
-rw-r--r--  security/selinux/ss/ebitmap.h  154
-rw-r--r--  security/selinux/ss/hashtab.c  192
-rw-r--r--  security/selinux/ss/hashtab.h  148
-rw-r--r--  security/selinux/ss/mls.c  660
-rw-r--r--  security/selinux/ss/mls.h  116
-rw-r--r--  security/selinux/ss/mls_types.h  52
-rw-r--r--  security/selinux/ss/policydb.c  3731
-rw-r--r--  security/selinux/ss/policydb.h  391
-rw-r--r--  security/selinux/ss/services.c  4072
-rw-r--r--  security/selinux/ss/services.h  38
-rw-r--r--  security/selinux/ss/sidtab.c  628
-rw-r--r--  security/selinux/ss/sidtab.h  159
-rw-r--r--  security/selinux/ss/symtab.c  54
-rw-r--r--  security/selinux/ss/symtab.h  27
-rw-r--r--  security/selinux/status.c  124
-rw-r--r--  security/selinux/xfrm.c  473
-rw-r--r--  security/smack/Kconfig  55
-rw-r--r--  security/smack/Makefile  9
-rw-r--r--  security/smack/smack.h  505
-rw-r--r--  security/smack/smack_access.c  696
-rw-r--r--  security/smack/smack_lsm.c  5105
-rw-r--r--  security/smack/smack_netfilter.c  80
-rw-r--r--  security/smack/smackfs.c  3035
-rw-r--r--  security/tomoyo/.gitignore  3
-rw-r--r--  security/tomoyo/Kconfig  87
-rw-r--r--  security/tomoyo/Makefile  16
-rw-r--r--  security/tomoyo/audit.c  479
-rw-r--r--  security/tomoyo/common.c  2870
-rw-r--r--  security/tomoyo/common.h  1333
-rw-r--r--  security/tomoyo/condition.c  1122
-rw-r--r--  security/tomoyo/domain.c  945
-rw-r--r--  security/tomoyo/environ.c  123
-rw-r--r--  security/tomoyo/file.c  1045
-rw-r--r--  security/tomoyo/gc.c  670
-rw-r--r--  security/tomoyo/group.c  209
-rw-r--r--  security/tomoyo/load_policy.c  110
-rw-r--r--  security/tomoyo/memory.c  207
-rw-r--r--  security/tomoyo/mount.c  240
-rw-r--r--  security/tomoyo/network.c  777
-rw-r--r--  security/tomoyo/policy/exception_policy.conf.default  2
-rw-r--r--  security/tomoyo/realpath.c  310
-rw-r--r--  security/tomoyo/securityfs_if.c  273
-rw-r--r--  security/tomoyo/tomoyo.c  602
-rw-r--r--  security/tomoyo/util.c  1106
-rw-r--r--  security/yama/Kconfig  14
-rw-r--r--  security/yama/Makefile  4
-rw-r--r--  security/yama/yama_lsm.c  488
252 files changed, 108894 insertions, 0 deletions
diff --git a/security/Kconfig b/security/Kconfig
new file mode 100644
index 000000000..e6db09a77
--- /dev/null
+++ b/security/Kconfig
@@ -0,0 +1,264 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Security configuration
+#
+
+menu "Security options"
+
+source "security/keys/Kconfig"
+
+config SECURITY_DMESG_RESTRICT
+ bool "Restrict unprivileged access to the kernel syslog"
+ default n
+ help
+ This enforces restrictions on unprivileged users reading the kernel
+ syslog via dmesg(8).
+
+ If this option is not selected, no restrictions will be enforced
+ unless the dmesg_restrict sysctl is explicitly set to (1).
+
+ If you are unsure how to answer this question, answer N.
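As a rough, kernel-style sketch of the check this option gates (illustrative only; dmesg_read_check_sketch is hypothetical, while dmesg_restrict and CAP_SYSLOG are the real sysctl flag and capability):

	/* Sketch: deny unprivileged reads when restriction is on;
	 * CAP_SYSLOG bypasses it, mirroring the help text above.
	 */
	static int dmesg_read_check_sketch(void)
	{
		if (dmesg_restrict && !capable(CAP_SYSLOG))
			return -EPERM;
		return 0;
	}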
+
+config SECURITY
+ bool "Enable different security models"
+ depends on SYSFS
+ depends on MULTIUSER
+ help
+ This allows you to choose different security modules to be
+ configured into your kernel.
+
+ If this option is not selected, the default Linux security
+ model will be used.
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_WRITABLE_HOOKS
+ depends on SECURITY
+ bool
+ default n
+
+config SECURITYFS
+ bool "Enable the securityfs filesystem"
+ help
+ This will build the securityfs filesystem. It is currently used by
+ various security modules (AppArmor, IMA, SafeSetID, TOMOYO, TPM).
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_NETWORK
+ bool "Socket and Networking Security Hooks"
+ depends on SECURITY
+ help
+ This enables the socket and networking security hooks.
+ If enabled, a security module can use these hooks to
+ implement socket and networking access controls.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_INFINIBAND
+ bool "Infiniband Security Hooks"
+ depends on SECURITY && INFINIBAND
+ help
+ This enables the Infiniband security hooks.
+ If enabled, a security module can use these hooks to
+ implement Infiniband access controls.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_NETWORK_XFRM
+ bool "XFRM (IPSec) Networking Security Hooks"
+ depends on XFRM && SECURITY_NETWORK
+ help
+ This enables the XFRM (IPSec) networking security hooks.
+ If enabled, a security module can use these hooks to
+ implement per-packet access controls based on labels
+ derived from IPSec policy. Non-IPSec communications are
+ designated as unlabelled, and only sockets authorized
+ to communicate unlabelled data can send without using
+ IPSec.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_PATH
+ bool "Security hooks for pathname based access control"
+ depends on SECURITY
+ help
+ This enables the security hooks for pathname based access control.
+ If enabled, a security module can use these hooks to
+ implement pathname based access controls.
+ If you are unsure how to answer this question, answer N.
+
+config INTEL_TXT
+ bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
+ depends on HAVE_INTEL_TXT
+ help
+ This option enables support for booting the kernel with the
+ Trusted Boot (tboot) module. This will utilize
+ Intel(R) Trusted Execution Technology to perform a measured launch
+ of the kernel. If the system does not support Intel(R) TXT, this
+ will have no effect.
+
+ Intel TXT will provide higher assurance of system configuration and
+ initial state as well as data reset protection. This is used to
+ create a robust initial kernel measurement and verification, which
+ helps to ensure that kernel security mechanisms are functioning
+ correctly. This level of protection requires a root of trust outside
+ of the kernel itself.
+
+ Intel TXT also helps solve real end user concerns about having
+ confidence that their hardware is running the VMM or kernel that
+ it was configured with, especially since they may be responsible for
+ providing such assurances to VMs and services running on it.
+
+ See <https://www.intel.com/technology/security/> for more information
+ about Intel(R) TXT.
+ See <http://tboot.sourceforge.net> for more information about tboot.
+ See Documentation/x86/intel_txt.rst for a description of how to enable
+ Intel TXT support in a kernel boot.
+
+ If you are unsure as to whether this is required, answer N.
+
+config LSM_MMAP_MIN_ADDR
+ int "Low address space for LSM to protect from user allocation"
+ depends on SECURITY && SECURITY_SELINUX
+ default 32768 if ARM || (ARM64 && COMPAT)
+ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+ can help reduce the impact of kernel NULL pointer bugs.
+
+ For most ia64, ppc64 and x86 users with lots of address space
+ a value of 65536 is reasonable and should cause no problems.
+ On arm and other archs it should not be higher than 32768.
+ Programs which use vm86 functionality or have some need to map
+ this low address space will need permissions specific to the
+ LSMs running on the system.
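A userspace demonstration of the floor (a hedged sketch; the exact errno depends on the active LSMs):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Request a fixed mapping below the protected low range. */
		void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

		if (p == MAP_FAILED)
			perror("mmap below mmap_min_addr");	/* expected */
		return 0;
	}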
+
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+ bool
+ help
+ The heap allocator implements __check_heap_object() for
+ validating memory ranges against heap object sizes in
+ support of CONFIG_HARDENED_USERCOPY.
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+ imply STRICT_DEVMEM
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This prevents entire classes
+ of heap overflow exploits and similar kernel memory exposures.
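A kernel-style sketch of the flaw class this rejects (illustrative; struct reply, leak_sketch and the oversized 64-byte length are made up):

	struct reply { char name[16]; };

	static long leak_sketch(struct reply *r, void __user *ubuf)
	{
		/* 64 > sizeof(*r): with HARDENED_USERCOPY the copy is
		 * rejected instead of exposing adjacent heap memory.
		 */
		return copy_to_user(ubuf, r, 64) ? -EFAULT : 0;
	}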
+
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ # https://bugs.llvm.org/show_bug.cgi?id=41459
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120001
+ # https://github.com/llvm/llvm-project/issues/53645
+ depends on !CC_IS_CLANG || !X86_32
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
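For example (a sketch; the detection works because the compiler knows the destination size here):

	#include <string.h>

	void fortify_sketch(const char *untrusted)
	{
		char buf[8];

		/* sizeof(buf) is known to be 8, so the fortified
		 * strcpy() can flag an overflowing copy at compile
		 * time or trap it at runtime.
		 */
		strcpy(buf, untrusted);
	}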
+
+config STATIC_USERMODEHELPER
+ bool "Force all usermode helper calls through a single binary"
+ help
+ By default, the kernel can call many different userspace
+ binary programs through the "usermode helper" kernel
+ interface. Some of these binaries are statically defined
+ either in the kernel code itself, or as a kernel configuration
+ option. However, some of these are dynamically created at
+ runtime, or can be modified after the kernel has started up.
+ To provide an additional layer of security, route all of these
+ calls through a single executable that can not have its name
+ changed.
+
+ Note, it is up to this single binary to then call the relevant
+ "real" usermode helper binary, based on the first argument
+ passed to it. If desired, this program can filter and pick
+ and choose what real programs are called.
+
+ If you wish for all usermode helper programs to be
+ disabled, choose this option and then set
+ STATIC_USERMODEHELPER_PATH to an empty string.
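A minimal userspace sketch of such a dispatcher (entirely hypothetical; the allowlisted path is an example only):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		/* argv[1] names the "real" helper the kernel asked for. */
		if (argc >= 2 && strcmp(argv[1], "/sbin/modprobe") == 0)
			return execv(argv[1], &argv[1]);

		if (argc >= 2)
			fprintf(stderr, "refusing helper: %s\n", argv[1]);
		return 1;
	}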
+
+config STATIC_USERMODEHELPER_PATH
+ string "Path to the static usermode helper binary"
+ depends on STATIC_USERMODEHELPER
+ default "/sbin/usermode-helper"
+ help
+ The binary called by the kernel when any usermode helper
+ program needs to be run. The "real" application's name will
+ be in the first argument passed to this program on the command
+ line.
+
+ If you wish for all usermode helper programs to be disabled,
+ specify an empty string here (i.e. "").
+
+source "security/selinux/Kconfig"
+source "security/smack/Kconfig"
+source "security/tomoyo/Kconfig"
+source "security/apparmor/Kconfig"
+source "security/loadpin/Kconfig"
+source "security/yama/Kconfig"
+source "security/safesetid/Kconfig"
+source "security/lockdown/Kconfig"
+source "security/landlock/Kconfig"
+
+source "security/integrity/Kconfig"
+
+choice
+ prompt "First legacy 'major LSM' to be initialized"
+ default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+ default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+ default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+ default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+ default DEFAULT_SECURITY_DAC
+
+ help
+ This choice exists only for converting CONFIG_DEFAULT_SECURITY
+ in old kernel configs to CONFIG_LSM in new kernel configs. Don't
+ change this choice unless you are creating a fresh kernel config,
+ because it will be ignored once CONFIG_LSM has been set.
+
+ Selects the legacy "major security module" that will be
+ initialized first. Overridden by non-default CONFIG_LSM.
+
+ config DEFAULT_SECURITY_SELINUX
+ bool "SELinux" if SECURITY_SELINUX=y
+
+ config DEFAULT_SECURITY_SMACK
+ bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+ config DEFAULT_SECURITY_TOMOYO
+ bool "TOMOYO" if SECURITY_TOMOYO=y
+
+ config DEFAULT_SECURITY_APPARMOR
+ bool "AppArmor" if SECURITY_APPARMOR=y
+
+ config DEFAULT_SECURITY_DAC
+ bool "Unix Discretionary Access Controls"
+
+endchoice
+
+config LSM
+ string "Ordered list of enabled LSMs"
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor,bpf" if DEFAULT_SECURITY_SMACK
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo,bpf" if DEFAULT_SECURITY_APPARMOR
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,tomoyo,bpf" if DEFAULT_SECURITY_TOMOYO
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" if DEFAULT_SECURITY_DAC
+ default "landlock,lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf"
+ help
+ A comma-separated list of LSMs, in initialization order.
+ Any LSMs left off this list will be ignored. This can be
+ controlled at boot with the "lsm=" parameter.
+
+ If unsure, leave this as the default.
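For example, booting with "lsm=landlock,yama,apparmor" initializes only those three LSMs, in that order, regardless of the list built in via CONFIG_LSM.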
+
+source "security/Kconfig.hardening"
+
+endmenu
+
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
new file mode 100644
index 000000000..0f295961e
--- /dev/null
+++ b/security/Kconfig.hardening
@@ -0,0 +1,358 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Kernel hardening options"
+
+config GCC_PLUGIN_STRUCTLEAK
+ bool
+ help
+ While the kernel is built with warnings enabled for any missed
+ stack variable initializations, this warning is silenced for
+ anything passed by reference to another function, under the
+ occasionally misguided assumption that the function will do
+ the initialization. As this regularly leads to exploitable
+ flaws, this plugin is available to identify and zero-initialize
+ such variables, depending on the chosen level of coverage.
+
+ This plugin was originally ported from grsecurity/PaX. More
+ information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
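A kernel-style sketch of the flaw class the plugin targets (hypothetical code; u32, __user and copy_to_user are the usual kernel primitives):

	struct info {
		u32 flags;
		u32 reserved;	/* never written below */
	};

	static long info_sketch(void __user *ubuf)
	{
		struct info i;

		i.flags = 1;
		/* Without structleak (or auto-var-init), i.reserved is
		 * stack garbage and leaks to userspace in this copy.
		 */
		return copy_to_user(ubuf, &i, sizeof(i)) ? -EFAULT : 0;
	}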
+
+menu "Memory initialization"
+
+config CC_HAS_AUTO_VAR_INIT_PATTERN
+ def_bool $(cc-option,-ftrivial-auto-var-init=pattern)
+
+config CC_HAS_AUTO_VAR_INIT_ZERO_BARE
+ def_bool $(cc-option,-ftrivial-auto-var-init=zero)
+
+config CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
+ # Clang 16 and later warn about using the -enable flag, but it
+ # is required before then.
+ def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang)
+ depends on !CC_HAS_AUTO_VAR_INIT_ZERO_BARE
+
+config CC_HAS_AUTO_VAR_INIT_ZERO
+ def_bool CC_HAS_AUTO_VAR_INIT_ZERO_BARE || CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
+
+choice
+ prompt "Initialize kernel stack variables at function entry"
+ default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS
+ default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN
+ default INIT_STACK_ALL_ZERO if CC_HAS_AUTO_VAR_INIT_ZERO
+ default INIT_STACK_NONE
+ help
+ This option enables initialization of stack variables at
+ function entry time. This has the possibility to have the
+ greatest coverage (since all functions can have their
+ variables initialized), but the performance impact depends
+ on the function calling complexity of a given workload's
+ syscalls.
+
+ This chooses the level of coverage over classes of potentially
+ uninitialized variables. The selected class of variable will be
+ initialized before use in a function.
+
+ config INIT_STACK_NONE
+ bool "no automatic stack variable initialization (weakest)"
+ help
+ Disable automatic stack variable initialization.
+ This leaves the kernel vulnerable to the standard
+ classes of uninitialized stack variable exploits
+ and information exposures.
+
+ config GCC_PLUGIN_STRUCTLEAK_USER
+ bool "zero-init structs marked for userspace (weak)"
+ # Plugin can be removed once the kernel only supports GCC 12+
+ depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any structures on the stack containing
+ a __user attribute. This can prevent some classes of
+ uninitialized stack variable exploits and information
+ exposures, like CVE-2013-2141:
+ https://git.kernel.org/linus/b9e146d8eb3b9eca
+
+ config GCC_PLUGIN_STRUCTLEAK_BYREF
+ bool "zero-init structs passed by reference (strong)"
+ # Plugin can be removed once the kernel only supports GCC 12+
+ depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
+ depends on !(KASAN && KASAN_STACK)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any structures on the stack that may
+ be passed by reference and had not already been
+ explicitly initialized. This can prevent most classes
+ of uninitialized stack variable exploits and information
+ exposures, like CVE-2017-1000410:
+ https://git.kernel.org/linus/06e7e776ca4d3654
+
+ As a side-effect, this keeps a lot of variables on the
+ stack that can otherwise be optimized out, so combining
+ this with CONFIG_KASAN_STACK can lead to a stack overflow
+ and is disallowed.
+
+ config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
+ bool "zero-init everything passed by reference (very strong)"
+ # Plugin can be removed once the kernel only supports GCC 12+
+ depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
+ depends on !(KASAN && KASAN_STACK)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any stack variables that may be passed
+ by reference and had not already been explicitly
+ initialized. This is intended to eliminate all classes
+ of uninitialized stack variable exploits and information
+ exposures.
+
+ As a side-effect, this keeps a lot of variables on the
+ stack that can otherwise be optimized out, so combining
+ this with CONFIG_KASAN_STACK can lead to a stack overflow
+ and is disallowed.
+
+ config INIT_STACK_ALL_PATTERN
+ bool "pattern-init everything (strongest)"
+ depends on CC_HAS_AUTO_VAR_INIT_PATTERN
+ depends on !KMSAN
+ help
+ Initializes everything on the stack (including padding)
+ with a specific debug value. This is intended to eliminate
+ all classes of uninitialized stack variable exploits and
+ information exposures, even variables that were warned about
+ having been left uninitialized.
+
+ Pattern initialization is known to provoke many existing bugs
+ related to uninitialized locals, e.g. pointers receive
+ non-NULL values, and buffer sizes and indices are very large. The
+ pattern is situation-specific; Clang on 64-bit uses 0xAA
+ repeating for all types and padding except float and double
+ which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF
+ repeating for all types and padding.
+
+ config INIT_STACK_ALL_ZERO
+ bool "zero-init everything (strongest and safest)"
+ depends on CC_HAS_AUTO_VAR_INIT_ZERO
+ depends on !KMSAN
+ help
+ Initializes everything on the stack (including padding)
+ with a zero value. This is intended to eliminate all
+ classes of uninitialized stack variable exploits and
+ information exposures, even variables that were warned
+ about having been left uninitialized.
+
+ Zero initialization provides safe defaults for strings
+ (immediately NUL-terminated), pointers (NULL), indices
+ (index 0), and sizes (0 length), so it is therefore more
+ suitable as a production security mitigation than pattern
+ initialization.
+
+endchoice
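To make the choice concrete (a sketch):

	size_t choose_len_sketch(int have_hint, size_t hint)
	{
		size_t len;	/* never assigned on the else path */

		if (have_hint)
			len = hint;
		/* INIT_STACK_NONE: stack garbage may be returned.
		 * INIT_STACK_ALL_PATTERN: len starts as e.g. 0xAAAA...,
		 * a huge value that tends to make the bug crash early.
		 * INIT_STACK_ALL_ZERO: len starts as 0, a safe default.
		 */
		return len;
	}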
+
+config GCC_PLUGIN_STRUCTLEAK_VERBOSE
+ bool "Report forcefully initialized variables"
+ depends on GCC_PLUGIN_STRUCTLEAK
+ depends on !COMPILE_TEST # too noisy
+ help
+ This option will cause a warning to be printed each time the
+ structleak plugin finds a variable it thinks needs to be
+ initialized. Since not all existing initializers are detected
+ by the plugin, this can produce false positive warnings.
+
+config GCC_PLUGIN_STACKLEAK
+ bool "Poison kernel stack before returning from syscalls"
+ depends on GCC_PLUGINS
+ depends on HAVE_ARCH_STACKLEAK
+ help
+ This option makes the kernel erase the kernel stack before
+ returning from system calls. This has the effect of leaving
+ the stack initialized to the poison value, which both reduces
+ the lifetime of any sensitive stack contents and reduces
+ potential for uninitialized stack variable exploits or information
+ exposures (it does not cover functions reaching the same stack
+ depth as prior functions during the same syscall). This blocks
+ most uninitialized stack variable attacks, with the performance
+ impact being driven by the depth of the stack usage, rather than
+ the function calling complexity.
+
+ As for performance impact, kernel compilation on a single-CPU
+ system sees a 1% slowdown; other systems and workloads may vary,
+ and you are advised to test this feature on your expected
+ workload before deploying it.
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
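Conceptually, the erase pass walks from the lowest stack address previously used up to the current stack pointer, writing a poison value (a simplified sketch; the real stackleak_erase() in kernel/stackleak.c is more careful about boundaries, and the poison constant below is made up):

	#define POISON_SKETCH 0xCCCCCCCCUL

	static void erase_sketch(unsigned long *lowest, unsigned long *sp)
	{
		while (lowest < sp)
			*lowest++ = POISON_SKETCH;
	}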
+
+config GCC_PLUGIN_STACKLEAK_VERBOSE
+ bool "Report stack depth analysis instrumentation" if EXPERT
+ depends on GCC_PLUGIN_STACKLEAK
+ depends on !COMPILE_TEST # too noisy
+ help
+ This option will cause a warning to be printed each time the
+ stackleak plugin finds a function it thinks needs to be
+ instrumented. This is useful for comparing coverage between
+ builds.
+
+config STACKLEAK_TRACK_MIN_SIZE
+ int "Minimum stack frame size of functions tracked by STACKLEAK"
+ default 100
+ range 0 4096
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ The STACKLEAK gcc plugin instruments the kernel code for tracking
+ the lowest border of the kernel stack (and for some other purposes).
+ It inserts the stackleak_track_stack() call for the functions with
+ a stack frame size greater than or equal to this parameter.
+ If unsure, leave the default value 100.
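In effect, the plugin compiles a large-frame function as if it were written like this (sketch; the tracking call is inserted by the compiler, and use_buffer() is hypothetical):

	void large_frame_sketch(void)
	{
		char scratch[256];		/* frame >= the tracked minimum */

		stackleak_track_stack();	/* inserted by the plugin */
		use_buffer(scratch);		/* hypothetical consumer */
	}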
+
+config STACKLEAK_METRICS
+ bool "Show STACKLEAK metrics in the /proc file system"
+ depends on GCC_PLUGIN_STACKLEAK
+ depends on PROC_FS
+ help
+ If this is set, STACKLEAK metrics for every task are available in
+ the /proc file system. In particular, /proc/<pid>/stack_depth
+ shows the maximum kernel stack consumption for the current and
+ previous syscalls. Although this information is not precise, it
+ can be useful for estimating the STACKLEAK performance impact for
+ your workloads.
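Reading the metric from userspace (a trivial sketch):

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/proc/self/stack_depth", "r");

		if (f && fgets(line, sizeof(line), f))
			fputs(line, stdout);
		if (f)
			fclose(f);
		return 0;
	}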
+
+config STACKLEAK_RUNTIME_DISABLE
+ bool "Allow runtime disabling of kernel stack erasing"
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ This option provides the 'stack_erasing' sysctl, which can be
+ used at runtime to control kernel stack erasing for kernels
+ built with CONFIG_GCC_PLUGIN_STACKLEAK.
+
+config INIT_ON_ALLOC_DEFAULT_ON
+ bool "Enable heap memory zeroing on allocation by default"
+ depends on !KMSAN
+ help
+ This has the effect of setting "init_on_alloc=1" on the kernel
+ command line. This can be disabled with "init_on_alloc=0".
+ When "init_on_alloc" is enabled, all page allocator and slab
+ allocator memory will be zeroed when allocated, eliminating
+ many kinds of "uninitialized heap memory" flaws, especially
+ heap content exposures. The performance impact varies by
+ workload, but most cases see <1% impact. Some synthetic
+ workloads have measured as high as 7%.
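Conceptually the allocators gate a memset on the boot-time switch (a sketch; raw_alloc() and init_on_alloc_enabled are hypothetical stand-ins, while the real kernel uses want_init_on_alloc() behind a static branch):

	void *alloc_sketch(size_t size)
	{
		void *p = raw_alloc(size);	/* hypothetical backend */

		if (p && init_on_alloc_enabled)	/* "init_on_alloc=1" */
			memset(p, 0, size);
		return p;
	}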
+
+config INIT_ON_FREE_DEFAULT_ON
+ bool "Enable heap memory zeroing on free by default"
+ depends on !KMSAN
+ help
+ This has the effect of setting "init_on_free=1" on the kernel
+ command line. This can be disabled with "init_on_free=0".
+ Similar to "init_on_alloc", when "init_on_free" is enabled,
+ all page allocator and slab allocator memory will be zeroed
+ when freed, eliminating many kinds of "uninitialized heap memory"
+ flaws, especially heap content exposures. The primary difference
+ with "init_on_free" is that data lifetime in memory is reduced,
+ as anything freed is wiped immediately, making live forensics or
+ cold boot memory attacks unable to recover freed memory contents.
+ The performance impact varies by workload, but is more expensive
+ than "init_on_alloc" due to the negative cache effects of
+ touching "cold" memory areas. Most cases see 3-5% impact. Some
+ synthetic workloads have measured as high as 8%.
+
+config CC_HAS_ZERO_CALL_USED_REGS
+ def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
+ # https://github.com/ClangBuiltLinux/linux/issues/1766
+ # https://github.com/llvm/llvm-project/issues/59242
+ depends on !CC_IS_CLANG || CLANG_VERSION > 150006
+
+config ZERO_CALL_USED_REGS
+ bool "Enable register zeroing on function exit"
+ depends on CC_HAS_ZERO_CALL_USED_REGS
+ help
+ At the end of functions, always zero any caller-used register
+ contents. This helps ensure that temporary values are not
+ leaked beyond the function boundary. This means that register
+ contents are less likely to be available for side channels
+ and information exposures. Additionally, this helps reduce the
+ number of useful ROP gadgets by about 20% (and removes compiler
+ generated "write-what-where" gadgets) in the resulting kernel
+ image. This has a less than 1% performance impact on most
+ workloads. Image size growth depends on architecture, and should
+ be evaluated for suitability. For example, x86_64 grows by less
+ than 1%, and arm64 grows by about 5%.
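Illustratively, with -fzero-call-used-regs=used-gpr the compiler appends register-zeroing instructions to every return, so a function like this cannot leave its temporary behind (sketch):

	int mix_sketch(int a, int b)
	{
		int tmp = a ^ b;	/* lives in a caller-used register */

		return tmp != 0;	/* epilogue zeroes that register */
	}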
+
+endmenu
+
+config CC_HAS_RANDSTRUCT
+ def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
+ # Randstruct was first added in Clang 15, but it isn't safe to use until
+ # Clang 16 due to https://github.com/llvm/llvm-project/issues/60349
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 160000
+
+choice
+ prompt "Randomize layout of sensitive kernel structures"
+ default RANDSTRUCT_FULL if COMPILE_TEST && (GCC_PLUGINS || CC_HAS_RANDSTRUCT)
+ default RANDSTRUCT_NONE
+ help
+ If you enable this, the layouts of structures that are entirely
+ function pointers (and have not been manually annotated with
+ __no_randomize_layout), or structures that have been explicitly
+ marked with __randomize_layout, will be randomized at compile-time.
+ This can introduce the requirement of an additional information
+ exposure vulnerability for exploits targeting these structure
+ types.
+
+ Enabling this feature will introduce some performance impact,
+ slightly increase memory usage, and prevent the use of forensic
+ tools like Volatility against the system (unless the kernel
+ source tree, and with it the randomization seed, is left in
+ place after kernel installation).
+
+ The seed used for compilation is in scripts/basic/randomize.seed.
+ It remains after a "make clean" to allow for external modules to
+ be compiled with the existing seed and will be removed by a
+ "make mrproper" or "make distclean". This file should not be made
+ public, or the structure layout can be determined.
+
+ config RANDSTRUCT_NONE
+ bool "Disable structure layout randomization"
+ help
+ Build normally: no structure layout randomization.
+
+ config RANDSTRUCT_FULL
+ bool "Fully randomize structure layout"
+ depends on CC_HAS_RANDSTRUCT || GCC_PLUGINS
+ select MODVERSIONS if MODULES
+ help
+ Fully randomize the member layout of sensitive
+ structures as much as possible, which may have both a
+ memory size and performance impact.
+
+ One difference between the Clang and GCC plugin
+ implementations is the handling of bitfields. The GCC
+ plugin treats them as fully separate variables,
+ introducing sometimes significant padding. Clang tries
+ to keep adjacent bitfields together, but with their bit
+ ordering randomized.
+
+ config RANDSTRUCT_PERFORMANCE
+ bool "Limit randomization of structure layout to cache-lines"
+ depends on GCC_PLUGINS
+ select MODVERSIONS if MODULES
+ help
+ Randomization of sensitive kernel structures will make a
+ best effort at restricting randomization to cacheline-sized
+ groups of members. It will further not randomize bitfields
+ in structures. This reduces the performance hit of RANDSTRUCT
+ at the cost of weakened randomization.
+endchoice
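Structures opt in or out with annotations; the macros exist in the kernel's include/linux/compiler_types.h, while the structs below are illustrative:

	struct ops_sketch {
		int (*open)(void);
		int (*close)(void);
	} __randomize_layout;		/* member order shuffled per seed */

	struct wire_sketch {
		u16 type;
		u16 len;
	} __no_randomize_layout;	/* layout is ABI: keep it fixed */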
+
+config RANDSTRUCT
+ def_bool !RANDSTRUCT_NONE
+
+config GCC_PLUGIN_RANDSTRUCT
+ def_bool GCC_PLUGINS && RANDSTRUCT
+ help
+ Use GCC plugin to randomize structure layout.
+
+ This plugin was ported from grsecurity/PaX. More
+ information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+endmenu
diff --git a/security/Makefile b/security/Makefile
new file mode 100644
index 000000000..18121f8f8
--- /dev/null
+++ b/security/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel security code
+#
+
+obj-$(CONFIG_KEYS) += keys/
+
+# always enable default capabilities
+obj-y += commoncap.o
+obj-$(CONFIG_MMU) += min_addr.o
+
+# Object file lists
+obj-$(CONFIG_SECURITY) += security.o
+obj-$(CONFIG_SECURITYFS) += inode.o
+obj-$(CONFIG_SECURITY_SELINUX) += selinux/
+obj-$(CONFIG_SECURITY_SMACK) += smack/
+obj-$(CONFIG_SECURITY) += lsm_audit.o
+obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
+obj-$(CONFIG_SECURITY_YAMA) += yama/
+obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
+obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/
+obj-$(CONFIG_CGROUPS) += device_cgroup.o
+obj-$(CONFIG_BPF_LSM) += bpf/
+obj-$(CONFIG_SECURITY_LANDLOCK) += landlock/
+
+# Object integrity file lists
+obj-$(CONFIG_INTEGRITY) += integrity/
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
new file mode 100644
index 000000000..6d1eb1c15
--- /dev/null
+++ b/security/apparmor/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+net_names.h
+capability_names.h
+rlim_names.h
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
new file mode 100644
index 000000000..f334e7ccc
--- /dev/null
+++ b/security/apparmor/Kconfig
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_APPARMOR
+ bool "AppArmor support"
+ depends on SECURITY && NET
+ select AUDIT
+ select SECURITY_PATH
+ select SECURITYFS
+ select SECURITY_NETWORK
+ default n
+ help
+ This enables the AppArmor security module.
+ Required userspace tools (if they are not included in your
+ distribution) and further information may be found at
+ http://apparmor.wiki.kernel.org
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_APPARMOR_DEBUG
+ bool "Build AppArmor with debug code"
+ depends on SECURITY_APPARMOR
+ default n
+ help
+ Build AppArmor with debugging logic enabled. Not all
+ debugging logic will necessarily be enabled. A submenu will
+ provide fine grained control of the debug options that are
+ available.
+
+config SECURITY_APPARMOR_DEBUG_ASSERTS
+ bool "Build AppArmor with debugging asserts"
+ depends on SECURITY_APPARMOR_DEBUG
+ default y
+ help
+ Enable code assertions made with AA_BUG. These are primarily
+ function entry preconditions but also exist at other key
+ points. If the assert is triggered it will trigger a WARN
+ message.
+
+config SECURITY_APPARMOR_DEBUG_MESSAGES
+ bool "Debug messages enabled by default"
+ depends on SECURITY_APPARMOR_DEBUG
+ default n
+ help
+ Set the default value of the apparmor.debug kernel parameter.
+ When enabled, various debug messages will be logged to
+ the kernel message buffer.
+
+config SECURITY_APPARMOR_INTROSPECT_POLICY
+ bool "Allow loaded policy to be introspected"
+ depends on SECURITY_APPARMOR
+ default y
+ help
+ This option selects whether introspection of loaded policy
+ is available to userspace via the apparmor filesystem. This
+ adds to kernel memory usage. It is required for introspection
+ of loaded policy, and checkpoint and restore support. It
+ can be disabled for embedded systems where reducing memory and
+ cpu is paramount.
+
+config SECURITY_APPARMOR_HASH
+ bool "Enable introspection of sha1 hashes for loaded profiles"
+ depends on SECURITY_APPARMOR_INTROSPECT_POLICY
+ select CRYPTO
+ select CRYPTO_SHA1
+ default y
+ help
+ This option selects whether introspection of loaded policy
+ hashes is available to userspace via the apparmor
+ filesystem. This option provides a lightweight means of
+ checking loaded policy. This option adds to policy load
+ time and can be disabled for small embedded systems.
+
+config SECURITY_APPARMOR_HASH_DEFAULT
+ bool "Enable policy hash introspection by default"
+ depends on SECURITY_APPARMOR_HASH
+ default y
+ help
+ This option selects whether sha1 hashing of loaded policy
+ is enabled by default. The generation of sha1 hashes for
+ loaded policy provides system administrators with a quick way
+ to verify that policy in the kernel matches what is expected;
+ however, it can slow down policy load on some devices. In
+ these cases policy hashing can be disabled by default and
+ enabled only if needed.
+
+config SECURITY_APPARMOR_EXPORT_BINARY
+ bool "Allow exporting the raw binary policy"
+ depends on SECURITY_APPARMOR_INTROSPECT_POLICY
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option allows reading back binary policy as it was loaded.
+ It increases the amount of kernel memory needed by policy and
+ also increases policy load time. This option is required for
+ checkpoint and restore support, and debugging of loaded policy.
+
+config SECURITY_APPARMOR_PARANOID_LOAD
+ bool "Perform full verification of loaded policy"
+ depends on SECURITY_APPARMOR
+ default y
+ help
+ This option allows controlling whether apparmor does a full
+ verification of loaded policy. This should not be disabled
+ except for embedded systems where the image is read only,
+ includes policy, and has some form of integrity check.
+ Disabling the check will speed up policy loads.
+
+config SECURITY_APPARMOR_KUNIT_TEST
+ tristate "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT && SECURITY_APPARMOR
+ default KUNIT_ALL_TESTS
+ help
+ This builds the AppArmor KUnit tests.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). They are only useful for
+ kernel devs running the KUnit test harness and are not intended for
+ inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
new file mode 100644
index 000000000..065f4e346
--- /dev/null
+++ b/security/apparmor/Makefile
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for AppArmor Linux Security Module
+#
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
+
+apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \
+ path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
+ resource.o secid.o file.o policy_ns.o label.o mount.o net.o
+apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
+
+obj-$(CONFIG_SECURITY_APPARMOR_KUNIT_TEST) += apparmor_policy_unpack_test.o
+apparmor_policy_unpack_test-objs += policy_unpack_test.o
+
+clean-files := capability_names.h rlim_names.h net_names.h
+
+# Build a lower case string table of address family names
+# Transform lines from
+# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
+# #define AF_INET 2 /* Internet IP Protocol */
+# to
+# [1] = "local",
+# [2] = "inet",
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+# #define AF_INET 2 /* Internet IP Protocol */
+# to
+# #define AA_SFS_AF_MASK "local inet"
+quiet_cmd_make-af = GEN $@
+cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
+ sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
+ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
+ echo "};" >> $@ ;\
+ printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
+ sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
+ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
+ $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+# Build a lower case string table of sock type names
+# Transform lines from
+# SOCK_STREAM = 1,
+# to
+# [1] = "stream",
+quiet_cmd_make-sock = GEN $@
+cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
+ sed $^ >>$@ -r -n \
+ -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
+ echo "};" >> $@
+
+# Build a lower case string table of capability names
+# Transforms lines from
+# #define CAP_DAC_OVERRIDE 1
+# to
+# [1] = "dac_override",
+quiet_cmd_make-caps = GEN $@
+cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\
+ sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
+ -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
+ echo "};" >> $@ ;\
+ printf '%s' '\#define AA_SFS_CAPS_MASK "' >> $@ ;\
+ sed $< -r -n -e '/CAP_FS_MASK/d' \
+ -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+
+# Build a lower case string table of rlimit names.
+# Transforms lines from
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# [RLIMIT_STACK] = "stack",
+#
+# and build a second integer table (with the second sed cmd), that maps
+# RLIMIT defines to the order defined in asm-generic/resource.h This is
+# required by policy load to map policy ordering of RLIMITs to internal
+# ordering for architectures that redefine an RLIMIT.
+# Transforms lines from
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# RLIMIT_STACK,
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+# #define RLIMIT_FSIZE 1 /* Maximum filesize */
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# #define AA_SFS_RLIMIT_MASK "fsize stack"
+quiet_cmd_make-rlim = GEN $@
+cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
+ > $@ ;\
+ sed $< >> $@ -r -n \
+ -e 's/^\# ?define[ \t]+(RLIMIT_([A-Z0-9_]+)).*/[\1] = "\L\2",/p';\
+ echo "};" >> $@ ;\
+ echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\
+ sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
+ echo "};" >> $@ ; \
+ printf '%s' '\#define AA_SFS_RLIMIT_MASK "' >> $@ ;\
+ sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+$(obj)/capability.o : $(obj)/capability_names.h
+$(obj)/net.o : $(obj)/net_names.h
+$(obj)/resource.o : $(obj)/rlim_names.h
+$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
+ $(src)/Makefile
+ $(call cmd,make-caps)
+$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
+ $(src)/Makefile
+ $(call cmd,make-rlim)
+$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
+ $(srctree)/include/linux/net.h \
+ $(src)/Makefile
+ $(call cmd,make-af)
+ $(call cmd,make-sock)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
new file mode 100644
index 000000000..7160e7aa5
--- /dev/null
+++ b/security/apparmor/apparmorfs.c
@@ -0,0 +1,2682 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /sys/kernel/security/apparmor interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/capability.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/poll.h>
+#include <linux/zlib.h>
+#include <uapi/linux/major.h>
+#include <uapi/linux/magic.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/crypto.h"
+#include "include/ipc.h"
+#include "include/label.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+#include "include/resource.h"
+#include "include/policy_unpack.h"
+#include "include/task.h"
+
+/*
+ * The apparmor filesystem interface used for policy load and introspection
+ * The interface is split into two main components based on their function:
+ * a securityfs component:
+ * used for static files that are always available, and which allows
+ * userspace to specify the location of the security filesystem.
+ *
+ * fns and data are prefixed with
+ * aa_sfs_
+ *
+ * an apparmorfs component:
+ * used for loaded policy content and introspection. It is not part of a
+ * regular mounted filesystem and is available only through the magic
+ * policy symlink in the root of the securityfs apparmor/ directory.
+ * Task queries will be magically redirected to the correct portion
+ * of the policy tree based on their confinement.
+ *
+ * fns and data are prefixed with
+ * aafs_
+ *
+ * The aa_fs_ prefix is used to indicate the fn is used by both the
+ * securityfs and apparmorfs filesystems.
+ */
+
+
+/*
+ * support fns
+ */
+
+struct rawdata_f_data {
+ struct aa_loaddata *loaddata;
+};
+
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+#define RAWDATA_F_DATA_BUF(p) (char *)(p + 1)
+
+static void rawdata_f_data_free(struct rawdata_f_data *private)
+{
+ if (!private)
+ return;
+
+ aa_put_loaddata(private->loaddata);
+ kvfree(private);
+}
+
+static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
+{
+ struct rawdata_f_data *ret;
+
+ if (size > SIZE_MAX - sizeof(*ret))
+ return ERR_PTR(-EINVAL);
+
+ ret = kvzalloc(sizeof(*ret) + size, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+#endif
+
+/**
+ * mangle_name - mangle a profile name to std profile layout form
+ * @name: profile name to mangle (NOT NULL)
+ * @target: buffer to store mangled name, same length as @name (MAYBE NULL)
+ *
+ * Returns: length of mangled name
+ */
+static int mangle_name(const char *name, char *target)
+{
+ char *t = target;
+
+ while (*name == '/' || *name == '.')
+ name++;
+
+ if (target) {
+ for (; *name; name++) {
+ if (*name == '/')
+ *(t)++ = '.';
+ else if (isspace(*name))
+ *(t)++ = '_';
+ else if (isalnum(*name) || strchr("._-", *name))
+ *(t)++ = *name;
+ }
+
+ *t = 0;
+ } else {
+ int len = 0;
+ for (; *name; name++) {
+ if (isalnum(*name) || isspace(*name) ||
+ strchr("/._-", *name))
+ len++;
+ }
+
+ return len;
+ }
+
+ return t - target;
+}
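A worked example (not in the source): for name "/usr/bin/foo bar", the leading '/' is skipped, each interior '/' becomes '.', and the space becomes '_', so a non-NULL target receives "usr.bin.foo_bar" and the function returns 15; with target == NULL it returns the same required length, 15.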
+
+
+/*
+ * aafs - core fns and data for the policy tree
+ */
+
+#define AAFS_NAME "apparmorfs"
+static struct vfsmount *aafs_mnt;
+static int aafs_count;
+
+
+static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
+{
+ seq_printf(seq, "%s:[%lu]", AAFS_NAME, d_inode(dentry)->i_ino);
+ return 0;
+}
+
+static void aafs_free_inode(struct inode *inode)
+{
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static const struct super_operations aafs_super_ops = {
+ .statfs = simple_statfs,
+ .free_inode = aafs_free_inode,
+ .show_path = aafs_show_path,
+};
+
+static int apparmorfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ static struct tree_descr files[] = { {""} };
+ int error;
+
+ error = simple_fill_super(sb, AAFS_MAGIC, files);
+ if (error)
+ return error;
+ sb->s_op = &aafs_super_ops;
+
+ return 0;
+}
+
+static int apparmorfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, apparmorfs_fill_super);
+}
+
+static const struct fs_context_operations apparmorfs_context_ops = {
+ .get_tree = apparmorfs_get_tree,
+};
+
+static int apparmorfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &apparmorfs_context_ops;
+ return 0;
+}
+
+static struct file_system_type aafs_ops = {
+ .owner = THIS_MODULE,
+ .name = AAFS_NAME,
+ .init_fs_context = apparmorfs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+/**
+ * __aafs_setup_d_inode - basic inode setup for apparmorfs
+ * @dir: parent directory for the dentry
+ * @dentry: dentry we are setting the inode up for
+ * @mode: permissions the file should have
+ * @data: data to store on inode.i_private, available in open()
+ * @link: if symlink, symlink target string
+ * @fops: struct file_operations that should be used
+ * @iops: struct of inode_operations that should be used
+ */
+static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
+ umode_t mode, void *data, char *link,
+ const struct file_operations *fops,
+ const struct inode_operations *iops)
+{
+ struct inode *inode = new_inode(dir->i_sb);
+
+ AA_BUG(!dir);
+ AA_BUG(!dentry);
+
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_private = data;
+ if (S_ISDIR(mode)) {
+ inode->i_op = iops ? iops : &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ } else if (S_ISLNK(mode)) {
+ inode->i_op = iops ? iops : &simple_symlink_inode_operations;
+ inode->i_link = link;
+ } else {
+ inode->i_fop = fops;
+ }
+ d_instantiate(dentry, inode);
+ dget(dentry);
+
+ return 0;
+}
+
+/**
+ * aafs_create - create a dentry in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @mode: permissions the file should have
+ * @parent: parent directory for this dentry
+ * @data: data to store on inode.i_private, available in open()
+ * @link: if symlink, symlink target string
+ * @fops: struct file_operations that should be used
+ * @iops: struct of inode_operations that should be used
+ *
+ * This is the basic "create a xxx" function for apparmorfs.
+ *
+ * Returns a pointer to a dentry if it succeeds, which must be freed
+ * with aafs_remove(). Will return ERR_PTR on failure.
+ */
+static struct dentry *aafs_create(const char *name, umode_t mode,
+ struct dentry *parent, void *data, void *link,
+ const struct file_operations *fops,
+ const struct inode_operations *iops)
+{
+ struct dentry *dentry;
+ struct inode *dir;
+ int error;
+
+ AA_BUG(!name);
+ AA_BUG(!parent);
+
+ if (!(mode & S_IFMT))
+ mode = (mode & S_IALLUGO) | S_IFREG;
+
+ error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
+ if (error)
+ return ERR_PTR(error);
+
+ dir = d_inode(parent);
+
+ inode_lock(dir);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry)) {
+ error = PTR_ERR(dentry);
+ goto fail_lock;
+ }
+
+ if (d_really_is_positive(dentry)) {
+ error = -EEXIST;
+ goto fail_dentry;
+ }
+
+ error = __aafs_setup_d_inode(dir, dentry, mode, data, link, fops, iops);
+ if (error)
+ goto fail_dentry;
+ inode_unlock(dir);
+
+ return dentry;
+
+fail_dentry:
+ dput(dentry);
+
+fail_lock:
+ inode_unlock(dir);
+ simple_release_fs(&aafs_mnt, &aafs_count);
+
+ return ERR_PTR(error);
+}
+
+/**
+ * aafs_create_file - create a file in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @mode: permissions the file should have
+ * @parent: parent directory for this dentry
+ * @data: data to store on inode.i_private, available in open()
+ * @fops: struct file_operations that should be used
+ *
+ * see aafs_create
+ */
+static struct dentry *aafs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
+{
+ return aafs_create(name, mode, parent, data, NULL, fops, NULL);
+}
+
+/**
+ * aafs_create_dir - create a directory in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @parent: parent directory for this dentry
+ *
+ * see aafs_create
+ */
+static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
+{
+ return aafs_create(name, S_IFDIR | 0755, parent, NULL, NULL, NULL,
+ NULL);
+}
+
+/**
+ * aafs_remove - removes a file or directory from the apparmorfs filesystem
+ *
+ * @dentry: dentry of the file/directory/symlink to be removed.
+ */
+static void aafs_remove(struct dentry *dentry)
+{
+ struct inode *dir;
+
+ if (!dentry || IS_ERR(dentry))
+ return;
+
+ dir = d_inode(dentry->d_parent);
+ inode_lock(dir);
+ if (simple_positive(dentry)) {
+ if (d_is_dir(dentry))
+ simple_rmdir(dir, dentry);
+ else
+ simple_unlink(dir, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(dir);
+ simple_release_fs(&aafs_mnt, &aafs_count);
+}
+
+
+/*
+ * aa_fs - policy load/replace/remove
+ */
+
+/**
+ * aa_simple_write_to_buffer - common routine for getting policy from user
+ * @userbuf: user buffer to copy data from (NOT NULL)
+ * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
+ * @copy_size: size of data to copy from user buffer
+ * @pos: position write is at in the file (NOT NULL)
+ *
+ * Returns: kernel buffer containing copy of user buffer data or an
+ * ERR_PTR on failure.
+ */
+static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
+ size_t alloc_size,
+ size_t copy_size,
+ loff_t *pos)
+{
+ struct aa_loaddata *data;
+
+ AA_BUG(copy_size > alloc_size);
+
+ if (*pos != 0)
+		/* only writes from pos 0, that is, complete writes */
+ return ERR_PTR(-ESPIPE);
+
+	/* returned data is freed by the caller via aa_put_loaddata() */
+ data = aa_loaddata_alloc(alloc_size);
+ if (IS_ERR(data))
+ return data;
+
+ data->size = copy_size;
+ if (copy_from_user(data->data, userbuf, copy_size)) {
+ aa_put_loaddata(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return data;
+}
+
+static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
+ loff_t *pos, struct aa_ns *ns)
+{
+ struct aa_loaddata *data;
+ struct aa_label *label;
+ ssize_t error;
+
+ label = begin_current_label_crit_section();
+
+	/* high level check about policy management - fine grained checks
+	 * are done below, after the policy is unpacked
+	 */
+ error = aa_may_manage_policy(label, ns, mask);
+ if (error)
+ goto end_section;
+
+ data = aa_simple_write_to_buffer(buf, size, size, pos);
+ error = PTR_ERR(data);
+ if (!IS_ERR(data)) {
+ error = aa_replace_profiles(ns, label, mask, data);
+ aa_put_loaddata(data);
+ }
+end_section:
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+/* .load file hook fn to load policy */
+static ssize_t profile_load(struct file *f, const char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
+ int error = policy_update(AA_MAY_LOAD_POLICY, buf, size, pos, ns);
+
+ aa_put_ns(ns);
+
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_load = {
+ .write = profile_load,
+ .llseek = default_llseek,
+};
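+
+/*
+ * Example (illustrative userspace sketch, not part of this file):
+ * aa_simple_write_to_buffer() only accepts writes starting at pos 0,
+ * so a policy blob must be submitted in a single write(2). The
+ * securityfs path below is an assumption (the conventional mount at
+ * /sys/kernel/security):
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	static int load_policy(const void *blob, size_t len)
+ *	{
+ *		ssize_t n = -1;
+ *		int fd = open("/sys/kernel/security/apparmor/.load",
+ *			      O_WRONLY);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		n = write(fd, blob, len);	** whole blob, one write **
+ *		close(fd);
+ *		return n == (ssize_t)len ? 0 : -1;
+ *	}
+ */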
+
+/* .replace file hook fn to load and/or replace policy */
+static ssize_t profile_replace(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
+ int error = policy_update(AA_MAY_LOAD_POLICY | AA_MAY_REPLACE_POLICY,
+ buf, size, pos, ns);
+ aa_put_ns(ns);
+
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_replace = {
+ .write = profile_replace,
+ .llseek = default_llseek,
+};
+
+/* .remove file hook fn to remove loaded policy */
+static ssize_t profile_remove(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct aa_loaddata *data;
+ struct aa_label *label;
+ ssize_t error;
+ struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
+
+ label = begin_current_label_crit_section();
+	/* high level check about policy management - fine grained checks
+	 * are done below, after the policy is unpacked
+	 */
+ error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
+ if (error)
+ goto out;
+
+ /*
+	 * aa_remove_profiles() needs a null terminated string so 1 extra
+ * byte is allocated and the copied data is null terminated.
+ */
+ data = aa_simple_write_to_buffer(buf, size + 1, size, pos);
+
+ error = PTR_ERR(data);
+ if (!IS_ERR(data)) {
+ data->data[size] = 0;
+ error = aa_remove_profiles(ns, label, data->data, size);
+ aa_put_loaddata(data);
+ }
+ out:
+ end_current_label_crit_section(label);
+ aa_put_ns(ns);
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_remove = {
+ .write = profile_remove,
+ .llseek = default_llseek,
+};
+
+struct aa_revision {
+ struct aa_ns *ns;
+ long last_read;
+};
+
+/* revision file hook fn for policy loads */
+static int ns_revision_release(struct inode *inode, struct file *file)
+{
+ struct aa_revision *rev = file->private_data;
+
+ if (rev) {
+ aa_put_ns(rev->ns);
+ kfree(rev);
+ }
+
+ return 0;
+}
+
+static ssize_t ns_revision_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct aa_revision *rev = file->private_data;
+ char buffer[32];
+ long last_read;
+ int avail;
+
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
+ last_read = rev->last_read;
+ if (last_read == rev->ns->revision) {
+ mutex_unlock(&rev->ns->lock);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ if (wait_event_interruptible(rev->ns->wait,
+ last_read !=
+ READ_ONCE(rev->ns->revision)))
+ return -ERESTARTSYS;
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
+ }
+
+ avail = sprintf(buffer, "%ld\n", rev->ns->revision);
+ if (*ppos + size > avail) {
+ rev->last_read = rev->ns->revision;
+ *ppos = 0;
+ }
+ mutex_unlock(&rev->ns->lock);
+
+ return simple_read_from_buffer(buf, size, ppos, buffer, avail);
+}
+
+static int ns_revision_open(struct inode *inode, struct file *file)
+{
+ struct aa_revision *rev = kzalloc(sizeof(*rev), GFP_KERNEL);
+
+ if (!rev)
+ return -ENOMEM;
+
+ rev->ns = aa_get_ns(inode->i_private);
+ if (!rev->ns)
+ rev->ns = aa_get_current_ns();
+ file->private_data = rev;
+
+ return 0;
+}
+
+static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
+{
+ struct aa_revision *rev = file->private_data;
+ __poll_t mask = 0;
+
+ if (rev) {
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
+ poll_wait(file, &rev->ns->wait, pt);
+ if (rev->last_read < rev->ns->revision)
+ mask |= EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&rev->ns->lock);
+ }
+
+ return mask;
+}
+
+void __aa_bump_ns_revision(struct aa_ns *ns)
+{
+ WRITE_ONCE(ns->revision, READ_ONCE(ns->revision) + 1);
+ wake_up_interruptible(&ns->wait);
+}
+
+static const struct file_operations aa_fs_ns_revision_fops = {
+ .owner = THIS_MODULE,
+ .open = ns_revision_open,
+ .poll = ns_revision_poll,
+ .read = ns_revision_read,
+ .llseek = generic_file_llseek,
+ .release = ns_revision_release,
+};
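+
+/*
+ * Example (illustrative userspace sketch, not part of this file): per
+ * ns_revision_read() above, a read returns the current revision and a
+ * subsequent read on the same fd blocks (absent O_NONBLOCK) until the
+ * revision is bumped by a policy load/replace/remove. The path is the
+ * conventional securityfs location and is an assumption here:
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	static void watch_revision(void)
+ *	{
+ *		char buf[32];
+ *		ssize_t n;
+ *		int fd = open("/sys/kernel/security/apparmor/revision",
+ *			      O_RDONLY);
+ *
+ *		if (fd < 0)
+ *			return;
+ *		while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
+ *			buf[n] = '\0';
+ *			printf("policy revision now %s", buf);
+ *		}
+ *		close(fd);
+ *	}
+ */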
+
+static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
+ const char *match_str, size_t match_len)
+{
+ struct aa_perms tmp = { };
+ struct aa_dfa *dfa;
+ unsigned int state = 0;
+
+ if (profile_unconfined(profile))
+ return;
+ if (profile->file.dfa && *match_str == AA_CLASS_FILE) {
+ dfa = profile->file.dfa;
+ state = aa_dfa_match_len(dfa, profile->file.start,
+ match_str + 1, match_len - 1);
+ if (state) {
+ struct path_cond cond = { };
+
+ tmp = aa_compute_fperms(dfa, state, &cond);
+ }
+ } else if (profile->policy.dfa) {
+ if (!PROFILE_MEDIATES(profile, *match_str))
+ return; /* no change to current perms */
+ dfa = profile->policy.dfa;
+ state = aa_dfa_match_len(dfa, profile->policy.start[0],
+ match_str, match_len);
+ if (state)
+ aa_compute_perms(dfa, state, &tmp);
+ }
+ aa_apply_modes_to_perms(profile, &tmp);
+ aa_perms_accum_raw(perms, &tmp);
+}
+
+
+/**
+ * query_data - queries a policy and writes its data to buf
+ * @buf: the resulting data is stored here (NOT NULL)
+ * @buf_len: size of buf
+ * @query: query string used to retrieve data
+ * @query_len: size of query including second NUL byte
+ *
+ * The buffers pointed to by buf and query may overlap. The query buffer is
+ * parsed before buf is written to.
+ *
+ * The query should look like "<LABEL>\0<KEY>\0", where <LABEL> is the name of
+ * the security confinement context and <KEY> is the name of the data to
+ * retrieve. Both <LABEL> and <KEY> must be NUL-terminated, as shown.
+ *
+ * Don't expect the contents of buf to be preserved on failure.
+ *
+ * Returns: number of characters written to buf or -errno on failure
+ */
+static ssize_t query_data(char *buf, size_t buf_len,
+ char *query, size_t query_len)
+{
+ char *out;
+ const char *key;
+ struct label_it i;
+ struct aa_label *label, *curr;
+ struct aa_profile *profile;
+ struct aa_data *data;
+ u32 bytes, blocks;
+ __le32 outle32;
+
+ if (!query_len)
+ return -EINVAL; /* need a query */
+
+ key = query + strnlen(query, query_len) + 1;
+ if (key + 1 >= query + query_len)
+ return -EINVAL; /* not enough space for a non-empty key */
+ if (key + strnlen(key, query + query_len - key) >= query + query_len)
+ return -EINVAL; /* must end with NUL */
+
+ if (buf_len < sizeof(bytes) + sizeof(blocks))
+ return -EINVAL; /* not enough space */
+
+ curr = begin_current_label_crit_section();
+ label = aa_label_parse(curr, query, GFP_KERNEL, false, false);
+ end_current_label_crit_section(curr);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
+ /* We are going to leave space for two numbers. The first is the total
+ * number of bytes we are writing after the first number. This is so
+ * users can read the full output without reallocation.
+ *
+ * The second number is the number of data blocks we're writing. An
+ * application might be confined by multiple policies having data in
+ * the same key.
+ */
+ memset(buf, 0, sizeof(bytes) + sizeof(blocks));
+ out = buf + sizeof(bytes) + sizeof(blocks);
+
+ blocks = 0;
+ label_for_each_confined(i, label, profile) {
+ if (!profile->data)
+ continue;
+
+ data = rhashtable_lookup_fast(profile->data, &key,
+ profile->data->p);
+
+ if (data) {
+ if (out + sizeof(outle32) + data->size > buf +
+ buf_len) {
+ aa_put_label(label);
+ return -EINVAL; /* not enough space */
+ }
+ outle32 = __cpu_to_le32(data->size);
+ memcpy(out, &outle32, sizeof(outle32));
+ out += sizeof(outle32);
+ memcpy(out, data->data, data->size);
+ out += data->size;
+ blocks++;
+ }
+ }
+ aa_put_label(label);
+
+ outle32 = __cpu_to_le32(out - buf - sizeof(bytes));
+ memcpy(buf, &outle32, sizeof(outle32));
+ outle32 = __cpu_to_le32(blocks);
+ memcpy(buf + sizeof(bytes), &outle32, sizeof(outle32));
+
+ return out - buf;
+}
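+
+/*
+ * Example (illustrative sketch, not part of this file): consuming the
+ * reply laid out above from userspace. The layout is little-endian:
+ * [__le32 total bytes following this field][__le32 block count], then
+ * per block [__le32 size][size bytes of data]. le32toh() keeps the
+ * sketch correct on big-endian hosts; per-block bounds checks are
+ * elided for brevity:
+ *
+ *	#include <endian.h>
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *
+ *	static void walk_data_reply(const char *buf, size_t len)
+ *	{
+ *		uint32_t total, blocks, size, i;
+ *		const char *p = buf + 8;
+ *
+ *		if (len < 8)
+ *			return;
+ *		memcpy(&total, buf, 4);
+ *		memcpy(&blocks, buf + 4, 4);
+ *		total = le32toh(total);
+ *		blocks = le32toh(blocks);
+ *		if (4 + total > len)
+ *			return;
+ *		for (i = 0; i < blocks; i++) {
+ *			memcpy(&size, p, 4);
+ *			size = le32toh(size);
+ *			p += 4;
+ *			** p points at size bytes of block data **
+ *			p += size;
+ *		}
+ *	}
+ */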
+
+/**
+ * query_label - queries a label and writes permissions to buf
+ * @buf: the resulting permissions string is stored here (NOT NULL)
+ * @buf_len: size of buf
+ * @query: binary query string to match against the dfa
+ * @query_len: size of query
+ * @view_only: only compute for querier's view
+ *
+ * The buffers pointed to by buf and query may overlap. The query buffer is
+ * parsed before buf is written to.
+ *
+ * The query should look like "LABEL_NAME\0DFA_STRING" where LABEL_NAME is
+ * the name of the label, in the current namespace, that is to be queried and
+ * DFA_STRING is a binary string to match against the label(s)'s DFA.
+ *
+ * LABEL_NAME must be NUL terminated. DFA_STRING may contain NUL characters
+ * but must *not* be NUL terminated.
+ *
+ * Returns: number of characters written to buf or -errno on failure
+ */
+static ssize_t query_label(char *buf, size_t buf_len,
+ char *query, size_t query_len, bool view_only)
+{
+ struct aa_profile *profile;
+ struct aa_label *label, *curr;
+ char *label_name, *match_str;
+ size_t label_name_len, match_len;
+ struct aa_perms perms;
+ struct label_it i;
+
+ if (!query_len)
+ return -EINVAL;
+
+ label_name = query;
+ label_name_len = strnlen(query, query_len);
+ if (!label_name_len || label_name_len == query_len)
+ return -EINVAL;
+
+	/*
+	 * The extra byte accounts for the null byte between the label
+	 * name and the dfa string. label_name_len is greater than zero
+	 * and less than query_len, so a byte can be safely added or
+	 * subtracted.
+	 */
+ match_str = label_name + label_name_len + 1;
+ match_len = query_len - label_name_len - 1;
+
+ curr = begin_current_label_crit_section();
+ label = aa_label_parse(curr, label_name, GFP_KERNEL, false, false);
+ end_current_label_crit_section(curr);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
+ perms = allperms;
+ if (view_only) {
+ label_for_each_in_ns(i, labels_ns(label), label, profile) {
+ profile_query_cb(profile, &perms, match_str, match_len);
+ }
+ } else {
+ label_for_each(i, label, profile) {
+ profile_query_cb(profile, &perms, match_str, match_len);
+ }
+ }
+ aa_put_label(label);
+
+ return scnprintf(buf, buf_len,
+ "allow 0x%08x\ndeny 0x%08x\naudit 0x%08x\nquiet 0x%08x\n",
+ perms.allow, perms.deny, perms.audit, perms.quiet);
+}
+
+/*
+ * Transaction based IO.
+ * The file expects a write which triggers the transaction, and then
+ * possibly one or more reads which collect the result - which is stored
+ * in a file-local buffer. Once a new write is performed, a new set of
+ * results is stored in the file-local buffer.
+ */
+struct multi_transaction {
+ struct kref count;
+ ssize_t size;
+ char data[];
+};
+
+#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
+
+static void multi_transaction_kref(struct kref *kref)
+{
+ struct multi_transaction *t;
+
+ t = container_of(kref, struct multi_transaction, count);
+ free_page((unsigned long) t);
+}
+
+static struct multi_transaction *
+get_multi_transaction(struct multi_transaction *t)
+{
+ if (t)
+ kref_get(&(t->count));
+
+ return t;
+}
+
+static void put_multi_transaction(struct multi_transaction *t)
+{
+ if (t)
+ kref_put(&(t->count), multi_transaction_kref);
+}
+
+/* does not increment @new's count */
+static void multi_transaction_set(struct file *file,
+ struct multi_transaction *new, size_t n)
+{
+ struct multi_transaction *old;
+
+ AA_BUG(n > MULTI_TRANSACTION_LIMIT);
+
+ new->size = n;
+ spin_lock(&file->f_lock);
+ old = (struct multi_transaction *) file->private_data;
+ file->private_data = new;
+ spin_unlock(&file->f_lock);
+ put_multi_transaction(old);
+}
+
+static struct multi_transaction *multi_transaction_new(struct file *file,
+ const char __user *buf,
+ size_t size)
+{
+ struct multi_transaction *t;
+
+ if (size > MULTI_TRANSACTION_LIMIT - 1)
+ return ERR_PTR(-EFBIG);
+
+ t = (struct multi_transaction *)get_zeroed_page(GFP_KERNEL);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&t->count);
+ if (copy_from_user(t->data, buf, size)) {
+ put_multi_transaction(t);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return t;
+}
+
+static ssize_t multi_transaction_read(struct file *file, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct multi_transaction *t;
+ ssize_t ret;
+
+ spin_lock(&file->f_lock);
+ t = get_multi_transaction(file->private_data);
+ spin_unlock(&file->f_lock);
+
+ if (!t)
+ return 0;
+
+ ret = simple_read_from_buffer(buf, size, pos, t->data, t->size);
+ put_multi_transaction(t);
+
+ return ret;
+}
+
+static int multi_transaction_release(struct inode *inode, struct file *file)
+{
+ put_multi_transaction(file->private_data);
+
+ return 0;
+}
+
+#define QUERY_CMD_LABEL "label\0"
+#define QUERY_CMD_LABEL_LEN 6
+#define QUERY_CMD_PROFILE "profile\0"
+#define QUERY_CMD_PROFILE_LEN 8
+#define QUERY_CMD_LABELALL "labelall\0"
+#define QUERY_CMD_LABELALL_LEN 9
+#define QUERY_CMD_DATA "data\0"
+#define QUERY_CMD_DATA_LEN 5
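+/* each *_LEN counts the command string plus its explicit NUL separator */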
+
+/**
+ * aa_write_access - generic permissions and data query
+ * @file: pointer to open apparmorfs/access file
+ * @ubuf: user buffer containing the complete query string (NOT NULL)
+ * @count: size of ubuf
+ * @ppos: position in the file (MUST BE ZERO)
+ *
+ * Allows for one permissions or data query per open(), write(), and read()
+ * sequence. The only queries currently supported are label-based queries for
+ * permissions or data.
+ *
+ * For permissions queries, ubuf must begin with "label\0", followed by the
+ * profile query specific format described in the query_label() function
+ * documentation.
+ *
+ * For data queries, ubuf must have the form "data\0<LABEL>\0<KEY>\0", where
+ * <LABEL> is the name of the security confinement context and <KEY> is the
+ * name of the data to retrieve.
+ *
+ * Returns: number of bytes written or -errno on failure
+ */
+static ssize_t aa_write_access(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct multi_transaction *t;
+ ssize_t len;
+
+ if (*ppos)
+ return -ESPIPE;
+
+ t = multi_transaction_new(file, ubuf, count);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+ if (count > QUERY_CMD_PROFILE_LEN &&
+ !memcmp(t->data, QUERY_CMD_PROFILE, QUERY_CMD_PROFILE_LEN)) {
+ len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+ t->data + QUERY_CMD_PROFILE_LEN,
+ count - QUERY_CMD_PROFILE_LEN, true);
+ } else if (count > QUERY_CMD_LABEL_LEN &&
+ !memcmp(t->data, QUERY_CMD_LABEL, QUERY_CMD_LABEL_LEN)) {
+ len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+ t->data + QUERY_CMD_LABEL_LEN,
+ count - QUERY_CMD_LABEL_LEN, true);
+ } else if (count > QUERY_CMD_LABELALL_LEN &&
+ !memcmp(t->data, QUERY_CMD_LABELALL,
+ QUERY_CMD_LABELALL_LEN)) {
+ len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+ t->data + QUERY_CMD_LABELALL_LEN,
+ count - QUERY_CMD_LABELALL_LEN, false);
+ } else if (count > QUERY_CMD_DATA_LEN &&
+ !memcmp(t->data, QUERY_CMD_DATA, QUERY_CMD_DATA_LEN)) {
+ len = query_data(t->data, MULTI_TRANSACTION_LIMIT,
+ t->data + QUERY_CMD_DATA_LEN,
+ count - QUERY_CMD_DATA_LEN);
+ } else
+ len = -EINVAL;
+
+ if (len < 0) {
+ put_multi_transaction(t);
+ return len;
+ }
+
+ multi_transaction_set(file, t, len);
+
+ return count;
+}
+
+static const struct file_operations aa_sfs_access = {
+ .write = aa_write_access,
+ .read = multi_transaction_read,
+ .release = multi_transaction_release,
+ .llseek = generic_file_llseek,
+};
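+
+/*
+ * Example (illustrative userspace sketch, not part of this file): one
+ * query per open/write/read sequence on the .access file. The label
+ * "/usr/bin/foo" and key "mykey" are placeholders, and the securityfs
+ * path is an assumption. pread() is used so the reply is read from
+ * offset 0 regardless of the file position left by write():
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	static ssize_t query_label_data(char *reply, size_t reply_len)
+ *	{
+ *		** "data\0<LABEL>\0<KEY>\0" - sizeof adds the last NUL **
+ *		static const char q[] = "data\0/usr/bin/foo\0mykey";
+ *		ssize_t n = -1;
+ *		int fd = open("/sys/kernel/security/apparmor/.access",
+ *			      O_RDWR);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		if (write(fd, q, sizeof(q)) == sizeof(q))
+ *			n = pread(fd, reply, reply_len, 0);
+ *		close(fd);
+ *		return n;
+ *	}
+ */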
+
+static int aa_sfs_seq_show(struct seq_file *seq, void *v)
+{
+ struct aa_sfs_entry *fs_file = seq->private;
+
+ if (!fs_file)
+ return 0;
+
+ switch (fs_file->v_type) {
+ case AA_SFS_TYPE_BOOLEAN:
+ seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
+ break;
+ case AA_SFS_TYPE_STRING:
+ seq_printf(seq, "%s\n", fs_file->v.string);
+ break;
+ case AA_SFS_TYPE_U64:
+ seq_printf(seq, "%#08lx\n", fs_file->v.u64);
+ break;
+ default:
+		/* Ignore unprintable entry types. */
+ break;
+ }
+
+ return 0;
+}
+
+static int aa_sfs_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aa_sfs_seq_show, inode->i_private);
+}
+
+const struct file_operations aa_sfs_seq_file_ops = {
+ .owner = THIS_MODULE,
+ .open = aa_sfs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * profile based file operations
+ * policy/profiles/XXXX/profiles/ *
+ */
+
+#define SEQ_PROFILE_FOPS(NAME) \
+static int seq_profile_ ##NAME ##_open(struct inode *inode, struct file *file)\
+{ \
+ return seq_profile_open(inode, file, seq_profile_ ##NAME ##_show); \
+} \
+ \
+static const struct file_operations seq_profile_ ##NAME ##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = seq_profile_ ##NAME ##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_profile_release, \
+} \
+
+static int seq_profile_open(struct inode *inode, struct file *file,
+ int (*show)(struct seq_file *, void *))
+{
+ struct aa_proxy *proxy = aa_get_proxy(inode->i_private);
+ int error = single_open(file, show, proxy);
+
+ if (error) {
+ file->private_data = NULL;
+ aa_put_proxy(proxy);
+ }
+
+ return error;
+}
+
+static int seq_profile_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ if (seq)
+ aa_put_proxy(seq->private);
+ return single_release(inode, file);
+}
+
+static int seq_profile_name_show(struct seq_file *seq, void *v)
+{
+ struct aa_proxy *proxy = seq->private;
+ struct aa_label *label = aa_get_label_rcu(&proxy->label);
+ struct aa_profile *profile = labels_profile(label);
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_label(label);
+
+ return 0;
+}
+
+static int seq_profile_mode_show(struct seq_file *seq, void *v)
+{
+ struct aa_proxy *proxy = seq->private;
+ struct aa_label *label = aa_get_label_rcu(&proxy->label);
+ struct aa_profile *profile = labels_profile(label);
+ seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
+ aa_put_label(label);
+
+ return 0;
+}
+
+static int seq_profile_attach_show(struct seq_file *seq, void *v)
+{
+ struct aa_proxy *proxy = seq->private;
+ struct aa_label *label = aa_get_label_rcu(&proxy->label);
+ struct aa_profile *profile = labels_profile(label);
+ if (profile->attach)
+ seq_printf(seq, "%s\n", profile->attach);
+ else if (profile->xmatch)
+ seq_puts(seq, "<unknown>\n");
+ else
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_label(label);
+
+ return 0;
+}
+
+static int seq_profile_hash_show(struct seq_file *seq, void *v)
+{
+ struct aa_proxy *proxy = seq->private;
+ struct aa_label *label = aa_get_label_rcu(&proxy->label);
+ struct aa_profile *profile = labels_profile(label);
+ unsigned int i, size = aa_hash_size();
+
+ if (profile->hash) {
+ for (i = 0; i < size; i++)
+ seq_printf(seq, "%.2x", profile->hash[i]);
+ seq_putc(seq, '\n');
+ }
+ aa_put_label(label);
+
+ return 0;
+}
+
+SEQ_PROFILE_FOPS(name);
+SEQ_PROFILE_FOPS(mode);
+SEQ_PROFILE_FOPS(attach);
+SEQ_PROFILE_FOPS(hash);
+
+/*
+ * namespace based files
+ * several root files and
+ * policy/ *
+ */
+
+#define SEQ_NS_FOPS(NAME) \
+static int seq_ns_ ##NAME ##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, seq_ns_ ##NAME ##_show, inode->i_private); \
+} \
+ \
+static const struct file_operations seq_ns_ ##NAME ##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = seq_ns_ ##NAME ##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+} \
+
+static int seq_ns_stacked_show(struct seq_file *seq, void *v)
+{
+ struct aa_label *label;
+
+ label = begin_current_label_crit_section();
+ seq_printf(seq, "%s\n", label->size > 1 ? "yes" : "no");
+ end_current_label_crit_section(label);
+
+ return 0;
+}
+
+static int seq_ns_nsstacked_show(struct seq_file *seq, void *v)
+{
+ struct aa_label *label;
+ struct aa_profile *profile;
+ struct label_it it;
+ int count = 1;
+
+ label = begin_current_label_crit_section();
+
+ if (label->size > 1) {
+ label_for_each(it, label, profile)
+ if (profile->ns != labels_ns(label)) {
+ count++;
+ break;
+ }
+ }
+
+ seq_printf(seq, "%s\n", count > 1 ? "yes" : "no");
+ end_current_label_crit_section(label);
+
+ return 0;
+}
+
+static int seq_ns_level_show(struct seq_file *seq, void *v)
+{
+ struct aa_label *label;
+
+ label = begin_current_label_crit_section();
+ seq_printf(seq, "%d\n", labels_ns(label)->level);
+ end_current_label_crit_section(label);
+
+ return 0;
+}
+
+static int seq_ns_name_show(struct seq_file *seq, void *v)
+{
+ struct aa_label *label = begin_current_label_crit_section();
+ seq_printf(seq, "%s\n", labels_ns(label)->base.name);
+ end_current_label_crit_section(label);
+
+ return 0;
+}
+
+SEQ_NS_FOPS(stacked);
+SEQ_NS_FOPS(nsstacked);
+SEQ_NS_FOPS(level);
+SEQ_NS_FOPS(name);
+
+
+/* policy/raw_data/ * file ops */
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+#define SEQ_RAWDATA_FOPS(NAME) \
+static int seq_rawdata_ ##NAME ##_open(struct inode *inode, struct file *file)\
+{ \
+ return seq_rawdata_open(inode, file, seq_rawdata_ ##NAME ##_show); \
+} \
+ \
+static const struct file_operations seq_rawdata_ ##NAME ##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = seq_rawdata_ ##NAME ##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_rawdata_release, \
+} \
+
+static int seq_rawdata_open(struct inode *inode, struct file *file,
+ int (*show)(struct seq_file *, void *))
+{
+ struct aa_loaddata *data = __aa_get_loaddata(inode->i_private);
+ int error;
+
+ if (!data)
+		/* lost race: this entry is being reaped */
+ return -ENOENT;
+
+ error = single_open(file, show, data);
+ if (error) {
+ AA_BUG(file->private_data &&
+ ((struct seq_file *)file->private_data)->private);
+ aa_put_loaddata(data);
+ }
+
+ return error;
+}
+
+static int seq_rawdata_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+
+ if (seq)
+ aa_put_loaddata(seq->private);
+
+ return single_release(inode, file);
+}
+
+static int seq_rawdata_abi_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+
+ seq_printf(seq, "v%d\n", data->abi);
+
+ return 0;
+}
+
+static int seq_rawdata_revision_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+
+ seq_printf(seq, "%ld\n", data->revision);
+
+ return 0;
+}
+
+static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+ unsigned int i, size = aa_hash_size();
+
+ if (data->hash) {
+ for (i = 0; i < size; i++)
+ seq_printf(seq, "%.2x", data->hash[i]);
+ seq_putc(seq, '\n');
+ }
+
+ return 0;
+}
+
+static int seq_rawdata_compressed_size_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+
+ seq_printf(seq, "%zu\n", data->compressed_size);
+
+ return 0;
+}
+
+SEQ_RAWDATA_FOPS(abi);
+SEQ_RAWDATA_FOPS(revision);
+SEQ_RAWDATA_FOPS(hash);
+SEQ_RAWDATA_FOPS(compressed_size);
+
+static int deflate_decompress(char *src, size_t slen, char *dst, size_t dlen)
+{
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+ if (aa_g_rawdata_compression_level != 0) {
+ int error = 0;
+ struct z_stream_s strm;
+
+ memset(&strm, 0, sizeof(strm));
+
+ strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+
+ error = zlib_inflateInit(&strm);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_inflate_init;
+ }
+
+ strm.next_out = dst;
+ strm.avail_out = dlen;
+
+ error = zlib_inflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END)
+ error = -EINVAL;
+ else
+ error = 0;
+
+ zlib_inflateEnd(&strm);
+fail_inflate_init:
+ kvfree(strm.workspace);
+
+ return error;
+ }
+#endif
+
+ if (dlen < slen)
+ return -EINVAL;
+ memcpy(dst, src, slen);
+ return 0;
+}
+
+static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
+{
+ struct rawdata_f_data *private = file->private_data;
+
+ return simple_read_from_buffer(buf, size, ppos,
+ RAWDATA_F_DATA_BUF(private),
+ private->loaddata->size);
+}
+
+static int rawdata_release(struct inode *inode, struct file *file)
+{
+ rawdata_f_data_free(file->private_data);
+
+ return 0;
+}
+
+static int rawdata_open(struct inode *inode, struct file *file)
+{
+ int error;
+ struct aa_loaddata *loaddata;
+ struct rawdata_f_data *private;
+
+ if (!aa_current_policy_view_capable(NULL))
+ return -EACCES;
+
+ loaddata = __aa_get_loaddata(inode->i_private);
+ if (!loaddata)
+ /* lost race: this entry is being reaped */
+ return -ENOENT;
+
+ private = rawdata_f_data_alloc(loaddata->size);
+ if (IS_ERR(private)) {
+ error = PTR_ERR(private);
+ goto fail_private_alloc;
+ }
+
+ private->loaddata = loaddata;
+
+ error = deflate_decompress(loaddata->data, loaddata->compressed_size,
+ RAWDATA_F_DATA_BUF(private),
+ loaddata->size);
+ if (error)
+ goto fail_decompress;
+
+ file->private_data = private;
+ return 0;
+
+fail_decompress:
+ rawdata_f_data_free(private);
+ return error;
+
+fail_private_alloc:
+ aa_put_loaddata(loaddata);
+ return error;
+}
+
+static const struct file_operations rawdata_fops = {
+ .open = rawdata_open,
+ .read = rawdata_read,
+ .llseek = generic_file_llseek,
+ .release = rawdata_release,
+};
+
+static void remove_rawdata_dents(struct aa_loaddata *rawdata)
+{
+ int i;
+
+ for (i = 0; i < AAFS_LOADDATA_NDENTS; i++) {
+ if (!IS_ERR_OR_NULL(rawdata->dents[i])) {
+ /* no refcounts on i_private */
+ aafs_remove(rawdata->dents[i]);
+ rawdata->dents[i] = NULL;
+ }
+ }
+}
+
+void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata)
+{
+ AA_BUG(rawdata->ns && !mutex_is_locked(&rawdata->ns->lock));
+
+ if (rawdata->ns) {
+ remove_rawdata_dents(rawdata);
+ list_del_init(&rawdata->list);
+ aa_put_ns(rawdata->ns);
+ rawdata->ns = NULL;
+ }
+}
+
+int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
+{
+ struct dentry *dent, *dir;
+
+ AA_BUG(!ns);
+ AA_BUG(!rawdata);
+ AA_BUG(!mutex_is_locked(&ns->lock));
+ AA_BUG(!ns_subdata_dir(ns));
+
+	/*
+	 * just use the ns revision the dir was originally created at. This
+	 * is done under ns->lock, and if the load is successful the
+	 * revision will be bumped and is guaranteed to be unique
+	 */
+ rawdata->name = kasprintf(GFP_KERNEL, "%ld", ns->revision);
+ if (!rawdata->name)
+ return -ENOMEM;
+
+ dir = aafs_create_dir(rawdata->name, ns_subdata_dir(ns));
+ if (IS_ERR(dir))
+ /* ->name freed when rawdata freed */
+ return PTR_ERR(dir);
+ rawdata->dents[AAFS_LOADDATA_DIR] = dir;
+
+ dent = aafs_create_file("abi", S_IFREG | 0444, dir, rawdata,
+ &seq_rawdata_abi_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_ABI] = dent;
+
+ dent = aafs_create_file("revision", S_IFREG | 0444, dir, rawdata,
+ &seq_rawdata_revision_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_REVISION] = dent;
+
+ if (aa_g_hash_policy) {
+ dent = aafs_create_file("sha1", S_IFREG | 0444, dir,
+ rawdata, &seq_rawdata_hash_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_HASH] = dent;
+ }
+
+ dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir,
+ rawdata,
+ &seq_rawdata_compressed_size_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent;
+
+ dent = aafs_create_file("raw_data", S_IFREG | 0444,
+ dir, rawdata, &rawdata_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_DATA] = dent;
+ d_inode(dent)->i_size = rawdata->size;
+
+ rawdata->ns = aa_get_ns(ns);
+ list_add(&rawdata->list, &ns->rawdata_list);
+ /* no refcount on inode rawdata */
+
+ return 0;
+
+fail:
+ remove_rawdata_dents(rawdata);
+
+ return PTR_ERR(dent);
+}
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
+
+
+/** fns to setup dynamic per profile/namespace files **/
+
+/*
+ * Requires: @profile->ns->lock held
+ */
+void __aafs_profile_rmdir(struct aa_profile *profile)
+{
+ struct aa_profile *child;
+ int i;
+
+ if (!profile)
+ return;
+
+ list_for_each_entry(child, &profile->base.profiles, base.list)
+ __aafs_profile_rmdir(child);
+
+ for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
+ struct aa_proxy *proxy;
+ if (!profile->dents[i])
+ continue;
+
+ proxy = d_inode(profile->dents[i])->i_private;
+ aafs_remove(profile->dents[i]);
+ aa_put_proxy(proxy);
+ profile->dents[i] = NULL;
+ }
+}
+
+/*
+ * Requires: @old->ns->lock held
+ */
+void __aafs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new)
+{
+ int i;
+
+ AA_BUG(!old);
+ AA_BUG(!new);
+ AA_BUG(!mutex_is_locked(&profiles_ns(old)->lock));
+
+ for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+ new->dents[i] = old->dents[i];
+ if (new->dents[i])
+ new->dents[i]->d_inode->i_mtime = current_time(new->dents[i]->d_inode);
+ old->dents[i] = NULL;
+ }
+}
+
+static struct dentry *create_profile_file(struct dentry *dir, const char *name,
+ struct aa_profile *profile,
+ const struct file_operations *fops)
+{
+ struct aa_proxy *proxy = aa_get_proxy(profile->label.proxy);
+ struct dentry *dent;
+
+ dent = aafs_create_file(name, S_IFREG | 0444, dir, proxy, fops);
+ if (IS_ERR(dent))
+ aa_put_proxy(proxy);
+
+ return dent;
+}
+
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+static int profile_depth(struct aa_profile *profile)
+{
+ int depth = 0;
+
+ rcu_read_lock();
+ for (depth = 0; profile; profile = rcu_access_pointer(profile->parent))
+ depth++;
+ rcu_read_unlock();
+
+ return depth;
+}
+
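+/*
+ * Each depth level contributes one "../../" to climb from the profile's
+ * directory under policy/profiles/ back to the ns policy dir that holds
+ * raw_data/. e.g. (hypothetical) depth 1, dirname "7", fname "abi"
+ * yields "../../raw_data/7/abi".
+ */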
+static char *gen_symlink_name(int depth, const char *dirname, const char *fname)
+{
+ char *buffer, *s;
+ int error;
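+	/* 6 per level for "../../"; 11 covers "raw_data/", '/' and NUL */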
+ int size = depth * 6 + strlen(dirname) + strlen(fname) + 11;
+
+ s = buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ for (; depth > 0; depth--) {
+ strcpy(s, "../../");
+ s += 6;
+ size -= 6;
+ }
+
+ error = snprintf(s, size, "raw_data/%s/%s", dirname, fname);
+ if (error >= size || error < 0) {
+ kfree(buffer);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ return buffer;
+}
+
+static void rawdata_link_cb(void *arg)
+{
+ kfree(arg);
+}
+
+static const char *rawdata_get_link_base(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done,
+ const char *name)
+{
+ struct aa_proxy *proxy = inode->i_private;
+ struct aa_label *label;
+ struct aa_profile *profile;
+ char *target;
+ int depth;
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ label = aa_get_label_rcu(&proxy->label);
+ profile = labels_profile(label);
+ depth = profile_depth(profile);
+ target = gen_symlink_name(depth, profile->rawdata->name, name);
+ aa_put_label(label);
+
+ if (IS_ERR(target))
+ return target;
+
+ set_delayed_call(done, rawdata_link_cb, target);
+
+ return target;
+}
+
+static const char *rawdata_get_link_sha1(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "sha1");
+}
+
+static const char *rawdata_get_link_abi(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "abi");
+}
+
+static const char *rawdata_get_link_data(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "raw_data");
+}
+
+static const struct inode_operations rawdata_link_sha1_iops = {
+ .get_link = rawdata_get_link_sha1,
+};
+
+static const struct inode_operations rawdata_link_abi_iops = {
+ .get_link = rawdata_get_link_abi,
+};
+static const struct inode_operations rawdata_link_data_iops = {
+ .get_link = rawdata_get_link_data,
+};
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
+
+/*
+ * Requires: @profile->ns->lock held
+ */
+int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+{
+ struct aa_profile *child;
+ struct dentry *dent = NULL, *dir;
+ int error;
+
+ AA_BUG(!profile);
+ AA_BUG(!mutex_is_locked(&profiles_ns(profile)->lock));
+
+ if (!parent) {
+ struct aa_profile *p;
+ p = aa_deref_parent(profile);
+ dent = prof_dir(p);
+ /* adding to parent that previously didn't have children */
+ dent = aafs_create_dir("profiles", dent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_child_dir(p) = parent = dent;
+ }
+
+ if (!profile->dirname) {
+ int len, id_len;
+ len = mangle_name(profile->base.name, NULL);
+ id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
+
+ profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
+ if (!profile->dirname) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+
+ mangle_name(profile->base.name, profile->dirname);
+ sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
+ }
+
+ dent = aafs_create_dir(profile->dirname, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_dir(profile) = dir = dent;
+
+ dent = create_profile_file(dir, "name", profile,
+ &seq_profile_name_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_NAME] = dent;
+
+ dent = create_profile_file(dir, "mode", profile,
+ &seq_profile_mode_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_MODE] = dent;
+
+ dent = create_profile_file(dir, "attach", profile,
+ &seq_profile_attach_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_ATTACH] = dent;
+
+ if (profile->hash) {
+ dent = create_profile_file(dir, "sha1", profile,
+ &seq_profile_hash_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_HASH] = dent;
+ }
+
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+ if (profile->rawdata) {
+ if (aa_g_hash_policy) {
+ dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_sha1_iops);
+ if (IS_ERR(dent))
+ goto fail;
+ aa_get_proxy(profile->label.proxy);
+ profile->dents[AAFS_PROF_RAW_HASH] = dent;
+ }
+ dent = aafs_create("raw_abi", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_abi_iops);
+ if (IS_ERR(dent))
+ goto fail;
+ aa_get_proxy(profile->label.proxy);
+ profile->dents[AAFS_PROF_RAW_ABI] = dent;
+
+ dent = aafs_create("raw_data", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_data_iops);
+ if (IS_ERR(dent))
+ goto fail;
+ aa_get_proxy(profile->label.proxy);
+ profile->dents[AAFS_PROF_RAW_DATA] = dent;
+ }
+#endif /*CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
+
+ list_for_each_entry(child, &profile->base.profiles, base.list) {
+ error = __aafs_profile_mkdir(child, prof_child_dir(profile));
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
+
+fail:
+ error = PTR_ERR(dent);
+
+fail2:
+ __aafs_profile_rmdir(profile);
+
+ return error;
+}
+
+static int ns_mkdir_op(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
+{
+ struct aa_ns *ns, *parent;
+ /* TODO: improve permission check */
+ struct aa_label *label;
+ int error;
+
+ label = begin_current_label_crit_section();
+ error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+
+ parent = aa_get_ns(dir->i_private);
+ AA_BUG(d_inode(ns_subns_dir(parent)) != dir);
+
+ /* we have to unlock and then relock to get locking order right
+ * for pin_fs
+ */
+ inode_unlock(dir);
+ error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
+ mutex_lock_nested(&parent->lock, parent->level);
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ if (error)
+ goto out;
+
+ error = __aafs_setup_d_inode(dir, dentry, mode | S_IFDIR, NULL,
+ NULL, NULL, NULL);
+ if (error)
+ goto out_pin;
+
+ ns = __aa_find_or_create_ns(parent, READ_ONCE(dentry->d_name.name),
+ dentry);
+ if (IS_ERR(ns)) {
+ error = PTR_ERR(ns);
+ ns = NULL;
+ }
+
+ aa_put_ns(ns); /* list ref remains */
+out_pin:
+ if (error)
+ simple_release_fs(&aafs_mnt, &aafs_count);
+out:
+ mutex_unlock(&parent->lock);
+ aa_put_ns(parent);
+
+ return error;
+}
+
+static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
+{
+ struct aa_ns *ns, *parent;
+ /* TODO: improve permission check */
+ struct aa_label *label;
+ int error;
+
+ label = begin_current_label_crit_section();
+ error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+
+ parent = aa_get_ns(dir->i_private);
+ /* rmdir calls the generic securityfs functions to remove files
+ * from the apparmor dir. It is up to the apparmor ns locking
+ * to avoid races.
+ */
+ inode_unlock(dir);
+ inode_unlock(dentry->d_inode);
+
+ mutex_lock_nested(&parent->lock, parent->level);
+ ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
+ dentry->d_name.len));
+ if (!ns) {
+ error = -ENOENT;
+ goto out;
+ }
+ AA_BUG(ns_dir(ns) != dentry);
+
+ __aa_remove_ns(ns);
+ aa_put_ns(ns);
+
+out:
+ mutex_unlock(&parent->lock);
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ inode_lock(dentry->d_inode);
+ aa_put_ns(parent);
+
+ return error;
+}
+
+static const struct inode_operations ns_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = ns_mkdir_op,
+ .rmdir = ns_rmdir_op,
+};
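+
+/*
+ * Example (illustrative, not part of this file): with these inode ops a
+ * policy namespace can be created or removed from userspace with plain
+ * mkdir(2)/rmdir(2) on the virtualized policy tree; the path below is
+ * an assumption (conventional securityfs mount):
+ *
+ *	mkdir("/sys/kernel/security/apparmor/policy/namespaces/myns", 0755);
+ */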
+
+static void __aa_fs_list_remove_rawdata(struct aa_ns *ns)
+{
+ struct aa_loaddata *ent, *tmp;
+
+ AA_BUG(!mutex_is_locked(&ns->lock));
+
+ list_for_each_entry_safe(ent, tmp, &ns->rawdata_list, list)
+ __aa_fs_remove_rawdata(ent);
+}
+
+/*
+ * Requires: @ns->lock held
+ */
+void __aafs_ns_rmdir(struct aa_ns *ns)
+{
+ struct aa_ns *sub;
+ struct aa_profile *child;
+ int i;
+
+ if (!ns)
+ return;
+ AA_BUG(!mutex_is_locked(&ns->lock));
+
+ list_for_each_entry(child, &ns->base.profiles, base.list)
+ __aafs_profile_rmdir(child);
+
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock_nested(&sub->lock, sub->level);
+ __aafs_ns_rmdir(sub);
+ mutex_unlock(&sub->lock);
+ }
+
+ __aa_fs_list_remove_rawdata(ns);
+
+ if (ns_subns_dir(ns)) {
+ sub = d_inode(ns_subns_dir(ns))->i_private;
+ aa_put_ns(sub);
+ }
+ if (ns_subload(ns)) {
+ sub = d_inode(ns_subload(ns))->i_private;
+ aa_put_ns(sub);
+ }
+ if (ns_subreplace(ns)) {
+ sub = d_inode(ns_subreplace(ns))->i_private;
+ aa_put_ns(sub);
+ }
+ if (ns_subremove(ns)) {
+ sub = d_inode(ns_subremove(ns))->i_private;
+ aa_put_ns(sub);
+ }
+ if (ns_subrevision(ns)) {
+ sub = d_inode(ns_subrevision(ns))->i_private;
+ aa_put_ns(sub);
+ }
+
+ for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
+ aafs_remove(ns->dents[i]);
+ ns->dents[i] = NULL;
+ }
+}
+
+/* assumes cleanup in caller */
+static int __aafs_ns_mkdir_entries(struct aa_ns *ns, struct dentry *dir)
+{
+ struct dentry *dent;
+
+ AA_BUG(!ns);
+ AA_BUG(!dir);
+
+ dent = aafs_create_dir("profiles", dir);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ ns_subprofs_dir(ns) = dent;
+
+ dent = aafs_create_dir("raw_data", dir);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ ns_subdata_dir(ns) = dent;
+
+ dent = aafs_create_file("revision", 0444, dir, ns,
+ &aa_fs_ns_revision_fops);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ aa_get_ns(ns);
+ ns_subrevision(ns) = dent;
+
+ dent = aafs_create_file(".load", 0640, dir, ns,
+ &aa_fs_profile_load);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ aa_get_ns(ns);
+ ns_subload(ns) = dent;
+
+ dent = aafs_create_file(".replace", 0640, dir, ns,
+ &aa_fs_profile_replace);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ aa_get_ns(ns);
+ ns_subreplace(ns) = dent;
+
+ dent = aafs_create_file(".remove", 0640, dir, ns,
+ &aa_fs_profile_remove);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ aa_get_ns(ns);
+ ns_subremove(ns) = dent;
+
+	/* use aafs_create() directly so we can supply private data */
+ dent = aafs_create("namespaces", S_IFDIR | 0755, dir, ns, NULL, NULL,
+ &ns_dir_inode_operations);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+ aa_get_ns(ns);
+ ns_subns_dir(ns) = dent;
+
+ return 0;
+}
+
+/*
+ * Requires: @ns->lock held
+ */
+int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
+ struct dentry *dent)
+{
+ struct aa_ns *sub;
+ struct aa_profile *child;
+ struct dentry *dir;
+ int error;
+
+ AA_BUG(!ns);
+ AA_BUG(!parent);
+ AA_BUG(!mutex_is_locked(&ns->lock));
+
+ if (!name)
+ name = ns->base.name;
+
+ if (!dent) {
+ /* create ns dir if it doesn't already exist */
+ dent = aafs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ } else
+ dget(dent);
+ ns_dir(ns) = dir = dent;
+ error = __aafs_ns_mkdir_entries(ns, dir);
+ if (error)
+ goto fail2;
+
+ /* profiles */
+ list_for_each_entry(child, &ns->base.profiles, base.list) {
+ error = __aafs_profile_mkdir(child, ns_subprofs_dir(ns));
+ if (error)
+ goto fail2;
+ }
+
+ /* subnamespaces */
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock_nested(&sub->lock, sub->level);
+ error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
+ mutex_unlock(&sub->lock);
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
+
+fail:
+ error = PTR_ERR(dent);
+
+fail2:
+ __aafs_ns_rmdir(ns);
+
+ return error;
+}
+
+/**
+ * __next_ns - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * Requires: ns->parent->lock to be held
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
+{
+ struct aa_ns *parent, *next;
+
+ AA_BUG(!root);
+ AA_BUG(!ns);
+ AA_BUG(ns != root && !mutex_is_locked(&ns->parent->lock));
+
+ /* is next namespace a child */
+ if (!list_empty(&ns->sub_ns)) {
+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+ mutex_lock_nested(&next->lock, next->level);
+ return next;
+ }
+
+ /* check if the next ns is a sibling, parent, gp, .. */
+ parent = ns->parent;
+ while (ns != root) {
+ mutex_unlock(&ns->lock);
+ next = list_next_entry(ns, base.list);
+ if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
+ mutex_lock_nested(&next->lock, next->level);
+ return next;
+ }
+ ns = parent;
+ parent = parent->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__first_profile(struct aa_ns *root,
+ struct aa_ns *ns)
+{
+ AA_BUG(!root);
+ AA_BUG(ns && !mutex_is_locked(&ns->lock));
+
+ for (; ns; ns = __next_ns(root, ns)) {
+ if (!list_empty(&ns->base.profiles))
+ return list_first_entry(&ns->base.profiles,
+ struct aa_profile, base.list);
+ }
+ return NULL;
+}
+
+/**
+ * __next_profile - step to the next profile in a profile tree
+ * @p: current profile in tree (NOT NULL)
+ *
+ * Perform a depth first traversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__next_profile(struct aa_profile *p)
+{
+ struct aa_profile *parent;
+ struct aa_ns *ns = p->ns;
+
+ AA_BUG(!mutex_is_locked(&profiles_ns(p)->lock));
+
+ /* is next profile a child */
+ if (!list_empty(&p->base.profiles))
+ return list_first_entry(&p->base.profiles, typeof(*p),
+ base.list);
+
+ /* is next profile a sibling, parent sibling, gp, sibling, .. */
+ parent = rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
+ while (parent) {
+ p = list_next_entry(p, base.list);
+ if (!list_entry_is_head(p, &parent->base.profiles, base.list))
+ return p;
+ p = parent;
+ parent = rcu_dereference_protected(parent->parent,
+ mutex_is_locked(&parent->ns->lock));
+ }
+
+ /* is next another profile in the namespace */
+ p = list_next_entry(p, base.list);
+ if (!list_entry_is_head(p, &ns->base.profiles, base.list))
+ return p;
+
+ return NULL;
+}
+
+/**
+ * next_profile - step to the next profile wherever it may be
+ * @root: root namespace (NOT NULL)
+ * @profile: current profile (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_ns *root,
+ struct aa_profile *profile)
+{
+ struct aa_profile *next = __next_profile(profile);
+ if (next)
+ return next;
+
+	/* finished all profiles in namespace, move to next namespace */
+ return __first_profile(root, __next_ns(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+{
+ struct aa_profile *profile = NULL;
+ struct aa_ns *root = aa_get_current_ns();
+ loff_t l = *pos;
+ f->private = root;
+
+ /* find the first profile */
+ mutex_lock_nested(&root->lock, root->level);
+ profile = __first_profile(root, root);
+
+ /* skip to position */
+ for (; profile && l > 0; l--)
+ profile = next_profile(root, profile);
+
+ return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+ struct aa_profile *profile = p;
+ struct aa_ns *ns = f->private;
+ (*pos)++;
+
+ return next_profile(ns, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile written
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = p;
+ struct aa_ns *root = f->private, *ns;
+
+ if (profile) {
+ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+ mutex_unlock(&ns->lock);
+ }
+ mutex_unlock(&root->lock);
+ aa_put_ns(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to fill
+ * @p: current position (profile) (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = (struct aa_profile *)p;
+ struct aa_ns *root = f->private;
+
+ aa_label_seq_xprint(f, root, &profile->label,
+ FLAG_SHOW_MODE | FLAG_VIEW_SUBNS, GFP_KERNEL);
+ seq_putc(f, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations aa_sfs_profiles_op = {
+ .start = p_start,
+ .next = p_next,
+ .stop = p_stop,
+ .show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+ if (!aa_current_policy_view_capable(NULL))
+ return -EACCES;
+
+ return seq_open(file, &aa_sfs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+ return seq_release(inode, file);
+}
+
+static const struct file_operations aa_sfs_profiles_fops = {
+ .open = profiles_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = profiles_release,
+};
+
+
+/** Base file system setup **/
+static struct aa_sfs_entry aa_sfs_entry_file[] = {
+ AA_SFS_FILE_STRING("mask",
+ "create read write exec append mmap_exec link lock"),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_ptrace[] = {
+ AA_SFS_FILE_STRING("mask", "read trace"),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_signal[] = {
+ AA_SFS_FILE_STRING("mask", AA_SFS_SIG_MASK),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_attach[] = {
+ AA_SFS_FILE_BOOLEAN("xattr", 1),
+ { }
+};
+static struct aa_sfs_entry aa_sfs_entry_domain[] = {
+ AA_SFS_FILE_BOOLEAN("change_hat", 1),
+ AA_SFS_FILE_BOOLEAN("change_hatv", 1),
+ AA_SFS_FILE_BOOLEAN("change_onexec", 1),
+ AA_SFS_FILE_BOOLEAN("change_profile", 1),
+ AA_SFS_FILE_BOOLEAN("stack", 1),
+ AA_SFS_FILE_BOOLEAN("fix_binfmt_elf_mmap", 1),
+ AA_SFS_FILE_BOOLEAN("post_nnp_subset", 1),
+ AA_SFS_FILE_BOOLEAN("computed_longest_left", 1),
+ AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach),
+ AA_SFS_FILE_STRING("version", "1.2"),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_versions[] = {
+ AA_SFS_FILE_BOOLEAN("v5", 1),
+ AA_SFS_FILE_BOOLEAN("v6", 1),
+ AA_SFS_FILE_BOOLEAN("v7", 1),
+ AA_SFS_FILE_BOOLEAN("v8", 1),
+ AA_SFS_FILE_BOOLEAN("v9", 1),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_policy[] = {
+ AA_SFS_DIR("versions", aa_sfs_entry_versions),
+ AA_SFS_FILE_BOOLEAN("set_load", 1),
+ /* number of out of band transitions supported */
+ AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_mount[] = {
+ AA_SFS_FILE_STRING("mask", "mount umount pivot_root"),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_ns[] = {
+ AA_SFS_FILE_BOOLEAN("profile", 1),
+ AA_SFS_FILE_BOOLEAN("pivot_root", 0),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_query_label[] = {
+ AA_SFS_FILE_STRING("perms", "allow deny audit quiet"),
+ AA_SFS_FILE_BOOLEAN("data", 1),
+ AA_SFS_FILE_BOOLEAN("multi_transaction", 1),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_query[] = {
+ AA_SFS_DIR("label", aa_sfs_entry_query_label),
+ { }
+};
+static struct aa_sfs_entry aa_sfs_entry_features[] = {
+ AA_SFS_DIR("policy", aa_sfs_entry_policy),
+ AA_SFS_DIR("domain", aa_sfs_entry_domain),
+ AA_SFS_DIR("file", aa_sfs_entry_file),
+ AA_SFS_DIR("network_v8", aa_sfs_entry_network),
+ AA_SFS_DIR("mount", aa_sfs_entry_mount),
+ AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
+ AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
+ AA_SFS_DIR("rlimit", aa_sfs_entry_rlimit),
+ AA_SFS_DIR("caps", aa_sfs_entry_caps),
+ AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace),
+ AA_SFS_DIR("signal", aa_sfs_entry_signal),
+ AA_SFS_DIR("query", aa_sfs_entry_query),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_apparmor[] = {
+ AA_SFS_FILE_FOPS(".access", 0666, &aa_sfs_access),
+ AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops),
+ AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops),
+ AA_SFS_FILE_FOPS(".ns_level", 0444, &seq_ns_level_fops),
+ AA_SFS_FILE_FOPS(".ns_name", 0444, &seq_ns_name_fops),
+ AA_SFS_FILE_FOPS("profiles", 0444, &aa_sfs_profiles_fops),
+ AA_SFS_DIR("features", aa_sfs_entry_features),
+ { }
+};
+
+static struct aa_sfs_entry aa_sfs_entry =
+ AA_SFS_DIR("apparmor", aa_sfs_entry_apparmor);
+
+/**
+ * entry_create_file - create a file entry in the apparmor securityfs
+ * @fs_file: aa_sfs_entry to build an entry for (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use entry_remove_file to remove entries created with this fn.
+ */
+static int __init entry_create_file(struct aa_sfs_entry *fs_file,
+ struct dentry *parent)
+{
+ int error = 0;
+
+ fs_file->dentry = securityfs_create_file(fs_file->name,
+ S_IFREG | fs_file->mode,
+ parent, fs_file,
+ fs_file->file_ops);
+ if (IS_ERR(fs_file->dentry)) {
+ error = PTR_ERR(fs_file->dentry);
+ fs_file->dentry = NULL;
+ }
+ return error;
+}
+
+static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir);
+/**
+ * entry_create_dir - recursively create a directory entry in the securityfs
+ * @fs_dir: aa_sfs_entry (and all child entries) to build (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use entry_remove_dir to remove entries created with this fn.
+ */
+static int __init entry_create_dir(struct aa_sfs_entry *fs_dir,
+ struct dentry *parent)
+{
+ struct aa_sfs_entry *fs_file;
+ struct dentry *dir;
+ int error;
+
+ dir = securityfs_create_dir(fs_dir->name, parent);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ fs_dir->dentry = dir;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_SFS_TYPE_DIR)
+ error = entry_create_dir(fs_file, fs_dir->dentry);
+ else
+ error = entry_create_file(fs_file, fs_dir->dentry);
+ if (error)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ entry_remove_dir(fs_dir);
+
+ return error;
+}
+
+/**
+ * entry_remove_file - drop a single file entry in the apparmor securityfs
+ * @fs_file: aa_sfs_entry to detach from the securityfs (NOT NULL)
+ */
+static void __init entry_remove_file(struct aa_sfs_entry *fs_file)
+{
+ if (!fs_file->dentry)
+ return;
+
+ securityfs_remove(fs_file->dentry);
+ fs_file->dentry = NULL;
+}
+
+/**
+ * entry_remove_dir - recursively drop a directory entry from the securityfs
+ * @fs_dir: aa_sfs_entry (and all child entries) to detach (NOT NULL)
+ */
+static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir)
+{
+ struct aa_sfs_entry *fs_file;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_SFS_TYPE_DIR)
+ entry_remove_dir(fs_file);
+ else
+ entry_remove_file(fs_file);
+ }
+
+ entry_remove_file(fs_dir);
+}
+
+/**
+ * aa_destroy_aafs - cleanup and free aafs
+ *
+ * releases dentries allocated by aa_create_aafs
+ */
+void __init aa_destroy_aafs(void)
+{
+ entry_remove_dir(&aa_sfs_entry);
+}
+
+
+#define NULL_FILE_NAME ".null"
+struct path aa_null;
+
+static int aa_mk_null_file(struct dentry *parent)
+{
+ struct vfsmount *mount = NULL;
+ struct dentry *dentry;
+ struct inode *inode;
+ int count = 0;
+ int error = simple_pin_fs(parent->d_sb->s_type, &mount, &count);
+
+ if (error)
+ return error;
+
+ inode_lock(d_inode(parent));
+ dentry = lookup_one_len(NULL_FILE_NAME, parent, strlen(NULL_FILE_NAME));
+ if (IS_ERR(dentry)) {
+ error = PTR_ERR(dentry);
+ goto out;
+ }
+ inode = new_inode(parent->d_inode->i_sb);
+ if (!inode) {
+ error = -ENOMEM;
+ goto out1;
+ }
+
+ inode->i_ino = get_next_ino();
+ inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
+ MKDEV(MEM_MAJOR, 3));
+ d_instantiate(dentry, inode);
+ aa_null.dentry = dget(dentry);
+ aa_null.mnt = mntget(mount);
+
+ error = 0;
+
+out1:
+ dput(dentry);
+out:
+ inode_unlock(d_inode(parent));
+ simple_release_fs(&mount, &count);
+ return error;
+}
+
+
+
+static const char *policy_get_link(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ struct aa_ns *ns;
+ struct path path;
+ int error;
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ ns = aa_get_current_ns();
+ path.mnt = mntget(aafs_mnt);
+ path.dentry = dget(ns_dir(ns));
+ error = nd_jump_link(&path);
+ aa_put_ns(ns);
+
+ return ERR_PTR(error);
+}
+
+static int policy_readlink(struct dentry *dentry, char __user *buffer,
+ int buflen)
+{
+ char name[32];
+ int res;
+
+ res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME,
+ d_inode(dentry)->i_ino);
+ if (res > 0 && res < sizeof(name))
+ res = readlink_copy(buffer, buflen, name);
+ else
+ res = -ENOENT;
+
+ return res;
+}
+
+static const struct inode_operations policy_link_iops = {
+ .readlink = policy_readlink,
+ .get_link = policy_get_link,
+};
+
+
+/**
+ * aa_create_aafs - create the apparmor security filesystem
+ *
+ * dentries created here are released by aa_destroy_aafs
+ *
+ * Returns: error on failure
+ */
+static int __init aa_create_aafs(void)
+{
+ struct dentry *dent;
+ int error;
+
+ if (!apparmor_initialized)
+ return 0;
+
+ if (aa_sfs_entry.dentry) {
+ AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
+ return -EEXIST;
+ }
+
+ /* setup apparmorfs used to virtualize policy/ */
+ aafs_mnt = kern_mount(&aafs_ops);
+ if (IS_ERR(aafs_mnt))
+ panic("can't set apparmorfs up\n");
+ aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
+
+ /* Populate fs tree. */
+ error = entry_create_dir(&aa_sfs_entry, NULL);
+ if (error)
+ goto error;
+
+ dent = securityfs_create_file(".load", 0666, aa_sfs_entry.dentry,
+ NULL, &aa_fs_profile_load);
+ if (IS_ERR(dent))
+ goto dent_error;
+ ns_subload(root_ns) = dent;
+
+ dent = securityfs_create_file(".replace", 0666, aa_sfs_entry.dentry,
+ NULL, &aa_fs_profile_replace);
+ if (IS_ERR(dent))
+ goto dent_error;
+ ns_subreplace(root_ns) = dent;
+
+ dent = securityfs_create_file(".remove", 0666, aa_sfs_entry.dentry,
+ NULL, &aa_fs_profile_remove);
+ if (IS_ERR(dent))
+ goto dent_error;
+ ns_subremove(root_ns) = dent;
+
+ dent = securityfs_create_file("revision", 0444, aa_sfs_entry.dentry,
+ NULL, &aa_fs_ns_revision_fops);
+ if (IS_ERR(dent))
+ goto dent_error;
+ ns_subrevision(root_ns) = dent;
+
+ /* policy tree referenced by magic policy symlink */
+ mutex_lock_nested(&root_ns->lock, root_ns->level);
+ error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
+ aafs_mnt->mnt_root);
+ mutex_unlock(&root_ns->lock);
+ if (error)
+ goto error;
+
+ /* magic symlink similar to nsfs redirects based on task policy */
+ dent = securityfs_create_symlink("policy", aa_sfs_entry.dentry,
+ NULL, &policy_link_iops);
+ if (IS_ERR(dent))
+ goto dent_error;
+
+ error = aa_mk_null_file(aa_sfs_entry.dentry);
+ if (error)
+ goto error;
+
+ /* TODO: add default profile to apparmorfs */
+
+ /* Report that AppArmor fs is enabled */
+ aa_info_message("AppArmor Filesystem Enabled");
+ return 0;
+
+dent_error:
+ error = PTR_ERR(dent);
+error:
+ aa_destroy_aafs();
+ AA_ERROR("Error creating AppArmor securityfs\n");
+ return error;
+}
+
+fs_initcall(aa_create_aafs);
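+
+/*
+ * Sketch of the resulting securityfs layout (illustrative; entries from
+ * aa_sfs_entry and per-ns files are elided):
+ *
+ *    /sys/kernel/security/apparmor/
+ *        .load      0666  profile load interface
+ *        .replace   0666  profile replace interface
+ *        .remove    0666  profile remove interface
+ *        .null            char device aliasing /dev/null
+ *        revision   0444  ns policy revision
+ *        policy  ->       magic symlink into the apparmorfs policy tree
+ */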
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
new file mode 100644
index 000000000..704b0c895
--- /dev/null
+++ b/security/apparmor/audit.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/audit.h>
+#include <linux/socket.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+#include "include/secid.h"
+
+const char *const audit_mode_names[] = {
+ "normal",
+ "quiet_denied",
+ "quiet",
+ "noquiet",
+ "all"
+};
+
+static const char *const aa_audit_type[] = {
+ "AUDIT",
+ "ALLOWED",
+ "DENIED",
+ "HINT",
+ "STATUS",
+ "ERROR",
+ "KILLED",
+ "AUTO"
+};
+
+/*
+ * Currently AppArmor auditing is fed straight into the audit framework.
+ *
+ * TODO:
+ * netlink interface for complain mode
+ * user auditing, - send user auditing to netlink interface
+ * system control of whether user audit messages go to system log
+ */
+
+/**
+ * audit_pre - record common AppArmor audit data
+ * @ab: audit buffer to fill (NOT NULL)
+ * @ca: audit structure containing data to audit (NOT NULL)
+ *
+ * Record common AppArmor audit data from @ca
+ */
+static void audit_pre(struct audit_buffer *ab, void *ca)
+{
+ struct common_audit_data *sa = ca;
+
+ if (aa_g_audit_header) {
+ audit_log_format(ab, "apparmor=\"%s\"",
+ aa_audit_type[aad(sa)->type]);
+ }
+
+ if (aad(sa)->op) {
+ audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
+ }
+
+ if (aad(sa)->info) {
+ audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
+ if (aad(sa)->error)
+ audit_log_format(ab, " error=%d", aad(sa)->error);
+ }
+
+ if (aad(sa)->label) {
+ struct aa_label *label = aad(sa)->label;
+
+ if (label_isprofile(label)) {
+ struct aa_profile *profile = labels_profile(label);
+
+ if (profile->ns != root_ns) {
+ audit_log_format(ab, " namespace=");
+ audit_log_untrustedstring(ab,
+ profile->ns->base.hname);
+ }
+ audit_log_format(ab, " profile=");
+ audit_log_untrustedstring(ab, profile->base.hname);
+ } else {
+ audit_log_format(ab, " label=");
+ aa_label_xaudit(ab, root_ns, label, FLAG_VIEW_SUBNS,
+ GFP_ATOMIC);
+ }
+ }
+
+ if (aad(sa)->name) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, aad(sa)->name);
+ }
+}
+
+/**
+ * aa_audit_msg - Log a message to the audit subsystem
+ * @type: audit type for the message
+ * @sa: audit event structure (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ */
+void aa_audit_msg(int type, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *))
+{
+ aad(sa)->type = type;
+ common_lsm_audit(sa, audit_pre, cb);
+}
+
+/**
+ * aa_audit - Log a profile based audit event to the audit subsystem
+ * @type: audit type for the message
+ * @profile: profile to check against (NOT NULL)
+ * @sa: audit event (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ *
+ * Handle default message switching based off of audit mode flags
+ *
+ * Returns: error on failure
+ */
+int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *))
+{
+ AA_BUG(!profile);
+
+ if (type == AUDIT_APPARMOR_AUTO) {
+ if (likely(!aad(sa)->error)) {
+ if (AUDIT_MODE(profile) != AUDIT_ALL)
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else if (COMPLAIN_MODE(profile))
+ type = AUDIT_APPARMOR_ALLOWED;
+ else
+ type = AUDIT_APPARMOR_DENIED;
+ }
+ if (AUDIT_MODE(profile) == AUDIT_QUIET ||
+ (type == AUDIT_APPARMOR_DENIED &&
+ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
+ return aad(sa)->error;
+
+ if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
+ type = AUDIT_APPARMOR_KILL;
+
+ aad(sa)->label = &profile->label;
+
+ aa_audit_msg(type, sa, cb);
+
+ if (aad(sa)->type == AUDIT_APPARMOR_KILL)
+ (void)send_sig_info(SIGKILL, NULL,
+ sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
+ sa->u.tsk : current);
+
+ if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
+ return complain_error(aad(sa)->error);
+
+ return aad(sa)->error;
+}
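+
+/*
+ * Example (illustrative sketch of an assumed caller; the callback and
+ * chosen operation are hypothetical): mediation paths fill a
+ * common_audit_data and let aa_audit() handle the mode switching
+ * documented above.
+ *
+ *    static void my_cb(struct audit_buffer *ab, void *va)
+ *    {
+ *        audit_log_format(ab, " extra=\"detail\"");
+ *    }
+ *
+ *    DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_EXEC);
+ *    aad(&sa)->error = error;
+ *    error = aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, my_cb);
+ */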
+
+struct aa_audit_rule {
+ struct aa_label *label;
+};
+
+void aa_audit_rule_free(void *vrule)
+{
+ struct aa_audit_rule *rule = vrule;
+
+ if (rule) {
+ if (!IS_ERR(rule->label))
+ aa_put_label(rule->label);
+ kfree(rule);
+ }
+}
+
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+ struct aa_audit_rule *rule;
+
+ switch (field) {
+ case AUDIT_SUBJ_ROLE:
+ if (op != Audit_equal && op != Audit_not_equal)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
+
+ if (!rule)
+ return -ENOMEM;
+
+ /* Currently rules are treated as coming from the root ns */
+ rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
+ GFP_KERNEL, true, false);
+ if (IS_ERR(rule->label)) {
+ int err = PTR_ERR(rule->label);
+ aa_audit_rule_free(rule);
+ return err;
+ }
+
+ *vrule = rule;
+ return 0;
+}
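+
+/*
+ * Example (illustrative; the exact rule text is hypothetical): userspace
+ * can exercise this through the audit filter interface, e.g.
+ *
+ *    # auditctl -a always,exit -S openat -F subj_role=/usr/bin/foo
+ *
+ * where the field maps to AUDIT_SUBJ_ROLE and "/usr/bin/foo" is handed
+ * to aa_label_parse() against the root ns unconfined label.
+ */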
+
+int aa_audit_rule_known(struct audit_krule *rule)
+{
+ int i;
+
+ for (i = 0; i < rule->field_count; i++) {
+ struct audit_field *f = &rule->fields[i];
+
+ switch (f->type) {
+ case AUDIT_SUBJ_ROLE:
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule)
+{
+ struct aa_audit_rule *rule = vrule;
+ struct aa_label *label;
+ int found = 0;
+
+ label = aa_secid_to_label(sid);
+
+ if (!label)
+ return -ENOENT;
+
+ if (aa_label_is_subset(label, rule->label))
+ found = 1;
+
+ switch (field) {
+ case AUDIT_SUBJ_ROLE:
+ switch (op) {
+ case Audit_equal:
+ return found;
+ case Audit_not_equal:
+ return !found;
+ }
+ }
+ return 0;
+}
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
new file mode 100644
index 000000000..deccea865
--- /dev/null
+++ b/security/apparmor/capability.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/security.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/cred.h"
+#include "include/policy.h"
+#include "include/audit.h"
+
+/*
+ * Table of capability names: we generate it from capabilities.h.
+ */
+#include "capability_names.h"
+
+struct aa_sfs_entry aa_sfs_entry_caps[] = {
+ AA_SFS_FILE_STRING("mask", AA_SFS_CAPS_MASK),
+ { }
+};
+
+struct audit_cache {
+ struct aa_profile *profile;
+ kernel_cap_t caps;
+};
+
+static DEFINE_PER_CPU(struct audit_cache, audit_cache);
+
+/**
+ * audit_cb - callback for capability components of audit struct
+ * @ab: audit buffer (NOT NULL)
+ * @va: audit struct to audit data from (NOT NULL)
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ audit_log_format(ab, " capname=");
+ audit_log_untrustedstring(ab, capability_names[sa->u.cap]);
+}
+
+/**
+ * audit_caps - audit a capability
+ * @sa: audit data
+ * @profile: profile being tested for confinement (NOT NULL)
+ * @cap: capability tested
+ * @error: error code returned by test
+ *
+ * Do auditing of capability and handle audit/complain/kill mode switching
+ * and duplicate message elimination.
+ *
+ * Returns: 0 or sa->error on success, error code on failure
+ */
+static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ int cap, int error)
+{
+ struct audit_cache *ent;
+ int type = AUDIT_APPARMOR_AUTO;
+
+ aad(sa)->error = error;
+
+ if (likely(!error)) {
+ /* test if auditing is being forced */
+ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
+ !cap_raised(profile->caps.audit, cap)))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else if (KILL_MODE(profile) ||
+ cap_raised(profile->caps.kill, cap)) {
+ type = AUDIT_APPARMOR_KILL;
+ } else if (cap_raised(profile->caps.quiet, cap) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL) {
+ /* quiet auditing */
+ return error;
+ }
+
+ /* Do simple duplicate message elimination */
+ ent = &get_cpu_var(audit_cache);
+ if (profile == ent->profile && cap_raised(ent->caps, cap)) {
+ put_cpu_var(audit_cache);
+ if (COMPLAIN_MODE(profile))
+ return complain_error(error);
+ return error;
+ } else {
+ aa_put_profile(ent->profile);
+ ent->profile = aa_get_profile(profile);
+ cap_raise(ent->caps, cap);
+ }
+ put_cpu_var(audit_cache);
+
+ return aa_audit(type, profile, sa, audit_cb);
+}
+
+/**
+ * profile_capable - test if profile allows use of capability @cap
+ * @profile: profile being enforced (NOT NULL, NOT unconfined)
+ * @cap: capability to test if allowed
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+ * @sa: audit data (MAY BE NULL indicating no auditing)
+ *
+ * Returns: 0 if allowed else -EPERM
+ */
+static int profile_capable(struct aa_profile *profile, int cap,
+ unsigned int opts, struct common_audit_data *sa)
+{
+ int error;
+
+ if (cap_raised(profile->caps.allow, cap) &&
+ !cap_raised(profile->caps.denied, cap))
+ error = 0;
+ else
+ error = -EPERM;
+
+ if (opts & CAP_OPT_NOAUDIT) {
+ if (!COMPLAIN_MODE(profile))
+ return error;
+ /* audit the cap request in complain mode but note that it
+ * should be optional.
+ */
+ aad(sa)->info = "optional: no audit";
+ }
+
+ return audit_caps(sa, profile, cap, error);
+}
+
+/**
+ * aa_capable - test permission to use capability
+ * @label: label being tested for capability (NOT NULL)
+ * @cap: capability to be tested
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+ *
+ * Look up capability in profile capability set.
+ *
+ * Returns: 0 on success, or else an error code.
+ */
+int aa_capable(struct aa_label *label, int cap, unsigned int opts)
+{
+ struct aa_profile *profile;
+ int error = 0;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, OP_CAPABLE);
+
+ sa.u.cap = cap;
+ error = fn_for_each_confined(label, profile,
+ profile_capable(profile, cap, opts, &sa));
+
+ return error;
+}
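+
+/*
+ * Example (simplified sketch; the real hook lives in lsm.c): the
+ * capability LSM hook resolves the acting label and defers to
+ * aa_capable() only when confined.
+ *
+ *    static int apparmor_capable(const struct cred *cred,
+ *                                struct user_namespace *ns,
+ *                                int cap, unsigned int opts)
+ *    {
+ *        struct aa_label *label = aa_get_newest_cred_label(cred);
+ *        int error = 0;
+ *
+ *        if (!unconfined(label))
+ *            error = aa_capable(label, cap, opts);
+ *        aa_put_label(label);
+ *        return error;
+ *    }
+ */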
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
new file mode 100644
index 000000000..b498ed302
--- /dev/null
+++ b/security/apparmor/crypto.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy hashing function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * Fns to provide a checksum of policy that has been loaded; this can be
+ * compared to userspace policy compiles to check that the loaded policy
+ * is what it should be.
+ */
+
+#include <crypto/hash.h>
+
+#include "include/apparmor.h"
+#include "include/crypto.h"
+
+static unsigned int apparmor_hash_size;
+
+static struct crypto_shash *apparmor_tfm;
+
+unsigned int aa_hash_size(void)
+{
+ return apparmor_hash_size;
+}
+
+char *aa_calc_hash(void *data, size_t len)
+{
+ SHASH_DESC_ON_STACK(desc, apparmor_tfm);
+ char *hash = NULL;
+ int error = -ENOMEM;
+
+ if (!apparmor_tfm)
+ return NULL;
+
+ hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ if (!hash)
+ goto fail;
+
+ desc->tfm = apparmor_tfm;
+
+ error = crypto_shash_init(desc);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(desc, (u8 *) data, len);
+ if (error)
+ goto fail;
+ error = crypto_shash_final(desc, hash);
+ if (error)
+ goto fail;
+
+ return hash;
+
+fail:
+ kfree(hash);
+
+ return ERR_PTR(error);
+}
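+
+/*
+ * Example (illustrative sketch of an assumed caller): hashing a raw
+ * policy blob and handling the NULL/ERR_PTR convention above.
+ *
+ *    char *hash = aa_calc_hash(blob, blob_len);
+ *
+ *    if (IS_ERR(hash))
+ *        return PTR_ERR(hash);   // crypto layer failure
+ *    if (!hash)
+ *        return 0;               // hashing not initialized yet
+ *    ...                         // compare aa_hash_size() bytes
+ *    kfree(hash);
+ */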
+
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len)
+{
+ SHASH_DESC_ON_STACK(desc, apparmor_tfm);
+ int error = -ENOMEM;
+ __le32 le32_version = cpu_to_le32(version);
+
+ if (!aa_g_hash_policy)
+ return 0;
+
+ if (!apparmor_tfm)
+ return 0;
+
+ profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ if (!profile->hash)
+ goto fail;
+
+ desc->tfm = apparmor_tfm;
+
+ error = crypto_shash_init(desc);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(desc, (u8 *) &le32_version, 4);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(desc, (u8 *) start, len);
+ if (error)
+ goto fail;
+ error = crypto_shash_final(desc, profile->hash);
+ if (error)
+ goto fail;
+
+ return 0;
+
+fail:
+ kfree(profile->hash);
+ profile->hash = NULL;
+
+ return error;
+}
+
+static int __init init_profile_hash(void)
+{
+ struct crypto_shash *tfm;
+
+ if (!apparmor_initialized)
+ return 0;
+
+ tfm = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(tfm)) {
+ int error = PTR_ERR(tfm);
+ AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+ return error;
+ }
+ apparmor_tfm = tfm;
+ apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
+
+ aa_info_message("AppArmor sha1 policy hashing enabled");
+
+ return 0;
+}
+
+late_initcall(init_profile_hash);
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
new file mode 100644
index 000000000..91689d34d
--- /dev/null
+++ b/security/apparmor/domain.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy attachment and domain transitions
+ *
+ * Copyright (C) 2002-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/fdtable.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/syscalls.h>
+#include <linux/personality.h>
+#include <linux/xattr.h>
+#include <linux/user_namespace.h>
+
+#include "include/audit.h"
+#include "include/apparmorfs.h"
+#include "include/cred.h"
+#include "include/domain.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+
+/**
+ * aa_free_domain_entries - free entries in a domain table
+ * @domain: the domain table to free (MAYBE NULL)
+ */
+void aa_free_domain_entries(struct aa_domain *domain)
+{
+ int i;
+ if (domain) {
+ if (!domain->table)
+ return;
+
+ for (i = 0; i < domain->size; i++)
+ kfree_sensitive(domain->table[i]);
+ kfree_sensitive(domain->table);
+ domain->table = NULL;
+ }
+}
+
+/**
+ * may_change_ptraced_domain - check if a profile change is allowed on a ptraced task
+ * @to_label: profile to change to (NOT NULL)
+ * @info: returns: message if there is an error (NOT NULL)
+ *
+ * Check if current is ptraced and, if so, whether the tracing task is
+ * allowed to trace the new domain
+ *
+ * Returns: %0 or error if change not allowed
+ */
+static int may_change_ptraced_domain(struct aa_label *to_label,
+ const char **info)
+{
+ struct task_struct *tracer;
+ struct aa_label *tracerl = NULL;
+ int error = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (tracer)
+ /* released below */
+ tracerl = aa_get_task_label(tracer);
+
+ /* not ptraced */
+ if (!tracer || unconfined(tracerl))
+ goto out;
+
+ error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
+
+out:
+ rcu_read_unlock();
+ aa_put_label(tracerl);
+
+ if (error)
+ *info = "ptrace prevents transition";
+ return error;
+}
+
+/**** TODO: dedup to aa_label_match - needs perm and dfa, merging
+ * specifically this is an exact copy of aa_label_match except
+ * aa_compute_perms is replaced with aa_compute_fperms
+ * and policy.dfa with file.dfa
+ ****/
+/* match a profile and its associated ns component if needed
+ * Assumes visibility test has already been done.
+ * If a subns profile is not to be matched, it should be prescreened with
+ * the visibility test.
+ */
+static inline unsigned int match_component(struct aa_profile *profile,
+ struct aa_profile *tp,
+ bool stack, unsigned int state)
+{
+ const char *ns_name;
+
+ if (stack)
+ state = aa_dfa_match(profile->file.dfa, state, "&");
+ if (profile->ns == tp->ns)
+ return aa_dfa_match(profile->file.dfa, state, tp->base.hname);
+
+ /* try matching with namespace name and then profile */
+ ns_name = aa_ns_name(profile->ns, tp->ns, true);
+ state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+ state = aa_dfa_match(profile->file.dfa, state, ns_name);
+ state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+ return aa_dfa_match(profile->file.dfa, state, tp->base.hname);
+}
+
+/**
+ * label_compound_match - find perms for full compound label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @stack: whether this is a stacking request
+ * @state: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: perms struct to set
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for A//&B//&C
+ * @perms should be preinitialized with allperms OR a previous permission
+ * check to be stacked.
+ */
+static int label_compound_match(struct aa_profile *profile,
+ struct aa_label *label, bool stack,
+ unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ struct aa_profile *tp;
+ struct label_it i;
+ struct path_cond cond = { };
+
+ /* find first subcomponent that is visible */
+ label_for_each(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, stack, state);
+ if (!state)
+ goto fail;
+ goto next;
+ }
+
+ /* no component visible */
+ *perms = allperms;
+ return 0;
+
+next:
+ label_for_each_cont(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = aa_dfa_match(profile->file.dfa, state, "//&");
+ state = match_component(profile, tp, false, state);
+ if (!state)
+ goto fail;
+ }
+ *perms = aa_compute_fperms(profile->file.dfa, state, &cond);
+ aa_apply_modes_to_perms(profile, perms);
+ if ((perms->allow & request) != request)
+ return -EACCES;
+
+ return 0;
+
+fail:
+ *perms = nullperms;
+ return -EACCES;
+}
+
+/**
+ * label_components_match - find perms for all subcomponents of a label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @stack: whether this is a stacking request
+ * @start: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: an initialized perms struct to add accumulation to
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for each of A and B and C
+ * @perms should be preinitialized with allperms OR a previous permission
+ * check to be stacked.
+ */
+static int label_components_match(struct aa_profile *profile,
+ struct aa_label *label, bool stack,
+ unsigned int start, bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ struct aa_profile *tp;
+ struct label_it i;
+ struct aa_perms tmp;
+ struct path_cond cond = { };
+ unsigned int state = 0;
+
+ /* find first subcomponent to test */
+ label_for_each(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, stack, start);
+ if (!state)
+ goto fail;
+ goto next;
+ }
+
+ /* no subcomponents visible - no change in perms */
+ return 0;
+
+next:
+ tmp = aa_compute_fperms(profile->file.dfa, state, &cond);
+ aa_apply_modes_to_perms(profile, &tmp);
+ aa_perms_accum(perms, &tmp);
+ label_for_each_cont(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, stack, start);
+ if (!state)
+ goto fail;
+ tmp = aa_compute_fperms(profile->file.dfa, state, &cond);
+ aa_apply_modes_to_perms(profile, &tmp);
+ aa_perms_accum(perms, &tmp);
+ }
+
+ if ((perms->allow & request) != request)
+ return -EACCES;
+
+ return 0;
+
+fail:
+ *perms = nullperms;
+ return -EACCES;
+}
+
+/**
+ * label_match - do a multi-component label match
+ * @profile: profile to match against (NOT NULL)
+ * @label: label to match (NOT NULL)
+ * @stack: whether this is a stacking request
+ * @state: state to start in
+ * @subns: whether to match subns components
+ * @request: permission request
+ * @perms: Returns computed perms (NOT NULL)
+ *
+ * Returns: 0 on success else -EACCES
+ */
+static int label_match(struct aa_profile *profile, struct aa_label *label,
+ bool stack, unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ int error;
+
+ *perms = nullperms;
+ error = label_compound_match(profile, label, stack, state, subns,
+ request, perms);
+ if (!error)
+ return error;
+
+ *perms = allperms;
+ return label_components_match(profile, label, stack, state, subns,
+ request, perms);
+}
+
+/******* end TODO: dedup *****/
+
+/**
+ * change_profile_perms - find permissions for change_profile
+ * @profile: the current profile (NOT NULL)
+ * @target: label to transition to (NOT NULL)
+ * @stack: whether this is a stacking request
+ * @request: requested perms
+ * @start: state to start matching in
+ * @perms: Returns: computed permission set (NOT NULL)
+ *
+ * Returns: %0 on success else error
+ *
+ * Currently only matches the full label A//&B//&C or the individual
+ * components A, B, C, not arbitrary combinations, e.g. A//&B, C
+ */
+static int change_profile_perms(struct aa_profile *profile,
+ struct aa_label *target, bool stack,
+ u32 request, unsigned int start,
+ struct aa_perms *perms)
+{
+ if (profile_unconfined(profile)) {
+ perms->allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
+ perms->audit = perms->quiet = perms->kill = 0;
+ return 0;
+ }
+
+ /* TODO: add profile in ns screening */
+ return label_match(profile, target, stack, start, true, request, perms);
+}
+
+/**
+ * aa_xattrs_match - check whether a file matches the xattrs defined in profile
+ * @bprm: binprm struct for the process to validate
+ * @profile: profile to match against (NOT NULL)
+ * @state: state to start match in
+ *
+ * Returns: number of extended attributes that matched, or < 0 on error
+ */
+static int aa_xattrs_match(const struct linux_binprm *bprm,
+ struct aa_profile *profile, unsigned int state)
+{
+ int i;
+ ssize_t size;
+ struct dentry *d;
+ char *value = NULL;
+ int value_size = 0, ret = profile->xattr_count;
+
+ if (!bprm || !profile->xattr_count)
+ return 0;
+ might_sleep();
+
+ /* transition from exec match to xattr set */
+ state = aa_dfa_outofband_transition(profile->xmatch, state);
+ d = bprm->file->f_path.dentry;
+
+ for (i = 0; i < profile->xattr_count; i++) {
+ size = vfs_getxattr_alloc(&init_user_ns, d, profile->xattrs[i],
+ &value, value_size, GFP_KERNEL);
+ if (size >= 0) {
+ u32 perm;
+
+ /*
+ * Check the xattr presence before the value. This ensures
+ * that a missing xattr can be distinguished from a
+ * zero-length value or a rule that matches any value
+ */
+ state = aa_dfa_null_transition(profile->xmatch, state);
+ /* Check xattr value */
+ state = aa_dfa_match_len(profile->xmatch, state, value,
+ size);
+ perm = dfa_user_allow(profile->xmatch, state);
+ if (!(perm & MAY_EXEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ /* transition to next element */
+ state = aa_dfa_outofband_transition(profile->xmatch, state);
+ if (size < 0) {
+ /*
+ * No xattr match, so verify if the transition to
+ * the next element was valid. IFF so, the xattr
+ * was optional.
+ */
+ if (!state) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* don't count missing optional xattr as matched */
+ ret--;
+ }
+ }
+
+out:
+ kfree(value);
+ return ret;
+}
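+
+/*
+ * Example (illustrative; the attachment and xattr names are
+ * hypothetical): in the profile language this corresponds to a
+ * conditional attachment, e.g.
+ *
+ *    profile foo /usr/bin/foo xattrs=(user.signed="yes" user.vendor) {
+ *        ...
+ *    }
+ *
+ * where a key without a value matches any value, and a missing xattr
+ * matches only if the compiled xmatch allows skipping that element.
+ */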
+
+/**
+ * find_attach - do attachment search for unconfined processes
+ * @bprm: binprm structure of transitioning task
+ * @ns: the current namespace (NOT NULL)
+ * @head: profile list to walk (NOT NULL)
+ * @name: name to match against (NOT NULL)
+ * @info: returns: info message if there was an error (NOT NULL)
+ *
+ * Do a linear search on the profiles in the list. There is a matching
+ * preference where an exact match is preferred over a name which uses
+ * expressions to match, and matching expressions with the greatest
+ * xmatch_len are preferred.
+ *
+ * Requires: @head not be shared or have appropriate locks held
+ *
+ * Returns: label or NULL if no match found
+ */
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+ struct aa_ns *ns, struct list_head *head,
+ const char *name, const char **info)
+{
+ int candidate_len = 0, candidate_xattrs = 0;
+ bool conflict = false;
+ struct aa_profile *profile, *candidate = NULL;
+
+ AA_BUG(!name);
+ AA_BUG(!head);
+
+ rcu_read_lock();
+restart:
+ list_for_each_entry_rcu(profile, head, base.list) {
+ if (profile->label.flags & FLAG_NULL &&
+ &profile->label == ns_unconfined(profile->ns))
+ continue;
+
+ /* Find the "best" matching profile. Profiles must
+ * match the path and extended attributes (if any)
+ * associated with the file. A more specific path
+ * match will be preferred over a less specific one,
+ * and a match with more matching extended attributes
+ * will be preferred over one with fewer. If the best
+ * match has both the same level of path specificity
+ * and the same number of matching extended attributes
+ * as another profile, signal a conflict and refuse to
+ * match.
+ */
+ if (profile->xmatch) {
+ unsigned int state, count;
+ u32 perm;
+
+ state = aa_dfa_leftmatch(profile->xmatch, DFA_START,
+ name, &count);
+ perm = dfa_user_allow(profile->xmatch, state);
+ /* any accepting state means a valid match. */
+ if (perm & MAY_EXEC) {
+ int ret = 0;
+
+ if (count < candidate_len)
+ continue;
+
+ if (bprm && profile->xattr_count) {
+ long rev = READ_ONCE(ns->revision);
+
+ if (!aa_get_profile_not0(profile))
+ goto restart;
+ rcu_read_unlock();
+ ret = aa_xattrs_match(bprm, profile,
+ state);
+ rcu_read_lock();
+ aa_put_profile(profile);
+ if (rev !=
+ READ_ONCE(ns->revision))
+ /* policy changed */
+ goto restart;
+ /*
+ * Fail matching if the xattrs don't
+ * match
+ */
+ if (ret < 0)
+ continue;
+ }
+ /*
+ * TODO: allow for more flexible best match
+ *
+ * The new match isn't more specific
+ * than the current best match
+ */
+ if (count == candidate_len &&
+ ret <= candidate_xattrs) {
+ /* Match is equivalent, so conflict */
+ if (ret == candidate_xattrs)
+ conflict = true;
+ continue;
+ }
+
+ /* Either the same length with more matching
+ * xattrs, or a longer match
+ */
+ candidate = profile;
+ candidate_len = max(count, profile->xmatch_len);
+ candidate_xattrs = ret;
+ conflict = false;
+ }
+ } else if (!strcmp(profile->base.name, name)) {
+ /*
+ * old exact non-re match, without conditionals such
+ * as xattrs. no more searching required
+ */
+ candidate = profile;
+ goto out;
+ }
+ }
+
+ if (!candidate || conflict) {
+ if (conflict)
+ *info = "conflicting profile attachments";
+ rcu_read_unlock();
+ return NULL;
+ }
+
+out:
+ candidate = aa_get_newest_profile(candidate);
+ rcu_read_unlock();
+
+ return &candidate->label;
+}
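+
+/*
+ * Example of the precedence above (illustrative attachments): for an
+ * unconfined exec of /usr/bin/foo with candidate profiles
+ *
+ *    /usr/bin/foo  { ... }   // exact name match, selected outright
+ *    /usr/bin/*    { ... }   // pattern, shorter xmatch
+ *    /usr/bin/fo*  { ... }   // pattern, longer xmatch
+ *
+ * the exact match wins immediately; without it the longer pattern match
+ * wins, and two equally specific candidates (same match length, same
+ * number of matching xattrs) conflict, so nothing attaches.
+ */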
+
+static const char *next_name(int xtype, const char *name)
+{
+ return NULL;
+}
+
+/**
+ * x_table_lookup - lookup an x transition name via transition table
+ * @profile: current profile (NOT NULL)
+ * @xindex: index into x transition table
+ * @name: returns: name tested to find label (NOT NULL)
+ *
+ * Returns: refcounted label, or NULL on failure (MAYBE NULL)
+ */
+struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+ const char **name)
+{
+ struct aa_label *label = NULL;
+ u32 xtype = xindex & AA_X_TYPE_MASK;
+ int index = xindex & AA_X_INDEX_MASK;
+
+ AA_BUG(!name);
+
+ /* index is guaranteed to be in range, validated at load time */
+ /* TODO: move lookup parsing to unpack time so this is a straight
+ * index into the resultant label
+ */
+ for (*name = profile->file.trans.table[index]; !label && *name;
+ *name = next_name(xtype, *name)) {
+ if (xindex & AA_X_CHILD) {
+ struct aa_profile *new_profile;
+ /* release by caller */
+ new_profile = aa_find_child(profile, *name);
+ if (new_profile)
+ label = &new_profile->label;
+ continue;
+ }
+ label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
+ true, false);
+ if (IS_ERR(label))
+ label = NULL;
+ }
+
+ /* released by caller */
+
+ return label;
+}
+
+/**
+ * x_to_label - get target label for a given xindex
+ * @profile: current profile (NOT NULL)
+ * @bprm: binprm structure of transitioning task
+ * @name: name to lookup (NOT NULL)
+ * @xindex: index into x transition table
+ * @lookupname: returns: name used in lookup if one was specified (NOT NULL)
+ * @info: returns: info message if there was an error (NOT NULL)
+ *
+ * Find the label for a transition index
+ *
+ * Returns: refcounted label, or NULL if not found or not available
+ */
+static struct aa_label *x_to_label(struct aa_profile *profile,
+ const struct linux_binprm *bprm,
+ const char *name, u32 xindex,
+ const char **lookupname,
+ const char **info)
+{
+ struct aa_label *new = NULL;
+ struct aa_ns *ns = profile->ns;
+ u32 xtype = xindex & AA_X_TYPE_MASK;
+ const char *stack = NULL;
+
+ switch (xtype) {
+ case AA_X_NONE:
+ /* fail exec unless ix || ux fallback - handled by caller */
+ *lookupname = NULL;
+ break;
+ case AA_X_TABLE:
+ /* TODO: fix when perm mapping done at unload */
+ stack = profile->file.trans.table[xindex & AA_X_INDEX_MASK];
+ if (*stack != '&') {
+ /* released by caller */
+ new = x_table_lookup(profile, xindex, lookupname);
+ stack = NULL;
+ break;
+ }
+ fallthrough; /* to X_NAME */
+ case AA_X_NAME:
+ if (xindex & AA_X_CHILD)
+ /* released by caller */
+ new = find_attach(bprm, ns, &profile->base.profiles,
+ name, info);
+ else
+ /* released by caller */
+ new = find_attach(bprm, ns, &ns->base.profiles,
+ name, info);
+ *lookupname = name;
+ break;
+ }
+
+ if (!new) {
+ if (xindex & AA_X_INHERIT) {
+ /* (p|c|n)ix - don't change profile but do
+ * use the newest version
+ */
+ *info = "ix fallback";
+ /* no profile && no error */
+ new = aa_get_newest_label(&profile->label);
+ } else if (xindex & AA_X_UNCONFINED) {
+ new = aa_get_newest_label(ns_unconfined(profile->ns));
+ *info = "ux fallback";
+ }
+ }
+
+ if (new && stack) {
+ /* base the stack on post domain transition */
+ struct aa_label *base = new;
+
+ new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
+ if (IS_ERR(new))
+ new = NULL;
+ aa_put_label(base);
+ }
+
+ /* released by caller */
+ return new;
+}
+
+static struct aa_label *profile_transition(struct aa_profile *profile,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+{
+ struct aa_label *new = NULL;
+ const char *info = NULL, *name = NULL, *target = NULL;
+ unsigned int state = profile->file.start;
+ struct aa_perms perms = {};
+ bool nonewprivs = false;
+ int error = 0;
+
+ AA_BUG(!profile);
+ AA_BUG(!bprm);
+ AA_BUG(!buffer);
+
+ error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
+ &name, &info, profile->disconnected);
+ if (error) {
+ if (profile_unconfined(profile) ||
+ (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
+ AA_DEBUG("name lookup ix on error");
+ error = 0;
+ new = aa_get_newest_label(&profile->label);
+ }
+ name = bprm->filename;
+ goto audit;
+ }
+
+ if (profile_unconfined(profile)) {
+ new = find_attach(bprm, profile->ns,
+ &profile->ns->base.profiles, name, &info);
+ if (new) {
+ AA_DEBUG("unconfined attached to new label");
+ return new;
+ }
+ AA_DEBUG("unconfined exec no attachment");
+ return aa_get_newest_label(&profile->label);
+ }
+
+ /* find exec permissions for name */
+ state = aa_str_perms(profile->file.dfa, state, name, cond, &perms);
+ if (perms.allow & MAY_EXEC) {
+ /* exec permission determines how to transition */
+ new = x_to_label(profile, bprm, name, perms.xindex, &target,
+ &info);
+ if (new && new->proxy == profile->label.proxy && info) {
+ /* hack ix fallback - improve how this is detected */
+ goto audit;
+ } else if (!new) {
+ error = -EACCES;
+ info = "profile transition not found";
+ /* remove MAY_EXEC to audit as failure */
+ perms.allow &= ~MAY_EXEC;
+ }
+ } else if (COMPLAIN_MODE(profile)) {
+ /* no exec permission - learning mode */
+ struct aa_profile *new_profile = NULL;
+
+ new_profile = aa_new_null_profile(profile, false, name,
+ GFP_KERNEL);
+ if (!new_profile) {
+ error = -ENOMEM;
+ info = "could not create null profile";
+ } else {
+ error = -EACCES;
+ new = &new_profile->label;
+ }
+ perms.xindex |= AA_X_UNSAFE;
+ } else
+ /* fail exec */
+ error = -EACCES;
+
+ if (!new)
+ goto audit;
+
+
+ if (!(perms.xindex & AA_X_UNSAFE)) {
+ if (DEBUG_ON) {
+ dbg_printk("apparmor: scrubbing environment variables"
+ " for %s profile=", name);
+ aa_label_printk(new, GFP_KERNEL);
+ dbg_printk("\n");
+ }
+ *secure_exec = true;
+ }
+
+audit:
+ aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
+ cond->uid, info, error);
+ if (!new || nonewprivs) {
+ aa_put_label(new);
+ return ERR_PTR(error);
+ }
+
+ return new;
+}
+
+static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
+ bool stack, const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+{
+ unsigned int state = profile->file.start;
+ struct aa_perms perms = {};
+ const char *xname = NULL, *info = "change_profile onexec";
+ int error = -EACCES;
+
+ AA_BUG(!profile);
+ AA_BUG(!onexec);
+ AA_BUG(!bprm);
+ AA_BUG(!buffer);
+
+ if (profile_unconfined(profile)) {
+ /* change_profile on exec already granted */
+ /*
+ * NOTE: Domain transitions from unconfined are allowed
+ * even when no_new_privs is set because this always results
+ * in a further reduction of permissions.
+ */
+ return 0;
+ }
+
+ error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
+ &xname, &info, profile->disconnected);
+ if (error) {
+ if (profile_unconfined(profile) ||
+ (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
+ AA_DEBUG("name lookup ix on error");
+ error = 0;
+ }
+ xname = bprm->filename;
+ goto audit;
+ }
+
+ /* find exec permissions for name */
+ state = aa_str_perms(profile->file.dfa, state, xname, cond, &perms);
+ if (!(perms.allow & AA_MAY_ONEXEC)) {
+ info = "no change_onexec valid for executable";
+ goto audit;
+ }
+ /* test if this exec can be paired with change_profile onexec.
+ * onexec permission is linked to exec with a standard pairing
+ * exec\0change_profile
+ */
+ state = aa_dfa_null_transition(profile->file.dfa, state);
+ error = change_profile_perms(profile, onexec, stack, AA_MAY_ONEXEC,
+ state, &perms);
+ if (error) {
+ perms.allow &= ~AA_MAY_ONEXEC;
+ goto audit;
+ }
+
+ if (!(perms.xindex & AA_X_UNSAFE)) {
+ if (DEBUG_ON) {
+ dbg_printk("apparmor: scrubbing environment "
+ "variables for %s label=", xname);
+ aa_label_printk(onexec, GFP_KERNEL);
+ dbg_printk("\n");
+ }
+ *secure_exec = true;
+ }
+
+audit:
+ return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
+ NULL, onexec, cond->uid, info, error);
+}
+
+/* ensure non-ns domain transitions are correctly applied with onexec */
+
+static struct aa_label *handle_onexec(struct aa_label *label,
+ struct aa_label *onexec, bool stack,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *unsafe)
+{
+ struct aa_profile *profile;
+ struct aa_label *new;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!onexec);
+ AA_BUG(!bprm);
+ AA_BUG(!buffer);
+
+ if (!stack) {
+ error = fn_for_each_in_ns(label, profile,
+ profile_onexec(profile, onexec, stack,
+ bprm, buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_get_newest_label(onexec),
+ profile_transition(profile, bprm, buffer,
+ cond, unsafe));
+
+ } else {
+ /* TODO: determine how much we want to loosen this */
+ error = fn_for_each_in_ns(label, profile,
+ profile_onexec(profile, onexec, stack, bprm,
+ buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_label_merge(&profile->label, onexec,
+ GFP_KERNEL),
+ profile_transition(profile, bprm, buffer,
+ cond, unsafe));
+ }
+
+ if (new)
+ return new;
+
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ error = fn_for_each_in_ns(label, profile,
+ aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
+ AA_MAY_ONEXEC, bprm->filename, NULL,
+ onexec, GLOBAL_ROOT_UID,
+ "failed to build target label", -ENOMEM));
+ return ERR_PTR(error);
+}
+
+/**
+ * apparmor_bprm_creds_for_exec - Update the new creds on the bprm struct
+ * @bprm: binprm for the exec (NOT NULL)
+ *
+ * Returns: %0 or error on failure
+ *
+ * TODO: once the other paths are done see if we can't refactor into a fn
+ */
+int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ struct aa_task_ctx *ctx;
+ struct aa_label *label, *new = NULL;
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ const char *info = NULL;
+ int error = 0;
+ bool unsafe = false;
+ kuid_t i_uid = i_uid_into_mnt(file_mnt_user_ns(bprm->file),
+ file_inode(bprm->file));
+ struct path_cond cond = {
+ i_uid,
+ file_inode(bprm->file)->i_mode
+ };
+
+ ctx = task_ctx(current);
+ AA_BUG(!cred_label(bprm->cred));
+ AA_BUG(!ctx);
+
+ label = aa_get_newest_label(cred_label(bprm->cred));
+
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && !unconfined(label) &&
+ !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
+
+ /* buffer freed below, name is pointer into buffer */
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto done;
+ }
+
+ /* Test for onexec first as onexec overrides other x transitions. */
+ if (ctx->onexec)
+ new = handle_onexec(label, ctx->onexec, ctx->token,
+ bprm, buffer, &cond, &unsafe);
+ else
+ new = fn_label_build(label, profile, GFP_KERNEL,
+ profile_transition(profile, bprm, buffer,
+ &cond, &unsafe));
+
+ AA_BUG(!new);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+ goto done;
+ } else if (!new) {
+ error = -ENOMEM;
+ goto done;
+ }
+
+ /* Policy has specified a domain transition. If no_new_privs and
+ * confined, ensure the transition is to confinement that is a subset
+ * of the confinement when the task entered no new privs.
+ *
+ * NOTE: Domain transitions from unconfined and to stacked
+ * subsets are allowed even when no_new_privs is set because this
+ * always results in a further reduction of permissions.
+ */
+ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
+ !unconfined(label) &&
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
+ error = -EPERM;
+ info = "no new privs";
+ goto audit;
+ }
+
+ if (bprm->unsafe & LSM_UNSAFE_SHARE) {
+ /* FIXME: currently don't mediate shared state */
+ ;
+ }
+
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
+ /* TODO: test needs to be profile of label to new */
+ error = may_change_ptraced_domain(new, &info);
+ if (error)
+ goto audit;
+ }
+
+ if (unsafe) {
+ if (DEBUG_ON) {
+ dbg_printk("scrubbing environment variables for %s "
+ "label=", bprm->filename);
+ aa_label_printk(new, GFP_KERNEL);
+ dbg_printk("\n");
+ }
+ bprm->secureexec = 1;
+ }
+
+ if (label->proxy != new->proxy) {
+ /* when transitioning clear unsafe personality bits */
+ if (DEBUG_ON) {
+ dbg_printk("apparmor: clearing unsafe personality "
+ "bits. %s label=", bprm->filename);
+ aa_label_printk(new, GFP_KERNEL);
+ dbg_printk("\n");
+ }
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ }
+ aa_put_label(cred_label(bprm->cred));
+ /* transfer reference, released when cred is freed */
+ set_cred_label(bprm->cred, new);
+
+done:
+ aa_put_label(label);
+ aa_put_buffer(buffer);
+
+ return error;
+
+audit:
+ error = fn_for_each(label, profile,
+ aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
+ bprm->filename, NULL, new,
+ i_uid, info, error));
+ aa_put_label(new);
+ goto done;
+}
+
+/*
+ * Functions for self directed profile change
+ */
+
+
+/* helper fn for change_hat
+ *
+ * Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
+ */
+static struct aa_label *build_change_hat(struct aa_profile *profile,
+ const char *name, bool sibling)
+{
+ struct aa_profile *root, *hat = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ if (sibling && PROFILE_IS_HAT(profile)) {
+ root = aa_get_profile_rcu(&profile->parent);
+ } else if (!sibling && !PROFILE_IS_HAT(profile)) {
+ root = aa_get_profile(profile);
+ } else {
+ info = "conflicting target types";
+ error = -EPERM;
+ goto audit;
+ }
+
+ hat = aa_find_child(root, name);
+ if (!hat) {
+ error = -ENOENT;
+ if (COMPLAIN_MODE(profile)) {
+ hat = aa_new_null_profile(profile, true, name,
+ GFP_KERNEL);
+ if (!hat) {
+ info = "failed null profile create";
+ error = -ENOMEM;
+ }
+ }
+ }
+ aa_put_profile(root);
+
+audit:
+ aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
+ name, hat ? hat->base.hname : NULL,
+ hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
+ error);
+ if (!hat || (error && error != -ENOENT))
+ return ERR_PTR(error);
+ /* if hat && error - complain mode, already audited and we adjust for
+ * complain mode allow by returning hat->label
+ */
+ return &hat->label;
+}
+
+/* helper fn for changing into a hat
+ *
+ * Returns: label for hat transition or ERR_PTR. Does not return NULL
+ */
+static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+ int count, int flags)
+{
+ struct aa_profile *profile, *root, *hat = NULL;
+ struct aa_label *new;
+ struct label_it it;
+ bool sibling = false;
+ const char *name, *info = NULL;
+ int i, error;
+
+ AA_BUG(!label);
+ AA_BUG(!hats);
+ AA_BUG(count < 1);
+
+ if (PROFILE_IS_HAT(labels_profile(label)))
+ sibling = true;
+
+ /* find first matching hat */
+ for (i = 0; i < count && !hat; i++) {
+ name = hats[i];
+ label_for_each_in_ns(it, labels_ns(label), label, profile) {
+ if (sibling && PROFILE_IS_HAT(profile)) {
+ root = aa_get_profile_rcu(&profile->parent);
+ } else if (!sibling && !PROFILE_IS_HAT(profile)) {
+ root = aa_get_profile(profile);
+ } else { /* conflicting change type */
+ info = "conflicting targets types";
+ error = -EPERM;
+ goto fail;
+ }
+ hat = aa_find_child(root, name);
+ aa_put_profile(root);
+ if (!hat) {
+ if (!COMPLAIN_MODE(profile))
+ goto outer_continue;
+ /* complain mode succeed as if hat */
+ } else if (!PROFILE_IS_HAT(hat)) {
+ info = "target not hat";
+ error = -EPERM;
+ aa_put_profile(hat);
+ goto fail;
+ }
+ aa_put_profile(hat);
+ }
+ /* found a hat for all profiles in ns */
+ goto build;
+outer_continue:
+ ;
+ }
+ /* no hats that match, find appropriate error
+ *
+ * In complain mode the audit of the failure is based off of the first
+ * hat supplied. This is done due to how userspace interacts with
+ * change_hat.
+ */
+ name = NULL;
+ label_for_each_in_ns(it, labels_ns(label), label, profile) {
+ if (!list_empty(&profile->base.profiles)) {
+ info = "hat not found";
+ error = -ENOENT;
+ goto fail;
+ }
+ }
+ info = "no hats defined";
+ error = -ECHILD;
+
+fail:
+ label_for_each_in_ns(it, labels_ns(label), label, profile) {
+ /*
+ * no target as it has failed to be found or built
+ *
+ * change_hat uses probing and should not log failures
+ * related to missing hats
+ */
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ if (count > 1 || COMPLAIN_MODE(profile)) {
+ aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, name, NULL, NULL,
+ GLOBAL_ROOT_UID, info, error);
+ }
+ }
+ return ERR_PTR(error);
+
+build:
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ build_change_hat(profile, name, sibling),
+ aa_get_label(&profile->label));
+ if (!new) {
+ info = "label build failed";
+ error = -ENOMEM;
+ goto fail;
+ } /* else if (IS_ERR) build_change_hat has logged error so return new */
+
+ return new;
+}
+
+/**
+ * aa_change_hat - change hat to/from subprofile
+ * @hats: vector of hat names to try changing into (MAYBE NULL if @count == 0)
+ * @count: number of hat names in @hats
+ * @token: magic value to validate the hat change
+ * @flags: flags affecting behavior of the change
+ *
+ * Returns %0 on success, error otherwise.
+ *
+ * Change to the first profile specified in @hats that exists, and store
+ * the @token in the current task context. If @count == 0 and the
+ * @token matches that stored in the current task context, return to the
+ * top level profile.
+ *
+ * change_hat only applies to profiles in the current ns, and each profile
+ * in the ns must make the same transition otherwise change_hat will fail.
+ */
+int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+{
+ const struct cred *cred;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct aa_label *label, *previous, *new = NULL, *target = NULL;
+ struct aa_profile *profile;
+ struct aa_perms perms = {};
+ const char *info = NULL;
+ int error = 0;
+
+ /* released below */
+ cred = get_current_cred();
+ label = aa_get_newest_cred_label(cred);
+ previous = aa_get_newest_label(ctx->previous);
+
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
+
+ if (unconfined(label)) {
+ info = "unconfined can not change_hat";
+ error = -EPERM;
+ goto fail;
+ }
+
+ if (count) {
+ new = change_hat(label, hats, count, flags);
+ AA_BUG(!new);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+ new = NULL;
+ /* already audited */
+ goto out;
+ }
+
+ error = may_change_ptraced_domain(new, &info);
+ if (error)
+ goto fail;
+
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_hat denied");
+ error = -EPERM;
+ goto out;
+ }
+
+ if (flags & AA_CHANGE_TEST)
+ goto out;
+
+ target = new;
+ error = aa_set_current_hat(new, token);
+ if (error == -EACCES)
+ /* kill task in case of brute force attacks */
+ goto kill;
+ } else if (previous && !(flags & AA_CHANGE_TEST)) {
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_unconfined_subset(previous, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_hat denied");
+ error = -EPERM;
+ goto out;
+ }
+
+ /* Return to saved label. Kill task if restore fails
+ * to avoid brute force attacks
+ */
+ target = previous;
+ error = aa_restore_previous_label(token);
+ if (error) {
+ if (error == -EACCES)
+ goto kill;
+ goto fail;
+ }
+ } /* else ignore @flags && restores when there is no saved profile */
+
+out:
+ aa_put_label(new);
+ aa_put_label(previous);
+ aa_put_label(label);
+ put_cred(cred);
+
+ return error;
+
+kill:
+ info = "failed token match";
+ perms.kill = AA_MAY_CHANGEHAT;
+
+fail:
+ fn_for_each_in_ns(label, profile,
+ aa_audit_file(profile, &perms, OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, NULL, NULL, target,
+ GLOBAL_ROOT_UID, info, error));
+
+ goto out;
+}
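+
+/*
+ * Example (userspace side via libapparmor; the hat name and token helper
+ * are hypothetical): a confined server dropping into a hat around
+ * untrusted work and using the token to return.
+ *
+ *    #include <sys/apparmor.h>
+ *
+ *    unsigned long token = get_random_token();  // CSPRNG-backed helper
+ *
+ *    if (aa_change_hat("handler", token) == 0) {
+ *        handle_request();            // mediated by the ^handler hat
+ *        aa_change_hat(NULL, token);  // restore the parent profile
+ *    }
+ *
+ * Supplying a bad token on the way back is treated as an attack and, as
+ * above, kills the task.
+ */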
+
+
+static int change_profile_perms_wrapper(const char *op, const char *name,
+ struct aa_profile *profile,
+ struct aa_label *target, bool stack,
+ u32 request, struct aa_perms *perms)
+{
+ const char *info = NULL;
+ int error = 0;
+
+ if (!error)
+ error = change_profile_perms(profile, target, stack, request,
+ profile->file.start, perms);
+ if (error)
+ error = aa_audit_file(profile, perms, op, request, name,
+ NULL, target, GLOBAL_ROOT_UID, info,
+ error);
+
+ return error;
+}
+
+/**
+ * aa_change_profile - perform a one-way profile transition
+ * @fqname: name of profile may include namespace (NOT NULL)
+ * @flags: flags affecting change behavior
+ *
+ * Change to new profile @fqname. Unlike with hats, there is no way
+ * to change back. If @fqname isn't specified the current profile name is
+ * used.
+ * If AA_CHANGE_ONEXEC is set in @flags the transition is delayed until
+ * the next exec.
+ *
+ * Returns %0 on success, error otherwise.
+ */
+int aa_change_profile(const char *fqname, int flags)
+{
+ struct aa_label *label, *new = NULL, *target = NULL;
+ struct aa_profile *profile;
+ struct aa_perms perms = {};
+ const char *info = NULL;
+ const char *auditname = fqname; /* retain leading & if stack */
+ bool stack = flags & AA_CHANGE_STACK;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ int error = 0;
+ char *op;
+ u32 request;
+
+ label = aa_get_current_label();
+
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
+
+ if (!fqname || !*fqname) {
+ aa_put_label(label);
+ AA_DEBUG("no profile name");
+ return -EINVAL;
+ }
+
+ if (flags & AA_CHANGE_ONEXEC) {
+ request = AA_MAY_ONEXEC;
+ if (stack)
+ op = OP_STACK_ONEXEC;
+ else
+ op = OP_CHANGE_ONEXEC;
+ } else {
+ request = AA_MAY_CHANGE_PROFILE;
+ if (stack)
+ op = OP_STACK;
+ else
+ op = OP_CHANGE_PROFILE;
+ }
+
+ if (*fqname == '&') {
+ stack = true;
+ /* don't have label_parse() do stacking */
+ fqname++;
+ }
+ target = aa_label_parse(label, fqname, GFP_KERNEL, true, false);
+ if (IS_ERR(target)) {
+ struct aa_profile *tprofile;
+
+ info = "label not found";
+ error = PTR_ERR(target);
+ target = NULL;
+ /*
+ * TODO: fixme using labels_profile is not right - do profile
+ * per complain profile
+ */
+ if ((flags & AA_CHANGE_TEST) ||
+ !COMPLAIN_MODE(labels_profile(label)))
+ goto audit;
+ /* released below */
+ tprofile = aa_new_null_profile(labels_profile(label), false,
+ fqname, GFP_KERNEL);
+ if (!tprofile) {
+ info = "failed null profile create";
+ error = -ENOMEM;
+ goto audit;
+ }
+ target = &tprofile->label;
+ goto check;
+ }
+
+ /*
+ * self directed transitions only apply to current policy ns
+ * TODO: currently requiring perms for stacking and straight change
+ * stacking doesn't strictly need this. Determine how much
+ * we want to loosen this restriction for stacking
+ *
+ * if (!stack) {
+ */
+ error = fn_for_each_in_ns(label, profile,
+ change_profile_perms_wrapper(op, auditname,
+ profile, target, stack,
+ request, &perms));
+ if (error)
+ /* auditing done in change_profile_perms_wrapper */
+ goto out;
+
+ /* } */
+
+check:
+ /* check if tracing task is allowed to trace target domain */
+ error = may_change_ptraced_domain(target, &info);
+ if (error && !fn_for_each_in_ns(label, profile,
+ COMPLAIN_MODE(profile)))
+ goto audit;
+
+ /* TODO: add permission check to allow this
+ * if ((flags & AA_CHANGE_ONEXEC) && !current_is_single_threaded()) {
+ * info = "not a single threaded task";
+ * error = -EACCES;
+ * goto audit;
+ * }
+ */
+ if (flags & AA_CHANGE_TEST)
+ goto out;
+
+ /* stacking is always a subset, so only check the nonstack case */
+ if (!stack) {
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_get_label(target),
+ aa_get_label(&profile->label));
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_hat denied");
+ error = -EPERM;
+ goto out;
+ }
+ }
+
+ if (!(flags & AA_CHANGE_ONEXEC)) {
+ /* only transition profiles in the current ns */
+ if (stack)
+ new = aa_label_merge(label, target, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(new)) {
+ info = "failed to build target label";
+ if (!new)
+ error = -ENOMEM;
+ else
+ error = PTR_ERR(new);
+ new = NULL;
+ perms.allow = 0;
+ goto audit;
+ }
+ error = aa_replace_current_label(new);
+ } else {
+ if (new) {
+ aa_put_label(new);
+ new = NULL;
+ }
+
+ /* full transition will be built in exec path */
+ error = aa_set_current_onexec(target, stack);
+ }
+
+audit:
+ error = fn_for_each_in_ns(label, profile,
+ aa_audit_file(profile, &perms, op, request, auditname,
+ NULL, new ? new : target,
+ GLOBAL_ROOT_UID, info, error));
+
+out:
+ aa_put_label(new);
+ aa_put_label(target);
+ aa_put_label(label);
+
+ return error;
+}
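+
+/*
+ * Example (userspace side via libapparmor; profile names hypothetical):
+ * an immediate one-way transition versus deferring it to the next exec.
+ *
+ *    #include <sys/apparmor.h>
+ *
+ *    aa_change_profile("sandbox");    // immediate, no way back
+ *
+ *    aa_change_onexec("sandbox");     // takes effect at the next exec
+ *    execv("/usr/bin/worker", argv);
+ *
+ * Prefixing @fqname with '&' requests stacking rather than replacement.
+ */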
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
new file mode 100644
index 000000000..e1b7e9360
--- /dev/null
+++ b/security/apparmor/file.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor mediation of files
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/tty.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/file.h"
+#include "include/match.h"
+#include "include/net.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/label.h"
+
+static u32 map_mask_to_chr_mask(u32 mask)
+{
+ u32 m = mask & PERMS_CHRS_MASK;
+
+ if (mask & AA_MAY_GETATTR)
+ m |= MAY_READ;
+ if (mask & (AA_MAY_SETATTR | AA_MAY_CHMOD | AA_MAY_CHOWN))
+ m |= MAY_WRITE;
+
+ return m;
+}
+
+/**
+ * file_audit_cb - call back for file specific audit fields
+ * @ab: audit_buffer (NOT NULL)
+ * @va: audit struct to audit values of (NOT NULL)
+ */
+static void file_audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ kuid_t fsuid = current_fsuid();
+ char str[10];
+
+ if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+ map_mask_to_chr_mask(aad(sa)->request));
+ audit_log_format(ab, " requested_mask=\"%s\"", str);
+ }
+ if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+ map_mask_to_chr_mask(aad(sa)->denied));
+ audit_log_format(ab, " denied_mask=\"%s\"", str);
+ }
+ if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " fsuid=%d",
+ from_kuid(&init_user_ns, fsuid));
+ audit_log_format(ab, " ouid=%d",
+ from_kuid(&init_user_ns, aad(sa)->fs.ouid));
+ }
+
+ if (aad(sa)->peer) {
+ audit_log_format(ab, " target=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAG_VIEW_SUBNS, GFP_KERNEL);
+ } else if (aad(sa)->fs.target) {
+ audit_log_format(ab, " target=");
+ audit_log_untrustedstring(ab, aad(sa)->fs.target);
+ }
+}
+
+/**
+ * aa_audit_file - handle the auditing of file operations
+ * @profile: the profile being enforced (NOT NULL)
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @op: operation being mediated
+ * @request: permissions requested
+ * @name: name of object being mediated (MAYBE NULL)
+ * @target: name of target (MAYBE NULL)
+ * @tlabel: target label (MAY BE NULL)
+ * @ouid: object uid
+ * @info: extra information message (MAYBE NULL)
+ * @error: 0 if operation allowed else failure error code
+ *
+ * Returns: %0 or error on failure
+ */
+int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel,
+ kuid_t ouid, const char *info, int error)
+{
+ int type = AUDIT_APPARMOR_AUTO;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, op);
+
+ sa.u.tsk = NULL;
+ aad(&sa)->request = request;
+ aad(&sa)->name = name;
+ aad(&sa)->fs.target = target;
+ aad(&sa)->peer = tlabel;
+ aad(&sa)->fs.ouid = ouid;
+ aad(&sa)->info = info;
+ aad(&sa)->error = error;
+
+ if (likely(!aad(&sa)->error)) {
+ u32 mask = perms->audit;
+
+ if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+ mask = 0xffff;
+
+ /* mask off perms that are not being force audited */
+ aad(&sa)->request &= mask;
+
+ if (likely(!aad(&sa)->request))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ /* only report permissions that were denied */
+ aad(&sa)->request = aad(&sa)->request & ~perms->allow;
+ AA_BUG(!aad(&sa)->request);
+
+ if (aad(&sa)->request & perms->kill)
+ type = AUDIT_APPARMOR_KILL;
+
+ /* quiet known rejects, assumes quiet and kill do not overlap */
+ if ((aad(&sa)->request & perms->quiet) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+ aad(&sa)->request &= ~perms->quiet;
+
+ if (!aad(&sa)->request)
+ return aad(&sa)->error;
+ }
+
+ aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
+ return aa_audit(type, profile, &sa, file_audit_cb);
+}
+
+/**
+ * is_deleted - test if a file has been completely unlinked
+ * @dentry: dentry of file to test for deletion (NOT NULL)
+ *
+ * Returns: true if deleted else false
+ */
+static inline bool is_deleted(struct dentry *dentry)
+{
+ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
+ return true;
+ return false;
+}
+
+static int path_name(const char *op, struct aa_label *label,
+ const struct path *path, int flags, char *buffer,
+ const char **name, struct path_cond *cond, u32 request)
+{
+ struct aa_profile *profile;
+ const char *info = NULL;
+ int error;
+
+ error = aa_path_name(path, flags, buffer, name, &info,
+ labels_profile(label)->disconnected);
+ if (error) {
+ fn_for_each_confined(label, profile,
+ aa_audit_file(profile, &nullperms, op, request, *name,
+ NULL, NULL, cond->uid, info, error));
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * map_old_perms - map old file perms layout to the new layout
+ * @old: permission set in old mapping
+ *
+ * Returns: new permission mapping
+ */
+static u32 map_old_perms(u32 old)
+{
+ u32 new = old & 0xf;
+ if (old & MAY_READ)
+ new |= AA_MAY_GETATTR | AA_MAY_OPEN;
+ if (old & MAY_WRITE)
+ new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
+ AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
+ if (old & 0x10)
+ new |= AA_MAY_LINK;
+ /* the old mapping's lock and link_subset flags were overlaid
+ * and their use was determined by which member of the pair they
+ * appeared in
+ */
+ if (old & 0x20)
+ new |= AA_MAY_LOCK | AA_LINK_SUBSET;
+ if (old & 0x40) /* AA_EXEC_MMAP */
+ new |= AA_EXEC_MMAP;
+
+ return new;
+}
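+
+/*
+ * Worked example: an old-format entry of MAY_READ | 0x10 expands to
+ * MAY_READ | AA_MAY_GETATTR | AA_MAY_OPEN | AA_MAY_LINK, since read
+ * implies getattr/open and bit 0x10 carried the link permission.
+ */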
+
+/**
+ * aa_compute_fperms - convert dfa compressed perms to internal perms
+ * @dfa: dfa to compute perms for (NOT NULL)
+ * @state: state in dfa
+ * @cond: conditions to consider (NOT NULL)
+ *
+ * TODO: convert from dfa + state to permission entry; do the computation
+ * and conversion at load time.
+ *
+ * Returns: computed permission set
+ */
+struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
+ struct path_cond *cond)
+{
+ /* FIXME: change over to new dfa format
+ * currently file perms are encoded in the dfa, new format
+ * splits the permissions from the dfa. This mapping can be
+ * done at profile load
+ */
+ struct aa_perms perms = { };
+
+ if (uid_eq(current_fsuid(), cond->uid)) {
+ perms.allow = map_old_perms(dfa_user_allow(dfa, state));
+ perms.audit = map_old_perms(dfa_user_audit(dfa, state));
+ perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
+ perms.xindex = dfa_user_xindex(dfa, state);
+ } else {
+ perms.allow = map_old_perms(dfa_other_allow(dfa, state));
+ perms.audit = map_old_perms(dfa_other_audit(dfa, state));
+ perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
+ perms.xindex = dfa_other_xindex(dfa, state);
+ }
+ perms.allow |= AA_MAY_GETATTR;
+
+ /* change_profile wasn't determined by ownership in old mapping */
+ if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
+ perms.allow |= AA_MAY_CHANGE_PROFILE;
+ if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
+ perms.allow |= AA_MAY_ONEXEC;
+
+ return perms;
+}
+
+/**
+ * aa_str_perms - find permissions that match @name
+ * @dfa: the dfa to match against (MAYBE NULL)
+ * @start: state to start matching in
+ * @name: string to match against dfa (NOT NULL)
+ * @cond: conditions to consider for permission set computation (NOT NULL)
+ * @perms: Returns - the permissions found when matching @name
+ *
+ * Returns: the final state in @dfa after starting at @start and walking @name
+ */
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+ const char *name, struct path_cond *cond,
+ struct aa_perms *perms)
+{
+ unsigned int state;
+
+ state = aa_dfa_match(dfa, start, name);
+ *perms = aa_compute_fperms(dfa, state, cond);
+
+ return state;
+}
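+
+/*
+ * Usage sketch (path string and variables illustrative): given a
+ * struct path_cond cond describing the object, compute the perms a
+ * profile grants on a path and test a request against them:
+ *
+ *	struct aa_perms perms;
+ *
+ *	aa_str_perms(profile->file.dfa, profile->file.start, "/etc/passwd",
+ *		     &cond, &perms);
+ *	if (request & ~perms.allow)
+ *		error = -EACCES;
+ *
+ * which is exactly the pattern __aa_path_perm() below follows.
+ */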
+
+int __aa_path_perm(const char *op, struct aa_profile *profile, const char *name,
+ u32 request, struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+{
+ int e = 0;
+
+ if (profile_unconfined(profile))
+ return 0;
+ aa_str_perms(profile->file.dfa, profile->file.start, name, cond, perms);
+ if (request & ~perms->allow)
+ e = -EACCES;
+ return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
+ cond->uid, NULL, e);
+}
+
+
+static int profile_path_perm(const char *op, struct aa_profile *profile,
+ const struct path *path, char *buffer, u32 request,
+ struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+{
+ const char *name;
+ int error;
+
+ if (profile_unconfined(profile))
+ return 0;
+
+ error = path_name(op, &profile->label, path,
+ flags | profile->path_flags, buffer, &name, cond,
+ request);
+ if (error)
+ return error;
+ return __aa_path_perm(op, profile, name, request, cond, flags,
+ perms);
+}
+
+/**
+ * aa_path_perm - do permissions check & audit for @path
+ * @op: operation being checked
+ * @label: label being enforced (NOT NULL)
+ * @path: path to check permissions of (NOT NULL)
+ * @flags: any additional path flags beyond what the profile specifies
+ * @request: requested permissions
+ * @cond: conditional info for this request (NOT NULL)
+ *
+ * Returns: %0 else error if access denied or other error
+ */
+int aa_path_perm(const char *op, struct aa_label *label,
+ const struct path *path, int flags, u32 request,
+ struct path_cond *cond)
+{
+ struct aa_perms perms = {};
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ int error;
+
+ flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
+ 0);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+ profile_path_perm(op, profile, path, buffer, request,
+ cond, flags, &perms));
+
+ aa_put_buffer(buffer);
+
+ return error;
+}
+
+/**
+ * xindex_is_subset - helper for aa_path_link
+ * @link: link permission set
+ * @target: target permission set
+ *
+ * Test that the target's x permissions are equal to OR a subset of the
+ * link's x permissions. This is done as part of the subset test, where
+ * a hard link must have a subset of the permissions that the target has.
+ *
+ * Returns: true if subset else false
+ */
+static inline bool xindex_is_subset(u32 link, u32 target)
+{
+ if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
+ ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
+ return false;
+
+ return true;
+}
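+
+/*
+ * Example: a link xindex of AA_X_NAME against a target xindex of
+ * AA_X_NAME | AA_X_UNSAFE passes (they differ only by AA_X_UNSAFE on
+ * the target); the reverse, an unsafe link against a safe target, fails.
+ */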
+
+static int profile_path_link(struct aa_profile *profile,
+ const struct path *link, char *buffer,
+ const struct path *target, char *buffer2,
+ struct path_cond *cond)
+{
+ const char *lname, *tname = NULL;
+ struct aa_perms lperms = {}, perms;
+ const char *info = NULL;
+ u32 request = AA_MAY_LINK;
+ unsigned int state;
+ int error;
+
+ error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
+ buffer, &lname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+
+ /* buffer2 freed below, tname is pointer in buffer2 */
+ error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
+ buffer2, &tname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+
+ error = -EACCES;
+ /* aa_str_perms - handles the case of the dfa being NULL */
+ state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
+ cond, &lperms);
+
+ if (!(lperms.allow & AA_MAY_LINK))
+ goto audit;
+
+ /* test to see if target can be paired with link */
+ state = aa_dfa_null_transition(profile->file.dfa, state);
+ aa_str_perms(profile->file.dfa, state, tname, cond, &perms);
+
+ /* the force audit/quiet masks for the link are stored in the
+ * second entry of the link pair.
+ */
+ lperms.audit = perms.audit;
+ lperms.quiet = perms.quiet;
+ lperms.kill = perms.kill;
+
+ if (!(perms.allow & AA_MAY_LINK)) {
+ info = "target restricted";
+ lperms = perms;
+ goto audit;
+ }
+
+ /* done if link subset test is not required */
+ if (!(perms.allow & AA_LINK_SUBSET))
+ goto done_tests;
+
+ /* Do the link perm subset test, requiring that the permissions
+ * allowed on the link are a subset of those allowed on the target.
+ */
+ aa_str_perms(profile->file.dfa, profile->file.start, tname, cond,
+ &perms);
+
+ /* AA_MAY_LINK is not considered in the subset test */
+ request = lperms.allow & ~AA_MAY_LINK;
+ lperms.allow &= perms.allow | AA_MAY_LINK;
+
+ request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
+ if (request & ~lperms.allow) {
+ goto audit;
+ } else if ((lperms.allow & MAY_EXEC) &&
+ !xindex_is_subset(lperms.xindex, perms.xindex)) {
+ lperms.allow &= ~MAY_EXEC;
+ request |= MAY_EXEC;
+ info = "link not subset of target";
+ goto audit;
+ }
+
+done_tests:
+ error = 0;
+
+audit:
+ return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
+ NULL, cond->uid, info, error);
+}
+
+/**
+ * aa_path_link - Handle hard link permission check
+ * @label: the label being enforced (NOT NULL)
+ * @old_dentry: the target dentry (NOT NULL)
+ * @new_dir: directory the new link will be created in (NOT NULL)
+ * @new_dentry: the link being created (NOT NULL)
+ *
+ * Handle the permission test for a link & target pair. Permission
+ * is encoded as a pair where the link permission is determined
+ * first, and if allowed, the target is tested. The target test
+ * is done from the point of the link match (not start of DFA)
+ * making the target permission dependent on the link permission match.
+ *
+ * If required, the subset test forces the permissions granted on the
+ * link to be a subset of the permissions granted on the target.
+ *
+ * Returns: %0 if allowed else error
+ */
+int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+{
+ struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+ struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
+ struct path_cond cond = {
+ d_backing_inode(old_dentry)->i_uid,
+ d_backing_inode(old_dentry)->i_mode
+ };
+ char *buffer = NULL, *buffer2 = NULL;
+ struct aa_profile *profile;
+ int error;
+
+ /* buffer freed below, lname is pointer in buffer */
+ buffer = aa_get_buffer(false);
+ buffer2 = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !buffer2)
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+ profile_path_link(profile, &link, buffer, &target,
+ buffer2, &cond));
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(buffer2);
+ return error;
+}
+
+static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
+ u32 request)
+{
+ struct aa_label *l, *old;
+
+ /* update caching of label on file_ctx */
+ spin_lock(&fctx->lock);
+ old = rcu_dereference_protected(fctx->label,
+ lockdep_is_held(&fctx->lock));
+ l = aa_label_merge(old, label, GFP_ATOMIC);
+ if (l) {
+ if (l != old) {
+ rcu_assign_pointer(fctx->label, l);
+ aa_put_label(old);
+ } else
+ aa_put_label(l);
+ fctx->allow |= request;
+ }
+ spin_unlock(&fctx->lock);
+}
+
+static int __file_path_perm(const char *op, struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied, bool in_atomic)
+{
+ struct aa_profile *profile;
+ struct aa_perms perms = {};
+ struct path_cond cond = {
+ .uid = i_uid_into_mnt(file_mnt_user_ns(file), file_inode(file)),
+ .mode = file_inode(file)->i_mode
+ };
+ char *buffer;
+ int flags, error;
+
+ /* revalidation due to label out of date. No revocation at this time */
+ if (!denied && aa_label_is_subset(flabel, label))
+ /* TODO: check for revocation on stale profiles */
+ return 0;
+
+ flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
+ buffer = aa_get_buffer(in_atomic);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* check every profile in task label not in current cache */
+ error = fn_for_each_not_in_set(flabel, label, profile,
+ profile_path_perm(op, profile, &file->f_path, buffer,
+ request, &cond, flags, &perms));
+ if (denied && !error) {
+ /*
+ * check every profile in file label that was not tested
+ * in the initial check above.
+ *
+ * TODO: cache full perms so this only happens because of
+ * conditionals
+ * TODO: don't audit here
+ */
+ if (label == flabel)
+ error = fn_for_each(label, profile,
+ profile_path_perm(op, profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ else
+ error = fn_for_each_not_in_set(label, flabel, profile,
+ profile_path_perm(op, profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ }
+ if (!error)
+ update_file_ctx(file_ctx(file), label, request);
+
+ aa_put_buffer(buffer);
+
+ return error;
+}
+
+static int __file_sock_perm(const char *op, struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied)
+{
+ struct socket *sock = (struct socket *) file->private_data;
+ int error;
+
+ AA_BUG(!sock);
+
+ /* revalidation due to label out of date. No revocation at this time */
+ if (!denied && aa_label_is_subset(flabel, label))
+ return 0;
+
+ /* TODO: improve to skip profiles cached in flabel */
+ error = aa_sock_file_perm(label, op, request, sock);
+ if (denied) {
+ /* TODO: improve to skip profiles checked above */
+ /* check every profile in the file label that is cached */
+ last_error(error, aa_sock_file_perm(flabel, op, request, sock));
+ }
+ if (!error)
+ update_file_ctx(file_ctx(file), label, request);
+
+ return error;
+}
+
+/**
+ * aa_file_perm - do permission revalidation check & audit for @file
+ * @op: operation being checked
+ * @label: label being enforced (NOT NULL)
+ * @file: file to revalidate access permissions on (NOT NULL)
+ * @request: requested permissions
+ * @in_atomic: whether allocations need to be done in atomic context
+ *
+ * Returns: %0 if access allowed else error
+ */
+int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic)
+{
+ struct aa_file_ctx *fctx;
+ struct aa_label *flabel;
+ u32 denied;
+ int error = 0;
+
+ AA_BUG(!label);
+ AA_BUG(!file);
+
+ fctx = file_ctx(file);
+
+ rcu_read_lock();
+ flabel = rcu_dereference(fctx->label);
+ AA_BUG(!flabel);
+
+ /* Skip revalidation if the task is unconfined, if the file label
+ * is unconfined, or if the cached label is current and the request
+ * is within the permissions already granted.
+ *
+ * Note: the test for unconfined(flabel) handles file delegation
+ * from unconfined tasks
+ */
+ denied = request & ~fctx->allow;
+ if (unconfined(label) || unconfined(flabel) ||
+ (!denied && aa_label_is_subset(flabel, label))) {
+ rcu_read_unlock();
+ goto done;
+ }
+
+ flabel = aa_get_newest_label(flabel);
+ rcu_read_unlock();
+ /* TODO: label cross check */
+
+ if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
+ error = __file_path_perm(op, label, flabel, file, request,
+ denied, in_atomic);
+
+ else if (S_ISSOCK(file_inode(file)->i_mode))
+ error = __file_sock_perm(op, label, flabel, file, request,
+ denied);
+ aa_put_label(flabel);
+
+done:
+ return error;
+}
+
+static void revalidate_tty(struct aa_label *label)
+{
+ struct tty_struct *tty;
+ int drop_tty = 0;
+
+ tty = get_current_tty();
+ if (!tty)
+ return;
+
+ spin_lock(&tty->files_lock);
+ if (!list_empty(&tty->tty_files)) {
+ struct tty_file_private *file_priv;
+ struct file *file;
+ /* TODO: Revalidate access to controlling tty. */
+ file_priv = list_first_entry(&tty->tty_files,
+ struct tty_file_private, list);
+ file = file_priv->file;
+
+ if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
+ IN_ATOMIC))
+ drop_tty = 1;
+ }
+ spin_unlock(&tty->files_lock);
+ tty_kref_put(tty);
+
+ if (drop_tty)
+ no_tty();
+}
+
+static int match_file(const void *p, struct file *file, unsigned int fd)
+{
+ struct aa_label *label = (struct aa_label *)p;
+
+ if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
+ IN_ATOMIC))
+ return fd + 1;
+ return 0;
+}
+
+
+/* based on selinux's flush_unauthorized_files */
+void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+{
+ struct aa_label *label = aa_get_newest_cred_label(cred);
+ struct file *devnull = NULL;
+ unsigned int n;
+
+ revalidate_tty(label);
+
+ /* Revalidate access to inherited open files. */
+ n = iterate_fd(files, 0, match_file, label);
+ if (!n) /* none found? */
+ goto out;
+
+ devnull = dentry_open(&aa_null, O_RDWR, cred);
+ if (IS_ERR(devnull))
+ devnull = NULL;
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+ } while ((n = iterate_fd(files, n, match_file, label)) != 0);
+ if (devnull)
+ fput(devnull);
+out:
+ aa_put_label(label);
+}
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
new file mode 100644
index 000000000..9c3fc36a0
--- /dev/null
+++ b/security/apparmor/include/apparmor.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic global definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#ifndef __APPARMOR_H
+#define __APPARMOR_H
+
+#include <linux/types.h>
+
+/*
+ * Class of mediation types in the AppArmor policy db
+ */
+#define AA_CLASS_ENTRY 0
+#define AA_CLASS_UNKNOWN 1
+#define AA_CLASS_FILE 2
+#define AA_CLASS_CAP 3
+#define AA_CLASS_DEPRECATED 4
+#define AA_CLASS_RLIMITS 5
+#define AA_CLASS_DOMAIN 6
+#define AA_CLASS_MOUNT 7
+#define AA_CLASS_PTRACE 9
+#define AA_CLASS_SIGNAL 10
+#define AA_CLASS_NET 14
+#define AA_CLASS_LABEL 16
+
+#define AA_CLASS_LAST AA_CLASS_LABEL
+
+/* Control parameters settable through module/boot flags */
+extern enum audit_mode aa_g_audit;
+extern bool aa_g_audit_header;
+extern bool aa_g_debug;
+extern bool aa_g_hash_policy;
+extern bool aa_g_export_binary;
+extern int aa_g_rawdata_compression_level;
+extern bool aa_g_lock_policy;
+extern bool aa_g_logsyscall;
+extern bool aa_g_paranoid_load;
+extern unsigned int aa_g_path_max;
+
+#endif /* __APPARMOR_H */
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
new file mode 100644
index 000000000..1e94904f6
--- /dev/null
+++ b/security/apparmor/include/apparmorfs.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor filesystem definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_APPARMORFS_H
+#define __AA_APPARMORFS_H
+
+extern struct path aa_null;
+
+enum aa_sfs_type {
+ AA_SFS_TYPE_BOOLEAN,
+ AA_SFS_TYPE_STRING,
+ AA_SFS_TYPE_U64,
+ AA_SFS_TYPE_FOPS,
+ AA_SFS_TYPE_DIR,
+};
+
+struct aa_sfs_entry;
+
+struct aa_sfs_entry {
+ const char *name;
+ struct dentry *dentry;
+ umode_t mode;
+ enum aa_sfs_type v_type;
+ union {
+ bool boolean;
+ char *string;
+ unsigned long u64;
+ struct aa_sfs_entry *files;
+ } v;
+ const struct file_operations *file_ops;
+};
+
+extern const struct file_operations aa_sfs_seq_file_ops;
+
+#define AA_SFS_FILE_BOOLEAN(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_SFS_TYPE_BOOLEAN, .v.boolean = (_value), \
+ .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_STRING(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_SFS_TYPE_STRING, .v.string = (_value), \
+ .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_U64(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_SFS_TYPE_U64, .v.u64 = (_value), \
+ .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_FOPS(_name, _mode, _fops) \
+ { .name = (_name), .v_type = AA_SFS_TYPE_FOPS, \
+ .mode = (_mode), .file_ops = (_fops) }
+#define AA_SFS_DIR(_name, _value) \
+ { .name = (_name), .v_type = AA_SFS_TYPE_DIR, .v.files = (_value) }
+
+extern void __init aa_destroy_aafs(void);
+
+struct aa_profile;
+struct aa_ns;
+
+enum aafs_ns_type {
+ AAFS_NS_DIR,
+ AAFS_NS_PROFS,
+ AAFS_NS_NS,
+ AAFS_NS_RAW_DATA,
+ AAFS_NS_LOAD,
+ AAFS_NS_REPLACE,
+ AAFS_NS_REMOVE,
+ AAFS_NS_REVISION,
+ AAFS_NS_COUNT,
+ AAFS_NS_MAX_COUNT,
+ AAFS_NS_SIZE,
+ AAFS_NS_MAX_SIZE,
+ AAFS_NS_OWNER,
+ AAFS_NS_SIZEOF,
+};
+
+enum aafs_prof_type {
+ AAFS_PROF_DIR,
+ AAFS_PROF_PROFS,
+ AAFS_PROF_NAME,
+ AAFS_PROF_MODE,
+ AAFS_PROF_ATTACH,
+ AAFS_PROF_HASH,
+ AAFS_PROF_RAW_DATA,
+ AAFS_PROF_RAW_HASH,
+ AAFS_PROF_RAW_ABI,
+ AAFS_PROF_SIZEOF,
+};
+
+#define ns_dir(X) ((X)->dents[AAFS_NS_DIR])
+#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS])
+#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS])
+#define ns_subdata_dir(X) ((X)->dents[AAFS_NS_RAW_DATA])
+#define ns_subload(X) ((X)->dents[AAFS_NS_LOAD])
+#define ns_subreplace(X) ((X)->dents[AAFS_NS_REPLACE])
+#define ns_subremove(X) ((X)->dents[AAFS_NS_REMOVE])
+#define ns_subrevision(X) ((X)->dents[AAFS_NS_REVISION])
+
+#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
+#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
+
+void __aa_bump_ns_revision(struct aa_ns *ns);
+void __aafs_profile_rmdir(struct aa_profile *profile);
+void __aafs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new);
+int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aafs_ns_rmdir(struct aa_ns *ns);
+int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
+ struct dentry *dent);
+
+struct aa_loaddata;
+
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata);
+int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata);
+#else
+static inline void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata)
+{
+ /* empty stub */
+}
+
+static inline int __aa_fs_create_rawdata(struct aa_ns *ns,
+ struct aa_loaddata *rawdata)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
+
+#endif /* __AA_APPARMORFS_H */
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
new file mode 100644
index 000000000..18519a4eb
--- /dev/null
+++ b/security/apparmor/include/audit.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_AUDIT_H
+#define __AA_AUDIT_H
+
+#include <linux/audit.h>
+#include <linux/fs.h>
+#include <linux/lsm_audit.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "file.h"
+#include "label.h"
+
+extern const char *const audit_mode_names[];
+#define AUDIT_MAX_INDEX 5
+enum audit_mode {
+ AUDIT_NORMAL, /* follow normal auditing of accesses */
+ AUDIT_QUIET_DENIED, /* quiet all denied access messages */
+ AUDIT_QUIET, /* quiet all messages */
+ AUDIT_NOQUIET, /* do not quiet audit messages */
+ AUDIT_ALL /* audit all accesses */
+};
+
+enum audit_type {
+ AUDIT_APPARMOR_AUDIT,
+ AUDIT_APPARMOR_ALLOWED,
+ AUDIT_APPARMOR_DENIED,
+ AUDIT_APPARMOR_HINT,
+ AUDIT_APPARMOR_STATUS,
+ AUDIT_APPARMOR_ERROR,
+ AUDIT_APPARMOR_KILL,
+ AUDIT_APPARMOR_AUTO
+};
+
+#define OP_NULL NULL
+
+#define OP_SYSCTL "sysctl"
+#define OP_CAPABLE "capable"
+
+#define OP_UNLINK "unlink"
+#define OP_MKDIR "mkdir"
+#define OP_RMDIR "rmdir"
+#define OP_MKNOD "mknod"
+#define OP_TRUNC "truncate"
+#define OP_LINK "link"
+#define OP_SYMLINK "symlink"
+#define OP_RENAME_SRC "rename_src"
+#define OP_RENAME_DEST "rename_dest"
+#define OP_CHMOD "chmod"
+#define OP_CHOWN "chown"
+#define OP_GETATTR "getattr"
+#define OP_OPEN "open"
+
+#define OP_FRECEIVE "file_receive"
+#define OP_FPERM "file_perm"
+#define OP_FLOCK "file_lock"
+#define OP_FMMAP "file_mmap"
+#define OP_FMPROT "file_mprotect"
+#define OP_INHERIT "file_inherit"
+
+#define OP_PIVOTROOT "pivotroot"
+#define OP_MOUNT "mount"
+#define OP_UMOUNT "umount"
+
+#define OP_CREATE "create"
+#define OP_POST_CREATE "post_create"
+#define OP_BIND "bind"
+#define OP_CONNECT "connect"
+#define OP_LISTEN "listen"
+#define OP_ACCEPT "accept"
+#define OP_SENDMSG "sendmsg"
+#define OP_RECVMSG "recvmsg"
+#define OP_GETSOCKNAME "getsockname"
+#define OP_GETPEERNAME "getpeername"
+#define OP_GETSOCKOPT "getsockopt"
+#define OP_SETSOCKOPT "setsockopt"
+#define OP_SHUTDOWN "socket_shutdown"
+
+#define OP_PTRACE "ptrace"
+#define OP_SIGNAL "signal"
+
+#define OP_EXEC "exec"
+
+#define OP_CHANGE_HAT "change_hat"
+#define OP_CHANGE_PROFILE "change_profile"
+#define OP_CHANGE_ONEXEC "change_onexec"
+#define OP_STACK "stack"
+#define OP_STACK_ONEXEC "stack_onexec"
+
+#define OP_SETPROCATTR "setprocattr"
+#define OP_SETRLIMIT "setrlimit"
+
+#define OP_PROF_REPL "profile_replace"
+#define OP_PROF_LOAD "profile_load"
+#define OP_PROF_RM "profile_remove"
+
+
+struct apparmor_audit_data {
+ int error;
+ int type;
+ const char *op;
+ struct aa_label *label;
+ const char *name;
+ const char *info;
+ u32 request;
+ u32 denied;
+ union {
+ /* these entries require a custom callback fn */
+ struct {
+ struct aa_label *peer;
+ union {
+ struct {
+ const char *target;
+ kuid_t ouid;
+ } fs;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
+ struct {
+ int signal;
+ int unmappedsig;
+ };
+ struct {
+ int type, protocol;
+ struct sock *peer_sk;
+ void *addr;
+ int addrlen;
+ } net;
+ };
+ };
+ struct {
+ struct aa_profile *profile;
+ const char *ns;
+ long pos;
+ } iface;
+ struct {
+ const char *src_name;
+ const char *type;
+ const char *trans;
+ const char *data;
+ unsigned long flags;
+ } mnt;
+ };
+};
+
+/* macros for dealing with apparmor_audit_data structure */
+#define aad(SA) ((SA)->apparmor_audit_data)
+#define DEFINE_AUDIT_DATA(NAME, T, X) \
+ /* TODO: cleanup audit init so we don't need _aad = {0,} */ \
+ struct apparmor_audit_data NAME ## _aad = { .op = (X), }; \
+ struct common_audit_data NAME = \
+ { \
+ .type = (T), \
+ .u.tsk = NULL, \
+ }; \
+ NAME.apparmor_audit_data = &(NAME ## _aad)
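+
+/*
+ * Usage sketch (mirrors aa_audit_file() in file.c):
+ *
+ *	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, op);
+ *
+ *	aad(&sa)->request = request;
+ *	aad(&sa)->error = error;
+ *	error = aa_audit(type, profile, &sa, file_audit_cb);
+ */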
+
+void aa_audit_msg(int type, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *));
+int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *));
+
+#define aa_audit_error(ERROR, SA, CB) \
+({ \
+ aad((SA))->error = (ERROR); \
+ aa_audit_msg(AUDIT_APPARMOR_ERROR, (SA), (CB)); \
+ aad((SA))->error; \
+})
+
+
+static inline int complain_error(int error)
+{
+ if (error == -EPERM || error == -EACCES)
+ return 0;
+ return error;
+}
+
+void aa_audit_rule_free(void *vrule);
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
+int aa_audit_rule_known(struct audit_krule *rule);
+int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
+
+#endif /* __AA_AUDIT_H */
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
new file mode 100644
index 000000000..d420e2d10
--- /dev/null
+++ b/security/apparmor/include/capability.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2013 Canonical Ltd.
+ */
+
+#ifndef __AA_CAPABILITY_H
+#define __AA_CAPABILITY_H
+
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_label;
+
+/* aa_caps - confinement data for capabilities
+ * @allow: capabilities mask
+ * @audit: caps that are to be audited
+ * @denied: caps that are explicitly denied
+ * @quiet: caps that should not be audited
+ * @kill: caps that when requested will result in the task being killed
+ * @extended: caps that are subject to finer-grained mediation
+ */
+struct aa_caps {
+ kernel_cap_t allow;
+ kernel_cap_t audit;
+ kernel_cap_t denied;
+ kernel_cap_t quiet;
+ kernel_cap_t kill;
+ kernel_cap_t extended;
+};
+
+extern struct aa_sfs_entry aa_sfs_entry_caps[];
+
+int aa_capable(struct aa_label *label, int cap, unsigned int opts);
+
+static inline void aa_free_cap_rules(struct aa_caps *caps)
+{
+ /* NOP */
+}
+
+#endif /* __AA_CAPABILITY_H */
diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
new file mode 100644
index 000000000..0b9ae4804
--- /dev/null
+++ b/security/apparmor/include/cred.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor contexts used to associate "labels" to objects.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_CONTEXT_H
+#define __AA_CONTEXT_H
+
+#include <linux/cred.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "label.h"
+#include "policy_ns.h"
+#include "task.h"
+
+static inline struct aa_label *cred_label(const struct cred *cred)
+{
+ struct aa_label **blob = cred->security + apparmor_blob_sizes.lbs_cred;
+
+ AA_BUG(!blob);
+ return *blob;
+}
+
+static inline void set_cred_label(const struct cred *cred,
+ struct aa_label *label)
+{
+ struct aa_label **blob = cred->security + apparmor_blob_sizes.lbs_cred;
+
+ AA_BUG(!blob);
+ *blob = label;
+}
+
+/**
+ * aa_cred_raw_label - obtain cred's label
+ * @cred: cred to obtain label from (NOT NULL)
+ *
+ * Returns: confining label
+ *
+ * does NOT increment reference count
+ */
+static inline struct aa_label *aa_cred_raw_label(const struct cred *cred)
+{
+ struct aa_label *label = cred_label(cred);
+
+ AA_BUG(!label);
+ return label;
+}
+
+/**
+ * aa_get_newest_cred_label - obtain the newest label on a cred
+ * @cred: cred to obtain label from (NOT NULL)
+ *
+ * Returns: newest version of confining label
+ */
+static inline struct aa_label *aa_get_newest_cred_label(const struct cred *cred)
+{
+ return aa_get_newest_label(aa_cred_raw_label(cred));
+}
+
+/**
+ * __aa_task_raw_label - retrieve another task's label
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: @task's label without incrementing its ref count
+ *
+ * If @task != current, this must be called within an RCU-safe critical section
+ */
+static inline struct aa_label *__aa_task_raw_label(struct task_struct *task)
+{
+ return aa_cred_raw_label(__task_cred(task));
+}
+
+/**
+ * aa_current_raw_label - find the current task's confining label
+ *
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
+ *
+ * This fn will not update the task's cred to the most up to date version
+ * of the label so it is safe to call when inside of locks.
+ */
+static inline struct aa_label *aa_current_raw_label(void)
+{
+ return aa_cred_raw_label(current_cred());
+}
+
+/**
+ * aa_get_current_label - get the newest version of the current task's label
+ *
+ * Returns: newest version of confining label (NOT NULL)
+ *
+ * This fn will not update the task's cred, so it is safe inside of locks
+ *
+ * The returned reference must be put with aa_put_label()
+ */
+static inline struct aa_label *aa_get_current_label(void)
+{
+ struct aa_label *l = aa_current_raw_label();
+
+ if (label_is_stale(l))
+ return aa_get_newest_label(l);
+ return aa_get_label(l);
+}
+
+#define __end_current_label_crit_section(X) end_current_label_crit_section(X)
+
+/**
+ * end_current_label_crit_section - put a reference found with begin_current_label..
+ * @label: label reference to put
+ *
+ * Should only be used with a reference obtained with
+ * begin_current_label_crit_section and never used in situations where the
+ * task cred may be updated
+ */
+static inline void end_current_label_crit_section(struct aa_label *label)
+{
+ if (label != aa_current_raw_label())
+ aa_put_label(label);
+}
+
+/**
+ * __begin_current_label_crit_section - current's confining label
+ *
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
+ *
+ * safe to call inside locks
+ *
+ * The returned reference must be put with __end_current_label_crit_section()
+ * This must NOT be used if the task cred could be updated within the
+ * critical section between __begin_current_label_crit_section() ..
+ * __end_current_label_crit_section()
+ */
+static inline struct aa_label *__begin_current_label_crit_section(void)
+{
+ struct aa_label *label = aa_current_raw_label();
+
+ if (label_is_stale(label))
+ label = aa_get_newest_label(label);
+
+ return label;
+}
+
+/**
+ * begin_current_label_crit_section - current's confining label and update it
+ *
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
+ *
+ * Not safe to call inside locks
+ *
+ * The returned reference must be put with end_current_label_crit_section()
+ * This must NOT be used if the task cred could be updated within the
+ * critical section between begin_current_label_crit_section() ..
+ * end_current_label_crit_section()
+ */
+static inline struct aa_label *begin_current_label_crit_section(void)
+{
+ struct aa_label *label = aa_current_raw_label();
+
+ might_sleep();
+
+ if (label_is_stale(label)) {
+ label = aa_get_newest_label(label);
+ if (aa_replace_current_label(label) == 0)
+ /* task cred will keep the reference */
+ aa_put_label(label);
+ }
+
+ return label;
+}
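+
+/*
+ * Usage sketch (some_mediation_check() is a hypothetical placeholder):
+ *
+ *	label = begin_current_label_crit_section();
+ *	error = some_mediation_check(label, ...);
+ *	end_current_label_crit_section(label);
+ */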
+
+static inline struct aa_ns *aa_get_current_ns(void)
+{
+ struct aa_label *label;
+ struct aa_ns *ns;
+
+ label = __begin_current_label_crit_section();
+ ns = aa_get_ns(labels_ns(label));
+ __end_current_label_crit_section(label);
+
+ return ns;
+}
+
+#endif /* __AA_CONTEXT_H */
diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h
new file mode 100644
index 000000000..636a04e20
--- /dev/null
+++ b/security/apparmor/include/crypto.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ */
+
+#ifndef __APPARMOR_CRYPTO_H
+#define __APPARMOR_CRYPTO_H
+
+#include "policy.h"
+
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+unsigned int aa_hash_size(void);
+char *aa_calc_hash(void *data, size_t len);
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len);
+#else
+static inline char *aa_calc_hash(void *data, size_t len)
+{
+ return NULL;
+}
+static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version,
+ void *start, size_t len)
+{
+ return 0;
+}
+
+static inline unsigned int aa_hash_size(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __APPARMOR_CRYPTO_H */
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h
new file mode 100644
index 000000000..d14928fe1
--- /dev/null
+++ b/security/apparmor/include/domain.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security domain transition function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/binfmts.h>
+#include <linux/types.h>
+
+#include "label.h"
+
+#ifndef __AA_DOMAIN_H
+#define __AA_DOMAIN_H
+
+struct aa_domain {
+ int size;
+ char **table;
+};
+
+#define AA_CHANGE_NOFLAGS 0
+#define AA_CHANGE_TEST 1
+#define AA_CHANGE_CHILD 2
+#define AA_CHANGE_ONEXEC 4
+#define AA_CHANGE_STACK 8
+
+struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+ const char **name);
+
+int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm);
+
+void aa_free_domain_entries(struct aa_domain *domain);
+int aa_change_hat(const char *hats[], int count, u64 token, int flags);
+int aa_change_profile(const char *fqname, int flags);
+
+#endif /* __AA_DOMAIN_H */
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
new file mode 100644
index 000000000..029cb20e3
--- /dev/null
+++ b/security/apparmor/include/file.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor file mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_FILE_H
+#define __AA_FILE_H
+
+#include <linux/spinlock.h>
+
+#include "domain.h"
+#include "match.h"
+#include "perms.h"
+
+struct aa_profile;
+struct path;
+
+#define mask_mode_t(X) ((X) & (MAY_EXEC | MAY_WRITE | MAY_READ | MAY_APPEND))
+
+#define AA_AUDIT_FILE_MASK (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND |\
+ AA_MAY_CREATE | AA_MAY_DELETE | \
+ AA_MAY_GETATTR | AA_MAY_SETATTR | \
+ AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_LOCK | \
+ AA_EXEC_MMAP | AA_MAY_LINK)
+
+static inline struct aa_file_ctx *file_ctx(struct file *file)
+{
+ return file->f_security + apparmor_blob_sizes.lbs_file;
+}
+
+/* struct aa_file_ctx - the AppArmor context the file was opened in
+ * @lock: lock to update the ctx
+ * @label: label currently cached on the ctx
+ * @perms: the permission the file was opened with
+ */
+struct aa_file_ctx {
+ spinlock_t lock;
+ struct aa_label __rcu *label;
+ u32 allow;
+};
+
+/**
+ * aa_alloc_file_ctx - allocate file_ctx
+ * @label: initial label of task creating the file
+ * @gfp: gfp flags for allocation
+ *
+ * Returns: file_ctx or NULL on failure
+ */
+static inline struct aa_file_ctx *aa_alloc_file_ctx(struct aa_label *label,
+ gfp_t gfp)
+{
+ struct aa_file_ctx *ctx;
+
+ ctx = kzalloc(sizeof(struct aa_file_ctx), gfp);
+ if (ctx) {
+ spin_lock_init(&ctx->lock);
+ rcu_assign_pointer(ctx->label, aa_get_label(label));
+ }
+ return ctx;
+}
+
+/**
+ * aa_free_file_ctx - free a file_ctx
+ * @ctx: file_ctx to free (MAYBE_NULL)
+ */
+static inline void aa_free_file_ctx(struct aa_file_ctx *ctx)
+{
+ if (ctx) {
+ aa_put_label(rcu_access_pointer(ctx->label));
+ kfree_sensitive(ctx);
+ }
+}
+
+static inline struct aa_label *aa_get_file_label(struct aa_file_ctx *ctx)
+{
+ return aa_get_label_rcu(&ctx->label);
+}
+
+/*
+ * The xindex is broken into 3 parts
+ * - index - an index into either the exec name table or the variable table
+ * - exec type - which determines how the executable name and index are used
+ * - flags - which modify how the destination name is applied
+ */
+#define AA_X_INDEX_MASK 0x03ff
+
+#define AA_X_TYPE_MASK 0x0c00
+#define AA_X_TYPE_SHIFT 10
+#define AA_X_NONE 0x0000
+#define AA_X_NAME 0x0400 /* use executable name px */
+#define AA_X_TABLE 0x0800 /* use a specified name ->n# */
+
+#define AA_X_UNSAFE 0x1000
+#define AA_X_CHILD 0x2000 /* make >AA_X_NONE apply to children */
+#define AA_X_INHERIT 0x4000
+#define AA_X_UNCONFINED 0x8000
+
+/* need to make it conditional which of these fields are set */
+struct path_cond {
+ kuid_t uid;
+ umode_t mode;
+};
+
+#define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
+
+/* FIXME: split perms from dfa and match this to description
+ * also add delegation info.
+ */
+static inline u16 dfa_map_xindex(u16 mask)
+{
+ u16 old_index = (mask >> 10) & 0xf;
+ u16 index = 0;
+
+ if (mask & 0x100)
+ index |= AA_X_UNSAFE;
+ if (mask & 0x200)
+ index |= AA_X_INHERIT;
+ if (mask & 0x80)
+ index |= AA_X_UNCONFINED;
+
+ if (old_index == 1) {
+ index |= AA_X_UNCONFINED;
+ } else if (old_index == 2) {
+ index |= AA_X_NAME;
+ } else if (old_index == 3) {
+ index |= AA_X_NAME | AA_X_CHILD;
+ } else if (old_index) {
+ index |= AA_X_TABLE;
+ index |= old_index - 4;
+ }
+
+ return index;
+}
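+
+/*
+ * Example: an old-format mask with old_index == 3 maps to
+ * AA_X_NAME | AA_X_CHILD (a child-profile transition); old_index values
+ * of 4 and above select the x transition name table, e.g. old_index == 5
+ * yields AA_X_TABLE with table index 1.
+ */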
+
+/*
+ * map old dfa inline permissions to new format
+ */
+#define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
+ ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_user_xbits(dfa, state) (((ACCEPT_TABLE(dfa)[state]) >> 7) & 0x7f)
+#define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
+#define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
+#define dfa_user_xindex(dfa, state) \
+ (dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
+
+#define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
+ 0x7f) | \
+ ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_other_xbits(dfa, state) \
+ ((((ACCEPT_TABLE(dfa)[state]) >> 7) >> 14) & 0x7f)
+#define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
+#define dfa_other_quiet(dfa, state) \
+ ((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
+#define dfa_other_xindex(dfa, state) \
+ dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
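+
+/*
+ * Layout note: in the old inline format the user permissions occupy the
+ * low 14 bits of an accept entry and the "other" permissions the next
+ * 14, so each dfa_other_*() macro is its dfa_user_*() counterpart
+ * shifted down by 14; the top bits carry change_profile/onexec flags.
+ */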
+
+int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel, kuid_t ouid,
+ const char *info, int error);
+
+/**
+ * struct aa_file_rules - components used for file rule permissions
+ * @dfa: dfa to match path names and conditionals against
+ * @perms: permission table indexed by the matched state accept entry of @dfa
+ * @trans: transition table indexed by named x transitions
+ *
+ * File permission are determined by matching a path against @dfa and
+ * then using the value of the accept entry for the matching state as
+ * an index into @perms. If a named exec transition is required it is
+ * looked up in the transition table.
+ */
+struct aa_file_rules {
+ unsigned int start;
+ struct aa_dfa *dfa;
+ /* struct perms perms; */
+ struct aa_domain trans;
+ /* TODO: add delegate table */
+};
+
+struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
+ struct path_cond *cond);
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+ const char *name, struct path_cond *cond,
+ struct aa_perms *perms);
+
+int __aa_path_perm(const char *op, struct aa_profile *profile,
+ const char *name, u32 request, struct path_cond *cond,
+ int flags, struct aa_perms *perms);
+int aa_path_perm(const char *op, struct aa_label *label,
+ const struct path *path, int flags, u32 request,
+ struct path_cond *cond);
+
+int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry);
+
+int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic);
+
+void aa_inherit_files(const struct cred *cred, struct files_struct *files);
+
+static inline void aa_free_file_rules(struct aa_file_rules *rules)
+{
+ aa_put_dfa(rules->dfa);
+ aa_free_domain_entries(&rules->trans);
+}
+
+/**
+ * aa_map_file_to_perms - map file flags to AppArmor permissions
+ * @file: open file to map flags to AppArmor permissions
+ *
+ * Returns: apparmor permission set for the file
+ */
+static inline u32 aa_map_file_to_perms(struct file *file)
+{
+ int flags = file->f_flags;
+ u32 perms = 0;
+
+ if (file->f_mode & FMODE_WRITE)
+ perms |= MAY_WRITE;
+ if (file->f_mode & FMODE_READ)
+ perms |= MAY_READ;
+
+ if ((flags & O_APPEND) && (perms & MAY_WRITE))
+ perms = (perms & ~MAY_WRITE) | MAY_APPEND;
+ /* trunc implies write permission */
+ if (flags & O_TRUNC)
+ perms |= MAY_WRITE;
+ if (flags & O_CREAT)
+ perms |= AA_MAY_CREATE;
+
+ return perms;
+}
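+
+/*
+ * Example: a file opened O_WRONLY | O_APPEND maps to MAY_APPEND (write
+ * is downgraded to append), while O_WRONLY | O_TRUNC maps to MAY_WRITE
+ * since truncation implies full write permission.
+ */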
+
+#endif /* __AA_FILE_H */
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
new file mode 100644
index 000000000..a1ac6ffb9
--- /dev/null
+++ b/security/apparmor/include/ipc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#ifndef __AA_IPC_H
+#define __AA_IPC_H
+
+#include <linux/sched.h>
+
+int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
+
+#endif /* __AA_IPC_H */
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
new file mode 100644
index 000000000..860484c6f
--- /dev/null
+++ b/security/apparmor/include/label.h
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ */
+
+#ifndef __AA_LABEL_H
+#define __AA_LABEL_H
+
+#include <linux/atomic.h>
+#include <linux/audit.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+#include "apparmor.h"
+#include "lib.h"
+
+struct aa_ns;
+
+#define LOCAL_VEC_ENTRIES 8
+#define DEFINE_VEC(T, V) \
+ struct aa_ ## T *(_ ## V ## _localtmp)[LOCAL_VEC_ENTRIES]; \
+ struct aa_ ## T **(V)
+
+#define vec_setup(T, V, N, GFP) \
+({ \
+ if ((N) <= LOCAL_VEC_ENTRIES) { \
+ typeof(N) i; \
+ (V) = (_ ## V ## _localtmp); \
+ for (i = 0; i < (N); i++) \
+ (V)[i] = NULL; \
+ } else \
+ (V) = kzalloc(sizeof(struct aa_ ## T *) * (N), (GFP)); \
+ (V) ? 0 : -ENOMEM; \
+})
+
+#define vec_cleanup(T, V, N) \
+do { \
+ int i; \
+ for (i = 0; i < (N); i++) { \
+ if (!IS_ERR_OR_NULL((V)[i])) \
+ aa_put_ ## T((V)[i]); \
+ } \
+ if ((V) != _ ## V ## _localtmp) \
+ kfree(V); \
+} while (0)
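+
+/*
+ * Usage sketch (sizes illustrative): stack-allocate a small profile
+ * vector, falling back to kzalloc() past LOCAL_VEC_ENTRIES entries:
+ *
+ *	DEFINE_VEC(profile, vec);
+ *
+ *	if (vec_setup(profile, vec, n, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	...
+ *	vec_cleanup(profile, vec, n);
+ */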
+
+#define vec_last(VEC, SIZE) ((VEC)[(SIZE) - 1])
+#define vec_ns(VEC, SIZE) (vec_last((VEC), (SIZE))->ns)
+#define vec_labelset(VEC, SIZE) (&vec_ns((VEC), (SIZE))->labels)
+#define cleanup_domain_vec(V, L) cleanup_label_vec((V), (L)->size)
+
+struct aa_profile;
+#define VEC_FLAG_TERMINATE 1
+int aa_vec_unique(struct aa_profile **vec, int n, int flags);
+struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
+ gfp_t gfp);
+#define aa_sort_and_merge_vec(N, V) \
+ aa_sort_and_merge_profiles((N), (struct aa_profile **)(V))
+
+
+/* struct aa_labelset - set of labels for a namespace
+ *
+ * Labels are reference counted; aa_labelset does not contribute to label
+ * reference counts. Once a label's last refcount is put it is removed from
+ * the set.
+ */
+struct aa_labelset {
+ rwlock_t lock;
+
+ struct rb_root root;
+};
+
+#define __labelset_for_each(LS, N) \
+ for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
+
+enum label_flags {
+ FLAG_HAT = 1, /* profile is a hat */
+ FLAG_UNCONFINED = 2, /* label unconfined only if all */
+ FLAG_NULL = 4, /* profile is null learning profile */
+ FLAG_IX_ON_NAME_ERROR = 8, /* fallback to ix on name lookup fail */
+ FLAG_IMMUTIBLE = 0x10, /* don't allow changes/replacement */
+ FLAG_USER_DEFINED = 0x20, /* user based profile - lower privs */
+ FLAG_NO_LIST_REF = 0x40, /* list doesn't keep profile ref */
+ FLAG_NS_COUNT = 0x80, /* carries NS ref count */
+ FLAG_IN_TREE = 0x100, /* label is in tree */
+ FLAG_PROFILE = 0x200, /* label is a profile */
+ FLAG_EXPLICIT = 0x400, /* explicit static label */
+ FLAG_STALE = 0x800, /* replaced/removed */
+ FLAG_RENAMED = 0x1000, /* label has renaming in it */
+ FLAG_REVOKED = 0x2000, /* label has revocation in it */
+ FLAG_DEBUG1 = 0x4000,
+ FLAG_DEBUG2 = 0x8000,
+
+ /* These flags must correspond with PATH_flags */
+ /* TODO: add new path flags */
+};
+
+struct aa_label;
+struct aa_proxy {
+ struct kref count;
+ struct aa_label __rcu *label;
+};
+
+struct label_it {
+ int i, j;
+};
+
+/* struct aa_label - lazy labeling struct
+ * @count: ref count of active users
+ * @node: rbtree position
+ * @rcu: rcu callback struct
+ * @proxy: is set to the label that replaced this label
+ * @hname: text representation of the label (MAYBE_NULL)
+ * @flags: stale and other flags - values may change under label set lock
+ * @secid: secid that references this label
+ * @size: number of entries in @vec[]
+ * @vec: set of profiles for label, actual size determined by @size
+ */
+struct aa_label {
+ struct kref count;
+ struct rb_node node;
+ struct rcu_head rcu;
+ struct aa_proxy *proxy;
+ __counted char *hname;
+ long flags;
+ u32 secid;
+ int size;
+ struct aa_profile *vec[];
+};
+
+#define last_error(E, FN) \
+do { \
+ int __subE = (FN); \
+ if (__subE) \
+ (E) = __subE; \
+} while (0)
+
+#define label_isprofile(X) ((X)->flags & FLAG_PROFILE)
+#define label_unconfined(X) ((X)->flags & FLAG_UNCONFINED)
+#define unconfined(X) label_unconfined(X)
+#define label_is_stale(X) ((X)->flags & FLAG_STALE)
+#define __label_make_stale(X) ((X)->flags |= FLAG_STALE)
+#define labels_ns(X) (vec_ns(&((X)->vec[0]), (X)->size))
+#define labels_set(X) (&labels_ns(X)->labels)
+#define labels_view(X) labels_ns(X)
+#define labels_profile(X) ((X)->vec[(X)->size - 1])
+
+
+int aa_label_next_confined(struct aa_label *l, int i);
+
+/* for each profile in a label */
+#define label_for_each(I, L, P) \
+ for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i))
+
+/* assumes break/goto ended label_for_each */
+#define label_for_each_cont(I, L, P) \
+ for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i))
+
+/* written as an expression so it is valid as a for-loop increment */
+#define next_comb(I, L1, L2) \
+ (++(I).j >= (L2)->size ? ((I).i++, (I).j = 0) : 0)
+
+
+/* for each combination of P1 in L1, and P2 in L2 */
+#define label_for_each_comb(I, L1, L2, P1, P2) \
+for ((I).i = (I).j = 0; \
+ ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]); \
+ next_comb((I), (L1), (L2)))
+
+#define fn_for_each_comb(L1, L2, P1, P2, FN) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+/* for each profile that is enforcing confinement in a label */
+#define label_for_each_confined(I, L, P) \
+ for ((I).i = aa_label_next_confined((L), 0); \
+ ((P) = (L)->vec[(I).i]); \
+ (I).i = aa_label_next_confined((L), (I).i + 1))
+
+#define label_for_each_in_merge(I, A, B, P) \
+ for ((I).i = (I).j = 0; \
+ ((P) = aa_label_next_in_merge(&(I), (A), (B))); \
+ )
+
+#define label_for_each_not_in_set(I, SET, SUB, P) \
+ for ((I).i = (I).j = 0; \
+ ((P) = __aa_label_next_not_in_set(&(I), (SET), (SUB))); \
+ )
+
+#define next_in_ns(i, NS, L) \
+({ \
+ typeof(i) ___i = (i); \
+ while ((L)->vec[___i] && (L)->vec[___i]->ns != (NS)) \
+ (___i)++; \
+ (___i); \
+})
+
+#define label_for_each_in_ns(I, NS, L, P) \
+ for ((I).i = next_in_ns(0, (NS), (L)); \
+ ((P) = (L)->vec[(I).i]); \
+ (I).i = next_in_ns((I).i + 1, (NS), (L)))
+
+#define fn_for_each_in_ns(L, P, FN) \
+({ \
+ struct label_it __i; \
+ struct aa_ns *__ns = labels_ns(L); \
+ int __E = 0; \
+ label_for_each_in_ns(__i, __ns, (L), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+
+#define fn_for_each_XXX(L, P, FN, ...) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each ## __VA_ARGS__(i, (L), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+#define fn_for_each(L, P, FN) fn_for_each_XXX(L, P, FN)
+#define fn_for_each_confined(L, P, FN) fn_for_each_XXX(L, P, FN, _confined)
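+
+/*
+ * Usage sketch (mirrors aa_path_perm() in file.c): apply a check to
+ * every confined profile in a label, keeping the last non-zero error:
+ *
+ *	error = fn_for_each_confined(label, profile,
+ *			profile_path_perm(op, profile, path, buffer,
+ *					  request, cond, flags, &perms));
+ */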
+
+#define fn_for_each2_XXX(L1, L2, P, FN, ...) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+#define fn_for_each_in_merge(L1, L2, P, FN) \
+ fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
+#define fn_for_each_not_in_set(L1, L2, P, FN) \
+ fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set)
+
+#define LABEL_MEDIATES(L, C) \
+({ \
+ struct aa_profile *profile; \
+ struct label_it i; \
+ int ret = 0; \
+ label_for_each(i, (L), profile) { \
+ if (PROFILE_MEDIATES(profile, (C))) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+
+void aa_labelset_destroy(struct aa_labelset *ls);
+void aa_labelset_init(struct aa_labelset *ls);
+void __aa_labelset_update_subtree(struct aa_ns *ns);
+
+void aa_label_destroy(struct aa_label *label);
+void aa_label_free(struct aa_label *label);
+void aa_label_kref(struct kref *kref);
+bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
+
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub);
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+ struct aa_label *set,
+ struct aa_label *sub);
+bool aa_label_remove(struct aa_label *label);
+struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l);
+bool aa_label_replace(struct aa_label *old, struct aa_label *new);
+bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old,
+ struct aa_label *new);
+
+struct aa_label *aa_label_find(struct aa_label *l);
+
+struct aa_profile *aa_label_next_in_merge(struct label_it *I,
+ struct aa_label *a,
+ struct aa_label *b);
+struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b);
+struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
+ gfp_t gfp);
+
+
+bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
+
+#define FLAGS_NONE 0
+#define FLAG_SHOW_MODE 1
+#define FLAG_VIEW_SUBNS 2
+#define FLAG_HIDDEN_UNCONFINED 4
+#define FLAG_ABS_ROOT 8
+int aa_label_snxprint(char *str, size_t size, struct aa_ns *view,
+ struct aa_label *label, int flags);
+int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
+ int flags, gfp_t gfp);
+int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
+ gfp_t gfp);
+void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
+void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
+void aa_label_printk(struct aa_label *label, gfp_t gfp);
+
+struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
+ size_t n, gfp_t gfp, bool create,
+ bool force_stack);
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+ gfp_t gfp, bool create, bool force_stack);
+
+static inline const char *aa_label_strn_split(const char *str, int n)
+{
+ const char *pos;
+ unsigned int state;
+
+ state = aa_dfa_matchn_until(stacksplitdfa, DFA_START, str, n, &pos);
+ if (!ACCEPT_TABLE(stacksplitdfa)[state])
+ return NULL;
+
+ return pos - 3;
+}
+
+static inline const char *aa_label_str_split(const char *str)
+{
+ const char *pos;
+ unsigned int state;
+
+ state = aa_dfa_match_until(stacksplitdfa, DFA_START, str, &pos);
+ if (!ACCEPT_TABLE(stacksplitdfa)[state])
+ return NULL;
+
+ return pos - 3;
+}
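+
+/*
+ * Note on the "- 3" above: stacksplitdfa accepts once it has consumed
+ * the 3-byte "//&" stack separator, so backing up by 3 returns a
+ * pointer to the start of the separator itself.
+ */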
+
+struct aa_perms;
+int aa_label_match(struct aa_profile *profile, struct aa_label *label,
+ unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms);
+
+
+/**
+ * __aa_get_label - get a reference count to uncounted label reference
+ * @l: reference to get a count on
+ *
+ * Returns: pointer to reference OR NULL if race is lost and reference is
+ * being repeated.
+ * Requires: lock held, and the return code MUST be checked
+ */
+static inline struct aa_label *__aa_get_label(struct aa_label *l)
+{
+ if (l && kref_get_unless_zero(&l->count))
+ return l;
+
+ return NULL;
+}
+
+static inline struct aa_label *aa_get_label(struct aa_label *l)
+{
+ if (l)
+ kref_get(&(l->count));
+
+ return l;
+}
+
+
+/**
+ * aa_get_label_rcu - increment refcount on a label that can be replaced
+ * @l: pointer to label that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted label.
+ * else NULL if no label
+ */
+static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l)
+{
+ struct aa_label *c;
+
+ rcu_read_lock();
+ do {
+ c = rcu_dereference(*l);
+ } while (c && !kref_get_unless_zero(&c->count));
+ rcu_read_unlock();
+
+ return c;
+}
+
+/**
+ * aa_get_newest_label - find the newest version of @l
+ * @l: the label to check for newer versions of
+ *
+ * Returns: refcounted newest version of @l, taking into account
+ *          replacement, renames and removals; otherwise a refcounted @l
+ */
+static inline struct aa_label *aa_get_newest_label(struct aa_label *l)
+{
+ if (!l)
+ return NULL;
+
+ if (label_is_stale(l)) {
+ struct aa_label *tmp;
+
+ AA_BUG(!l->proxy);
+ AA_BUG(!l->proxy->label);
+ /* BUG: the only way this can happen is if @l's ref count and
+ * its replacement's ref count have both gone to 0 and are on
+ * their way to destruction, ie. we have a refcounting error
+ */
+ tmp = aa_get_label_rcu(&l->proxy->label);
+ AA_BUG(!tmp);
+
+ return tmp;
+ }
+
+ return aa_get_label(l);
+}
+
+static inline void aa_put_label(struct aa_label *l)
+{
+ if (l)
+ kref_put(&l->count, aa_label_kref);
+}
+
+
+struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
+void aa_proxy_kref(struct kref *kref);
+
+static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *proxy)
+{
+ if (proxy)
+ kref_get(&(proxy->count));
+
+ return proxy;
+}
+
+static inline void aa_put_proxy(struct aa_proxy *proxy)
+{
+ if (proxy)
+ kref_put(&proxy->count, aa_proxy_kref);
+}
+
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new);
+
+#endif /* __AA_LABEL_H */
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
new file mode 100644
index 000000000..f42359f58
--- /dev/null
+++ b/security/apparmor/include/lib.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor lib definitions
+ *
+ * 2017 Canonical Ltd.
+ */
+
+#ifndef __AA_LIB_H
+#define __AA_LIB_H
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/lsm_hooks.h>
+
+#include "match.h"
+
+/*
+ * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
+ * which is not related to profile accesses.
+ */
+
+#define DEBUG_ON (aa_g_debug)
+/*
+ * split individual debug cases out in preparation for finer grained
+ * debug controls in the future.
+ */
+#define AA_DEBUG_LABEL DEBUG_ON
+#define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args)
+#define AA_DEBUG(fmt, args...) \
+ do { \
+ if (DEBUG_ON) \
+ pr_debug_ratelimited("AppArmor: " fmt, ##args); \
+ } while (0)
+
+#define AA_WARN(X) WARN((X), "APPARMOR WARN %s: %s\n", __func__, #X)
+
+#define AA_BUG(X, args...) \
+ do { \
+ _Pragma("GCC diagnostic ignored \"-Wformat-zero-length\""); \
+ AA_BUG_FMT((X), "" args); \
+ _Pragma("GCC diagnostic warning \"-Wformat-zero-length\""); \
+ } while (0)
+#ifdef CONFIG_SECURITY_APPARMOR_DEBUG_ASSERTS
+#define AA_BUG_FMT(X, fmt, args...) \
+ WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args)
+#else
+#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args)
+#endif
+
+#define AA_ERROR(fmt, args...) \
+ pr_err_ratelimited("AppArmor: " fmt, ##args)
+
+/* Flag indicating whether initialization completed */
+extern int apparmor_initialized;
+
+/* fn's in lib */
+const char *skipn_spaces(const char *str, size_t n);
+char *aa_split_fqname(char *args, char **ns_name);
+const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
+ size_t *ns_len);
+void aa_info_message(const char *str);
+
+/* Security blob offsets */
+extern struct lsm_blob_sizes apparmor_blob_sizes;
+
+/**
+ * aa_strneq - compare a null-terminated @str to a non-null-terminated substring
+ * @str: a null terminated string
+ * @sub: a substring, not necessarily null terminated
+ * @len: length of @sub to compare
+ *
+ * The @str string must be fully consumed for this to be considered a match
+ */
+static inline bool aa_strneq(const char *str, const char *sub, int len)
+{
+ return !strncmp(str, sub, len) && !str[len];
+}
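+
+/*
+ * Example: aa_strneq("profile", "profile//hat", 7) is true: the first 7
+ * chars match and @str ends at index 7. aa_strneq("prof", "profile", 7)
+ * is false because @str ends before @len chars have matched.
+ */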
+
+/**
+ * aa_dfa_null_transition - step to next state after null character
+ * @dfa: the dfa to match against
+ * @start: the state of the dfa to start matching in
+ *
+ * aa_dfa_null_transition transitions to the next state after a null
+ * character which is not used in standard matching and is only
+ * used to separate pairs.
+ */
+static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
+ unsigned int start)
+{
+ /* the null transition only needs the string's null terminator byte */
+ return aa_dfa_next(dfa, start, 0);
+}
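+
+/*
+ * Pair-matching sketch (illustrative of how the null transition is
+ * used): two strings are matched as a pair by walking the first,
+ * taking the null transition, then walking the second:
+ *
+ *	state = aa_dfa_match(dfa, start, name1);
+ *	state = aa_dfa_null_transition(dfa, state);
+ *	state = aa_dfa_match(dfa, state, name2);
+ */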
+
+static inline bool path_mediated_fs(struct dentry *dentry)
+{
+ return !(dentry->d_sb->s_flags & SB_NOUSER);
+}
+
+
+struct counted_str {
+ struct kref count;
+ char name[];
+};
+
+#define str_to_counted(str) \
+ ((struct counted_str *)(str - offsetof(struct counted_str, name)))
+
+#define __counted /* atm just a notation */
+
+void aa_str_kref(struct kref *kref);
+char *aa_str_alloc(int size, gfp_t gfp);
+
+
+static inline __counted char *aa_get_str(__counted char *str)
+{
+ if (str)
+ kref_get(&(str_to_counted(str)->count));
+
+ return str;
+}
+
+static inline void aa_put_str(__counted char *str)
+{
+ if (str)
+ kref_put(&str_to_counted(str)->count, aa_str_kref);
+}
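+
+/*
+ * Counted-string lifecycle sketch (illustrative): strings are allocated
+ * with a refcount of one and shared by taking further references:
+ *
+ *	__counted char *name = aa_str_alloc(len + 1, GFP_KERNEL);
+ *	...
+ *	shared = aa_get_str(name);	refcount now 2
+ *	aa_put_str(name);		freed when the count drops to 0
+ */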
+
+
+/* struct aa_policy - common part of both namespaces and profiles
+ * @name: name of the object
+ * @hname: the hierarchical name
+ * @list: list policy object is on
+ * @profiles: head of the profiles list contained in the object
+ */
+struct aa_policy {
+ const char *name;
+ __counted char *hname;
+ struct list_head list;
+ struct list_head profiles;
+};
+
+/**
+ * basename - find the last component of an hname
+ * @name: hname to find the base profile name component of (NOT NULL)
+ *
+ * Returns: the tail (base profile name) name component of an hname
+ */
+static inline const char *basename(const char *hname)
+{
+ char *split;
+
+ hname = strim((char *)hname);
+ for (split = strstr(hname, "//"); split; split = strstr(hname, "//"))
+ hname = split + 2;
+
+ return hname;
+}
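+
+/* Example: basename("ns//profile//hat") returns "hat"; an hname with no
+ * "//" separator is returned unchanged.
+ */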
+
+/**
+ * __policy_find - find a policy by @name on a policy list
+ * @head: list to search (NOT NULL)
+ * @name: name to search for (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @name or NULL if not found
+ */
+static inline struct aa_policy *__policy_find(struct list_head *head,
+ const char *name)
+{
+ struct aa_policy *policy;
+
+ list_for_each_entry_rcu(policy, head, list) {
+ if (!strcmp(policy->name, name))
+ return policy;
+ }
+ return NULL;
+}
+
+/**
+ * __policy_strn_find - find a policy whose name matches @len chars of @str
+ * @head: list to search (NOT NULL)
+ * @str: string to search for (NOT NULL)
+ * @len: length of match required
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @str or NULL if not found
+ *
+ * if @len == strlen(@str) then this is equivalent to __policy_find,
+ * otherwise it allows searching for a policy by a partial match of name
+ */
+static inline struct aa_policy *__policy_strn_find(struct list_head *head,
+ const char *str, int len)
+{
+ struct aa_policy *policy;
+
+ list_for_each_entry_rcu(policy, head, list) {
+ if (aa_strneq(policy->name, str, len))
+ return policy;
+ }
+
+ return NULL;
+}
+
+bool aa_policy_init(struct aa_policy *policy, const char *prefix,
+ const char *name, gfp_t gfp);
+void aa_policy_destroy(struct aa_policy *policy);
+
+
+/*
+ * fn_label_build - abstract out the build of a label transition
+ * @L: label the transition is being computed for
+ * @P: profile parameter derived from L by this macro, can be passed to FN
+ * @GFP: memory allocation type to use
+ * @FN: fn to call for each profile transition. @P is set to the profile
+ *
+ * Returns: new label on success
+ * ERR_PTR if build @FN fails
+ * NULL if label_build fails due to low memory conditions
+ *
+ * @FN must return a label or ERR_PTR on failure. NULL is not allowed
+ */
+#define fn_label_build(L, P, GFP, FN) \
+({ \
+ __label__ __cleanup, __done; \
+ struct aa_label *__new_; \
+ \
+ if ((L)->size > 1) { \
+ /* TODO: add cache of transitions already done */ \
+ struct label_it __i; \
+ int __j, __k, __count; \
+ DEFINE_VEC(label, __lvec); \
+ DEFINE_VEC(profile, __pvec); \
+ if (vec_setup(label, __lvec, (L)->size, (GFP))) { \
+ __new_ = NULL; \
+ goto __done; \
+ } \
+ __j = 0; \
+ label_for_each(__i, (L), (P)) { \
+ __new_ = (FN); \
+ AA_BUG(!__new_); \
+ if (IS_ERR(__new_)) \
+ goto __cleanup; \
+ __lvec[__j++] = __new_; \
+ } \
+ for (__j = __count = 0; __j < (L)->size; __j++) \
+ __count += __lvec[__j]->size; \
+ if (!vec_setup(profile, __pvec, __count, (GFP))) { \
+ for (__j = __k = 0; __j < (L)->size; __j++) { \
+ label_for_each(__i, __lvec[__j], (P)) \
+ __pvec[__k++] = aa_get_profile(P); \
+ } \
+ __count -= aa_vec_unique(__pvec, __count, 0); \
+ if (__count > 1) { \
+ __new_ = aa_vec_find_or_create_label(__pvec,\
+ __count, (GFP)); \
+ /* only fails if out of Mem */ \
+ if (!__new_) \
+ __new_ = NULL; \
+ } else \
+ __new_ = aa_get_label(&__pvec[0]->label); \
+ vec_cleanup(profile, __pvec, __count); \
+ } else \
+ __new_ = NULL; \
+__cleanup: \
+ vec_cleanup(label, __lvec, (L)->size); \
+ } else { \
+ (P) = labels_profile(L); \
+ __new_ = (FN); \
+ } \
+__done: \
+ if (!__new_) \
+ AA_DEBUG("label build failed\n"); \
+ (__new_); \
+})
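+
+/*
+ * fn_label_build usage sketch (illustrative; profile_transition_fn() is
+ * a hypothetical per-profile helper returning a refcounted label or
+ * ERR_PTR):
+ *
+ *	struct aa_profile *p;
+ *	struct aa_label *new;
+ *
+ *	new = fn_label_build(label, p, GFP_KERNEL,
+ *			     profile_transition_fn(p, args));
+ *	if (IS_ERR_OR_NULL(new))
+ *		handle the error or allocation failure
+ */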
+
+
+#define __fn_build_in_ns(NS, P, NS_FN, OTHER_FN) \
+({ \
+ struct aa_label *__new; \
+ if ((P)->ns != (NS)) \
+ __new = (OTHER_FN); \
+ else \
+ __new = (NS_FN); \
+ (__new); \
+})
+
+#define fn_label_build_in_ns(L, P, GFP, NS_FN, OTHER_FN) \
+({ \
+ fn_label_build((L), (P), (GFP), \
+ __fn_build_in_ns(labels_ns(L), (P), (NS_FN), (OTHER_FN))); \
+})
+
+#endif /* __AA_LIB_H */
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
new file mode 100644
index 000000000..884489590
--- /dev/null
+++ b/security/apparmor/include/match.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy dfa matching engine definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ */
+
+#ifndef __AA_MATCH_H
+#define __AA_MATCH_H
+
+#include <linux/kref.h>
+
+#define DFA_NOMATCH 0
+#define DFA_START 1
+
+
+/**
+ * The format used for transition tables is based on the GNU flex table
+ * file format (--tables-file option; see Table File Format in the flex
+ * info pages and the flex sources for documentation). The magic number
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
+ * new tables have been defined, and the YY_ID_CHK (check) and YY_ID_DEF
+ * (default) tables are used slightly differently (see the apparmor-parser
+ * package).
+ *
+ *
+ * The data in the packed dfa is stored in network byte order, and the tables
+ * are arranged for flexibility. We convert the table data to host native
+ * byte order.
+ *
+ * The dfa begins with a table set header, and is followed by the actual
+ * tables.
+ */
+
+#define YYTH_MAGIC 0x1B5E783D
+#define YYTH_FLAG_DIFF_ENCODE 1
+#define YYTH_FLAG_OOB_TRANS 2
+#define YYTH_FLAGS (YYTH_FLAG_DIFF_ENCODE | YYTH_FLAG_OOB_TRANS)
+
+#define MAX_OOB_SUPPORTED 1
+
+struct table_set_header {
+ u32 th_magic; /* YYTH_MAGIC */
+ u32 th_hsize;
+ u32 th_ssize;
+ u16 th_flags;
+ char th_version[];
+};
+
+/* The YYTD_IDs are one less than the flex table mappings. The flex id
+ * has 1 subtracted at table load time, which allows the IDs to be used
+ * directly as indexes.
+ */
+#define YYTD_ID_ACCEPT 0
+#define YYTD_ID_BASE 1
+#define YYTD_ID_CHK 2
+#define YYTD_ID_DEF 3
+#define YYTD_ID_EC 4
+#define YYTD_ID_META 5
+#define YYTD_ID_ACCEPT2 6
+#define YYTD_ID_NXT 7
+#define YYTD_ID_TSIZE 8
+#define YYTD_ID_MAX 8
+
+#define YYTD_DATA8 1
+#define YYTD_DATA16 2
+#define YYTD_DATA32 4
+#define YYTD_DATA64 8
+
+/* The ACCEPT & ACCEPT2 tables get 6 dedicated flags each; the YYTD_DATAX
+ * values define the first flags
+ */
+#define ACCEPT1_FLAGS(X) ((X) & 0x3f)
+#define ACCEPT2_FLAGS(X) ACCEPT1_FLAGS((X) >> YYTD_ID_ACCEPT2)
+#define TO_ACCEPT1_FLAG(X) ACCEPT1_FLAGS(X)
+#define TO_ACCEPT2_FLAG(X) (ACCEPT1_FLAGS(X) << YYTD_ID_ACCEPT2)
+#define DFA_FLAG_VERIFY_STATES 0x1000
+
+struct table_header {
+ u16 td_id;
+ u16 td_flags;
+ u32 td_hilen;
+ u32 td_lolen;
+ char td_data[];
+};
+
+#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF]->td_data))
+#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE]->td_data))
+#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT]->td_data))
+#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK]->td_data))
+#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC]->td_data))
+#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT]->td_data))
+#define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2]->td_data))
+
+struct aa_dfa {
+ struct kref count;
+ u16 flags;
+ u32 max_oob;
+ struct table_header *tables[YYTD_ID_TSIZE];
+};
+
+extern struct aa_dfa *nulldfa;
+extern struct aa_dfa *stacksplitdfa;
+
+#define byte_to_byte(X) (X)
+
+#define UNPACK_ARRAY(TABLE, BLOB, LEN, TTYPE, BTYPE, NTOHX) \
+ do { \
+ typeof(LEN) __i; \
+ TTYPE *__t = (TTYPE *) TABLE; \
+ BTYPE *__b = (BTYPE *) BLOB; \
+ for (__i = 0; __i < LEN; __i++) { \
+ __t[__i] = NTOHX(__b[__i]); \
+ } \
+ } while (0)
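+
+/*
+ * UNPACK_ARRAY usage sketch (mirrors the table unpack code in match.c):
+ * convert a 16-bit table from its big-endian on-disk form to host byte
+ * order:
+ *
+ *	UNPACK_ARRAY(table->td_data, blob, count, u16, __be16, be16_to_cpu);
+ */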
+
+static inline size_t table_size(size_t len, size_t el_size)
+{
+ return ALIGN(sizeof(struct table_header) + len * el_size, 8);
+}
+
+int aa_setup_dfa_engine(void);
+void aa_teardown_dfa_engine(void);
+
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags);
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int len);
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+ const char *str);
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c);
+unsigned int aa_dfa_outofband_transition(struct aa_dfa *dfa,
+ unsigned int state);
+unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, const char **retpos);
+unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int n, const char **retpos);
+
+void aa_dfa_free_kref(struct kref *kref);
+
+#define WB_HISTORY_SIZE 24
+struct match_workbuf {
+ unsigned int count;
+ unsigned int pos;
+ unsigned int len;
+ unsigned int size; /* power of 2, same as history size */
+ unsigned int history[WB_HISTORY_SIZE];
+};
+#define DEFINE_MATCH_WB(N) \
+struct match_workbuf N = { \
+ .count = 0, \
+ .pos = 0, \
+ .len = 0, \
+}
+
+unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
+ const char *str, unsigned int *count);
+
+/**
+ * aa_get_dfa - increment refcount on dfa @dfa
+ * @dfa: dfa (MAYBE NULL)
+ *
+ * Returns: pointer to @dfa, or NULL if @dfa is NULL
+ * Requires: @dfa must be held with valid refcount when called
+ */
+static inline struct aa_dfa *aa_get_dfa(struct aa_dfa *dfa)
+{
+ if (dfa)
+ kref_get(&(dfa->count));
+
+ return dfa;
+}
+
+/**
+ * aa_put_dfa - put a dfa refcount
+ * @dfa: dfa to put refcount (MAYBE NULL)
+ *
+ * Requires: if @dfa != NULL that a valid refcount be held
+ */
+static inline void aa_put_dfa(struct aa_dfa *dfa)
+{
+ if (dfa)
+ kref_put(&dfa->count, aa_dfa_free_kref);
+}
+
+#define MATCH_FLAG_DIFF_ENCODE 0x80000000
+#define MARK_DIFF_ENCODE 0x40000000
+#define MATCH_FLAG_OOB_TRANSITION 0x20000000
+#define MATCH_FLAGS_MASK 0xff000000
+#define MATCH_FLAGS_VALID (MATCH_FLAG_DIFF_ENCODE | MATCH_FLAG_OOB_TRANSITION)
+#define MATCH_FLAGS_INVALID (MATCH_FLAGS_MASK & ~MATCH_FLAGS_VALID)
+
+#endif /* __AA_MATCH_H */
diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
new file mode 100644
index 000000000..a710683b2
--- /dev/null
+++ b/security/apparmor/include/mount.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor file mediation function definitions.
+ *
+ * Copyright 2017 Canonical Ltd.
+ */
+
+#ifndef __AA_MOUNT_H
+#define __AA_MOUNT_H
+
+#include <linux/fs.h>
+#include <linux/path.h>
+
+#include "domain.h"
+#include "policy.h"
+
+/* mount perms */
+#define AA_MAY_PIVOTROOT 0x01
+#define AA_MAY_MOUNT 0x02
+#define AA_MAY_UMOUNT 0x04
+#define AA_AUDIT_DATA 0x40
+#define AA_MNT_CONT_MATCH 0x40
+
+#define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
+
+int aa_remount(struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data);
+
+int aa_bind_mount(struct aa_label *label, const struct path *path,
+ const char *old_name, unsigned long flags);
+
+
+int aa_mount_change_type(struct aa_label *label, const struct path *path,
+ unsigned long flags);
+
+int aa_move_mount(struct aa_label *label, const struct path *path,
+ const char *old_name);
+
+int aa_new_mount(struct aa_label *label, const char *dev_name,
+ const struct path *path, const char *type, unsigned long flags,
+ void *data);
+
+int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
+
+int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+ const struct path *new_path);
+
+#endif /* __AA_MOUNT_H */
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
new file mode 100644
index 000000000..aadb4b29f
--- /dev/null
+++ b/security/apparmor/include/net.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#ifndef __AA_NET_H
+#define __AA_NET_H
+
+#include <net/sock.h>
+#include <linux/path.h>
+
+#include "apparmorfs.h"
+#include "label.h"
+#include "perms.h"
+#include "policy.h"
+
+#define AA_MAY_SEND AA_MAY_WRITE
+#define AA_MAY_RECEIVE AA_MAY_READ
+
+#define AA_MAY_SHUTDOWN AA_MAY_DELETE
+
+#define AA_MAY_CONNECT AA_MAY_OPEN
+#define AA_MAY_ACCEPT 0x00100000
+
+#define AA_MAY_BIND 0x00200000
+#define AA_MAY_LISTEN 0x00400000
+
+#define AA_MAY_SETOPT 0x01000000
+#define AA_MAY_GETOPT 0x02000000
+
+#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
+ AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
+ AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
+ AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
+
+#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
+ AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
+ AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
+ AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
+ AA_MAY_MPROT)
+
+#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
+ AA_MAY_ACCEPT)
+struct aa_sk_ctx {
+ struct aa_label *label;
+ struct aa_label *peer;
+};
+
+#define SK_CTX(X) ((X)->sk_security)
+#define SOCK_ctx(X) SOCK_INODE(X)->i_security
+#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
+ struct lsm_network_audit NAME ## _net = { .sk = (SK), \
+ .family = (F)}; \
+ DEFINE_AUDIT_DATA(NAME, \
+ ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
+ LSM_AUDIT_DATA_NONE, \
+ OP); \
+ NAME.u.net = &(NAME ## _net); \
+ aad(&NAME)->net.type = (T); \
+ aad(&NAME)->net.protocol = (P)
+
+#define DEFINE_AUDIT_SK(NAME, OP, SK) \
+ DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
+ (SK)->sk_protocol)
+
+
+#define af_select(FAMILY, FN, DEF_FN) \
+({ \
+ int __e; \
+ switch ((FAMILY)) { \
+ default: \
+ __e = DEF_FN; \
+ } \
+ __e; \
+})
+
+struct aa_secmark {
+ u8 audit;
+ u8 deny;
+ u32 secid;
+ char *label;
+};
+
+extern struct aa_sfs_entry aa_sfs_entry_network[];
+
+void audit_net_cb(struct audit_buffer *ab, void *va);
+int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ u32 request, u16 family, int type);
+int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+ int type, int protocol);
+static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
+ struct common_audit_data *sa,
+ u32 request,
+ struct sock *sk)
+{
+ return aa_profile_af_perm(profile, sa, request, sk->sk_family,
+ sk->sk_type);
+}
+int aa_sk_perm(const char *op, u32 request, struct sock *sk);
+
+int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+ struct socket *sock);
+
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, const struct sock *sk);
+
+#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
new file mode 100644
index 000000000..343189903
--- /dev/null
+++ b/security/apparmor/include/path.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic path manipulation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_PATH_H
+#define __AA_PATH_H
+
+enum path_flags {
+ PATH_IS_DIR = 0x1, /* path is a directory */
+ PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
+ PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */
+ PATH_CHROOT_NSCONNECT = 0x10, /* connect paths that are at ns root */
+
+ PATH_DELEGATE_DELETED = 0x10000, /* delegate deleted files */
+ PATH_MEDIATE_DELETED = 0x20000, /* mediate deleted paths */
+};
+
+int aa_path_name(const struct path *path, int flags, char *buffer,
+ const char **name, const char **info,
+ const char *disconnected);
+
+#define IN_ATOMIC true
+char *aa_get_buffer(bool in_atomic);
+void aa_put_buffer(char *buf);
+
+#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
new file mode 100644
index 000000000..13f20c598
--- /dev/null
+++ b/security/apparmor/include/perms.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic permission sets definitions.
+ *
+ * Copyright 2017 Canonical Ltd.
+ */
+
+#ifndef __AA_PERM_H
+#define __AA_PERM_H
+
+#include <linux/fs.h>
+#include "label.h"
+
+#define AA_MAY_EXEC MAY_EXEC
+#define AA_MAY_WRITE MAY_WRITE
+#define AA_MAY_READ MAY_READ
+#define AA_MAY_APPEND MAY_APPEND
+
+#define AA_MAY_CREATE 0x0010
+#define AA_MAY_DELETE 0x0020
+#define AA_MAY_OPEN 0x0040
+#define AA_MAY_RENAME 0x0080 /* pair */
+
+#define AA_MAY_SETATTR 0x0100 /* meta write */
+#define AA_MAY_GETATTR 0x0200 /* meta read */
+#define AA_MAY_SETCRED 0x0400 /* security cred/attr */
+#define AA_MAY_GETCRED 0x0800
+
+#define AA_MAY_CHMOD 0x1000 /* pair */
+#define AA_MAY_CHOWN 0x2000 /* pair */
+#define AA_MAY_CHGRP 0x4000 /* pair */
+#define AA_MAY_LOCK 0x8000 /* LINK_SUBSET overlaid */
+
+#define AA_EXEC_MMAP 0x00010000
+#define AA_MAY_MPROT 0x00020000 /* extend conditions */
+#define AA_MAY_LINK 0x00040000 /* pair */
+#define AA_MAY_SNAPSHOT 0x00080000 /* pair */
+
+#define AA_MAY_DELEGATE
+#define AA_CONT_MATCH 0x08000000
+
+#define AA_MAY_STACK 0x10000000
+#define AA_MAY_ONEXEC 0x20000000 /* either stack or change_profile */
+#define AA_MAY_CHANGE_PROFILE 0x40000000
+#define AA_MAY_CHANGEHAT 0x80000000
+
+#define AA_LINK_SUBSET AA_MAY_LOCK /* overlaid */
+
+
+#define PERMS_CHRS_MASK (MAY_READ | MAY_WRITE | AA_MAY_CREATE | \
+ AA_MAY_DELETE | AA_MAY_LINK | AA_MAY_LOCK | \
+ AA_MAY_EXEC | AA_EXEC_MMAP | AA_MAY_APPEND)
+
+#define PERMS_NAMES_MASK (PERMS_CHRS_MASK | AA_MAY_OPEN | AA_MAY_RENAME | \
+ AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_SETCRED | \
+ AA_MAY_GETCRED | AA_MAY_CHMOD | AA_MAY_CHOWN | \
+ AA_MAY_CHGRP | AA_MAY_MPROT | AA_MAY_SNAPSHOT | \
+ AA_MAY_STACK | AA_MAY_ONEXEC | \
+ AA_MAY_CHANGE_PROFILE | AA_MAY_CHANGEHAT)
+
+extern const char aa_file_perm_chrs[];
+extern const char *aa_file_perm_names[];
+
+struct aa_perms {
+ u32 allow;
+ u32 audit; /* set only when allow is set */
+
+ u32 deny; /* explicit deny, or conflict if allow also set */
+ u32 quiet; /* set only when ~allow | deny */
+ u32 kill; /* set only when ~allow | deny */
+ u32 stop; /* set only when ~allow | deny */
+
+ u32 complain; /* accumulates only used when ~allow & ~deny */
+ u32 cond; /* set only when ~allow and ~deny */
+
+ u32 hide; /* set only when ~allow | deny */
+ u32 prompt; /* accumulates only used when ~allow & ~deny */
+
+ /* Reserved:
+ * u32 subtree; / * set only when allow is set * /
+ */
+ u16 xindex;
+};
+
+#define ALL_PERMS_MASK 0xffffffff
+extern struct aa_perms nullperms;
+extern struct aa_perms allperms;
+
+
+#define xcheck(FN1, FN2) \
+({ \
+ int e, error = FN1; \
+ e = FN2; \
+ if (e) \
+ error = e; \
+ error; \
+})
+
+
+/*
+ * TODO: update for labels pointing to labels instead of profiles
+ * TODO: optimize the walk, currently does subwalk of L2 for each P in L1
+ * gah this doesn't allow for label compound check!!!!
+ */
+#define xcheck_ns_profile_profile(P1, P2, FN, args...) \
+({ \
+ int ____e = 0; \
+ if (P1->ns == P2->ns) \
+ ____e = FN((P1), (P2), args); \
+ (____e); \
+})
+
+#define xcheck_ns_profile_label(P, L, FN, args...) \
+({ \
+ struct aa_profile *__p2; \
+ fn_for_each((L), __p2, \
+ xcheck_ns_profile_profile((P), __p2, (FN), args)); \
+})
+
+#define xcheck_ns_labels(L1, L2, FN, args...) \
+({ \
+ struct aa_profile *__p1; \
+ fn_for_each((L1), __p1, FN(__p1, (L2), args)); \
+})
+
+/* Do the cross check but applying FN at the profiles level */
+#define xcheck_labels_profiles(L1, L2, FN, args...) \
+ xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
+
+#define xcheck_labels(L1, L2, P, FN1, FN2) \
+ xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
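+
+/*
+ * Cross-check usage sketch (illustrative; profile_perm() is a
+ * hypothetical per-profile check returning 0 or -errno). The pairwise
+ * permission is checked in both directions, with @profile bound to each
+ * profile of the label being walked:
+ *
+ *	error = xcheck_labels(sender, target, profile,
+ *			profile_perm(profile, target, MAY_WRITE, &sa),
+ *			profile_perm(profile, sender, MAY_READ, &sa));
+ */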
+
+
+void aa_perm_mask_to_str(char *str, size_t str_size, const char *chrs,
+ u32 mask);
+void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
+ u32 mask);
+void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+ u32 chrsmask, const char * const *names, u32 namesmask);
+void aa_apply_modes_to_perms(struct aa_profile *profile,
+ struct aa_perms *perms);
+void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
+ struct aa_perms *perms);
+void aa_perms_accum(struct aa_perms *accum, struct aa_perms *addend);
+void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend);
+void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label,
+ int type, u32 request, struct aa_perms *perms);
+int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+ struct common_audit_data *sa);
+int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ u32 request, struct common_audit_data *sa,
+ void (*cb)(struct audit_buffer *, void *));
+#endif /* __AA_PERM_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
new file mode 100644
index 000000000..639b5b248
--- /dev/null
+++ b/security/apparmor/include/policy.h
@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_POLICY_H
+#define __AA_POLICY_H
+
+#include <linux/capability.h>
+#include <linux/cred.h>
+#include <linux/kref.h>
+#include <linux/rhashtable.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+
+#include "apparmor.h"
+#include "audit.h"
+#include "capability.h"
+#include "domain.h"
+#include "file.h"
+#include "lib.h"
+#include "label.h"
+#include "net.h"
+#include "perms.h"
+#include "resource.h"
+
+
+struct aa_ns;
+
+extern int unprivileged_userns_apparmor_policy;
+
+extern const char *const aa_profile_mode_names[];
+#define APPARMOR_MODE_NAMES_MAX_INDEX 4
+
+#define PROFILE_MODE(_profile, _mode) \
+ ((aa_g_profile_mode == (_mode)) || \
+ ((_profile)->mode == (_mode)))
+
+#define COMPLAIN_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_COMPLAIN)
+
+#define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL)
+
+#define PROFILE_IS_HAT(_profile) ((_profile)->label.flags & FLAG_HAT)
+
+#define CHECK_DEBUG1(_profile) ((_profile)->label.flags & FLAG_DEBUG1)
+
+#define CHECK_DEBUG2(_profile) ((_profile)->label.flags & FLAG_DEBUG2)
+
+#define profile_is_stale(_profile) (label_is_stale(&(_profile)->label))
+
+#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+
+/*
+ * FIXME: currently need a clean way to replace and remove profiles as a
+ * set. It should be done at the namespace level.
+ * Either, with a set of profiles loaded at the namespace level or via
+ * a mark and remove marked interface.
+ */
+enum profile_mode {
+ APPARMOR_ENFORCE, /* enforce access rules */
+ APPARMOR_COMPLAIN, /* allow and log access violations */
+ APPARMOR_KILL, /* kill task on access violation */
+ APPARMOR_UNCONFINED, /* profile set to unconfined */
+};
+
+
+/* struct aa_policydb - match engine for a policy
+ * dfa: dfa pattern match
+ * start: set of start states for the different classes of data
+ */
+struct aa_policydb {
+ /* Generic policy DFA; specific rule types will be subsections of it */
+ struct aa_dfa *dfa;
+ unsigned int start[AA_CLASS_LAST + 1];
+
+};
+
+/* struct aa_data - generic data structure
+ * key: name for retrieving this data
+ * size: size of data in bytes
+ * data: binary data
+ * head: reserved for rhashtable
+ */
+struct aa_data {
+ char *key;
+ u32 size;
+ char *data;
+ struct rhash_head head;
+};
+
+
+/* struct aa_profile - basic confinement data
+ * @base - base components of the profile (name, refcount, lists, lock ...)
+ * @label - label this profile is an extension of
+ * @parent: parent of profile
+ * @ns: namespace the profile is in
+ * @rename: optional profile name that this profile renamed
+ * @attach: human readable attachment string
+ * @xmatch: optional extended matching for unconfined executables names
+ * @xmatch_len: xmatch prefix len, used to determine xmatch priority
+ * @audit: the auditing mode of the profile
+ * @mode: the enforcement mode of the profile
+ * @path_flags: flags controlling path generation behavior
+ * @disconnected: what to prepend if attach_disconnected is specified
+ * @size: the memory consumed by this profiles rules
+ * @policy: general match rules governing policy
+ * @file: The set of rules governing basic file access and domain transitions
+ * @caps: capabilities for the profile
+ * @rlimits: rlimits for the profile
+ *
+ * @dents: dentries for the profiles file entries in apparmorfs
+ * @dirname: name of the profile dir in apparmorfs
+ * @data: hashtable for free-form policy aa_data
+ *
+ * The AppArmor profile contains the basic confinement data. Each profile
+ * has a name, and exists in a namespace. The @name and @exec_match are
+ * used to determine profile attachment against unconfined tasks. All other
+ * attachments are determined by profile X transition rules.
+ *
+ * Profiles have a hierarchy where hats and children profiles keep
+ * a reference to their parent.
+ *
+ * Profile names can not begin with a : and can not contain the \0
+ * character. If a profile name begins with / it will be considered when
+ * determining profile attachment on "unconfined" tasks.
+ */
+struct aa_profile {
+ struct aa_policy base;
+ struct aa_profile __rcu *parent;
+
+ struct aa_ns *ns;
+ const char *rename;
+
+ const char *attach;
+ struct aa_dfa *xmatch;
+ unsigned int xmatch_len;
+ enum audit_mode audit;
+ long mode;
+ u32 path_flags;
+ const char *disconnected;
+ int size;
+
+ struct aa_policydb policy;
+ struct aa_file_rules file;
+ struct aa_caps caps;
+
+ int xattr_count;
+ char **xattrs;
+
+ struct aa_rlimit rlimits;
+
+ int secmark_count;
+ struct aa_secmark *secmark;
+
+ struct aa_loaddata *rawdata;
+ unsigned char *hash;
+ char *dirname;
+ struct dentry *dents[AAFS_PROF_SIZEOF];
+ struct rhashtable *data;
+ struct aa_label label;
+};
+
+extern enum profile_mode aa_g_profile_mode;
+
+#define AA_MAY_LOAD_POLICY AA_MAY_APPEND
+#define AA_MAY_REPLACE_POLICY AA_MAY_WRITE
+#define AA_MAY_REMOVE_POLICY AA_MAY_DELETE
+
+#define profiles_ns(P) ((P)->ns)
+#define name_is_shared(A, B) ((A)->hname && (A)->hname == (B)->hname)
+
+void aa_add_profile(struct aa_policy *common, struct aa_profile *profile);
+
+
+void aa_free_proxy_kref(struct kref *kref);
+struct aa_profile *aa_alloc_profile(const char *name, struct aa_proxy *proxy,
+ gfp_t gfp);
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
+ const char *base, gfp_t gfp);
+void aa_free_profile(struct aa_profile *profile);
+void aa_free_profile_kref(struct kref *kref);
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
+struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname,
+ size_t n);
+struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *name);
+struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
+ const char *fqname, size_t n);
+struct aa_profile *aa_match_profile(struct aa_ns *ns, const char *name);
+
+ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_label *label,
+ u32 mask, struct aa_loaddata *udata);
+ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_label *label,
+ char *name, size_t size);
+void __aa_profile_list_release(struct list_head *head);
+
+#define PROF_ADD 1
+#define PROF_REPLACE 0
+
+#define profile_unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
+
+/**
+ * aa_get_newest_profile - simple wrapper fn to wrap the label version
+ * @p: profile (NOT NULL)
+ *
+ * Returns: refcounted newest version of the profile (which may be @p)
+ *
+ * Requires: @p must be held with a valid refcount
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+ return labels_profile(aa_get_newest_label(&p->label));
+}
+
+static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile,
+ unsigned char class)
+{
+ if (class <= AA_CLASS_LAST)
+ return profile->policy.start[class];
+ else
+ return aa_dfa_match_len(profile->policy.dfa,
+ profile->policy.start[0], &class, 1);
+}
+
+static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
+ u16 AF) {
+ unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
+ __be16 be_af = cpu_to_be16(AF);
+
+ if (!state)
+ return 0;
+ return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
+}
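+
+/*
+ * Mediation-gating sketch (the pattern used by the mediation code,
+ * e.g. the signal checks): a start state of 0 means the profile does
+ * not mediate the class, so the access is simply allowed:
+ *
+ *	unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_SIGNAL);
+ *	if (!state)
+ *		return 0;
+ */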
+
+/**
+ * aa_get_profile - increment refcount on profile @p
+ * @p: profile (MAYBE NULL)
+ *
+ * Returns: pointer to @p, or NULL if @p is NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
+{
+ if (p)
+ kref_get(&(p->label.count));
+
+ return p;
+}
+
+/**
+ * aa_get_profile_not0 - increment refcount on profile @p found via lookup
+ * @p: profile (MAYBE NULL)
+ *
+ * Returns: pointer to @p, or NULL if @p is NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
+{
+ if (p && kref_get_unless_zero(&p->label.count))
+ return p;
+
+ return NULL;
+}
+
+/**
+ * aa_get_profile_rcu - increment refcount on a profile that can be replaced
+ * @p: pointer to profile that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted profile, else NULL if no profile
+ */
+static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
+{
+ struct aa_profile *c;
+
+ rcu_read_lock();
+ do {
+ c = rcu_dereference(*p);
+ } while (c && !kref_get_unless_zero(&c->label.count));
+ rcu_read_unlock();
+
+ return c;
+}
+
+/**
+ * aa_put_profile - decrement refcount on profile @p
+ * @p: profile (MAYBE NULL)
+ */
+static inline void aa_put_profile(struct aa_profile *p)
+{
+ if (p)
+ kref_put(&p->label.count, aa_label_kref);
+}
+
+static inline int AUDIT_MODE(struct aa_profile *profile)
+{
+ if (aa_g_audit != AUDIT_NORMAL)
+ return aa_g_audit;
+
+ return profile->audit;
+}
+
+bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
+bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
+int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
+ u32 mask);
+bool aa_current_policy_view_capable(struct aa_ns *ns);
+bool aa_current_policy_admin_capable(struct aa_ns *ns);
+
+#endif /* __AA_POLICY_H */
diff --git a/security/apparmor/include/policy_ns.h b/security/apparmor/include/policy_ns.h
new file mode 100644
index 000000000..33d665516
--- /dev/null
+++ b/security/apparmor/include/policy_ns.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#ifndef __AA_NAMESPACE_H
+#define __AA_NAMESPACE_H
+
+#include <linux/kref.h>
+
+#include "apparmor.h"
+#include "apparmorfs.h"
+#include "label.h"
+#include "policy.h"
+
+
+/* struct aa_ns_acct - accounting of profiles in namespace
+ * @max_size: maximum space allowed for all profiles in namespace
+ * @max_count: maximum number of profiles that can be in this namespace
+ * @size: current size of profiles
+ * @count: current count of profiles (includes null profiles)
+ */
+struct aa_ns_acct {
+ int max_size;
+ int max_count;
+ int size;
+ int count;
+};
+
+/* struct aa_ns - namespace for a set of profiles
+ * @base: common policy
+ * @parent: parent of namespace
+ * @lock: lock for modifying the object
+ * @acct: accounting for the namespace
+ * @unconfined: special unconfined profile for the namespace
+ * @sub_ns: list of namespaces under the current namespace.
+ * @uniq_null: uniq value used for null learning profiles
+ * @uniq_id: a unique id count for the profiles in the namespace
+ * @level: level of ns within the tree hierarchy
+ * @dents: dentries for the namespaces file entries in apparmorfs
+ *
+ * An aa_ns defines the set profiles that are searched to determine which
+ * profile to attach to a task. Profiles can not be shared between aa_ns
+ * and profile names within a namespace are guaranteed to be unique. When
+ * profiles in separate namespaces have the same name they are NOT considered
+ * to be equivalent.
+ *
+ * Namespaces are hierarchical and only namespaces and profiles below the
+ * current namespace are visible.
+ *
+ * Namespace names must be unique and can not contain the characters :/\0
+ */
+struct aa_ns {
+ struct aa_policy base;
+ struct aa_ns *parent;
+ struct mutex lock;
+ struct aa_ns_acct acct;
+ struct aa_profile *unconfined;
+ struct list_head sub_ns;
+ atomic_t uniq_null;
+ long uniq_id;
+ int level;
+ long revision;
+ wait_queue_head_t wait;
+
+ struct aa_labelset labels;
+ struct list_head rawdata_list;
+
+ struct dentry *dents[AAFS_NS_SIZEOF];
+};
+
+extern struct aa_label *kernel_t;
+extern struct aa_ns *root_ns;
+
+extern const char *aa_hidden_ns_name;
+
+#define ns_unconfined(NS) (&(NS)->unconfined->label)
+
+bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns);
+const char *aa_ns_name(struct aa_ns *parent, struct aa_ns *child, bool subns);
+void aa_free_ns(struct aa_ns *ns);
+int aa_alloc_root_ns(void);
+void aa_free_root_ns(void);
+void aa_free_ns_kref(struct kref *kref);
+
+struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name);
+struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n);
+struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n);
+struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n);
+struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
+ struct dentry *dir);
+struct aa_ns *aa_prepare_ns(struct aa_ns *root, const char *name);
+void __aa_remove_ns(struct aa_ns *ns);
+
+static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
+{
+ return rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
+}
+
+/**
+ * aa_get_ns - increment reference count on @ns
+ * @ns: namespace to increment reference count of (MAYBE NULL)
+ *
+ * Returns: pointer to @ns, or NULL if @ns is NULL
+ * Requires: @ns must be held with valid refcount when called
+ */
+static inline struct aa_ns *aa_get_ns(struct aa_ns *ns)
+{
+ if (ns)
+ aa_get_profile(ns->unconfined);
+
+ return ns;
+}
+
+/**
+ * aa_put_ns - decrement refcount on @ns
+ * @ns: namespace to put reference of
+ *
+ * Decrement reference count of @ns and if no longer in use free it
+ */
+static inline void aa_put_ns(struct aa_ns *ns)
+{
+ if (ns)
+ aa_put_profile(ns->unconfined);
+}
+
+/**
+ * __aa_findn_ns - find a namespace on a list by @name
+ * @head: list to search for namespace on (NOT NULL)
+ * @name: name of namespace to look for (NOT NULL)
+ * @n: length of @name
+ * Returns: unrefcounted namespace
+ *
+ * Requires: rcu_read_lock be held
+ */
+static inline struct aa_ns *__aa_findn_ns(struct list_head *head,
+ const char *name, size_t n)
+{
+ return (struct aa_ns *)__policy_strn_find(head, name, n);
+}
+
+static inline struct aa_ns *__aa_find_ns(struct list_head *head,
+ const char *name)
+{
+ return __aa_findn_ns(head, name, strlen(name));
+}
+
+static inline struct aa_ns *__aa_lookup_ns(struct aa_ns *base,
+ const char *hname)
+{
+ return __aa_lookupn_ns(base, hname, strlen(hname));
+}
+
+static inline struct aa_ns *aa_lookup_ns(struct aa_ns *view, const char *name)
+{
+ return aa_lookupn_ns(view, name, strlen(name));
+}
+
+#endif /* AA_NAMESPACE_H */
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
new file mode 100644
index 000000000..e89b70144
--- /dev/null
+++ b/security/apparmor/include/policy_unpack.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __POLICY_INTERFACE_H
+#define __POLICY_INTERFACE_H
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/dcache.h>
+#include <linux/workqueue.h>
+
+struct aa_load_ent {
+ struct list_head list;
+ struct aa_profile *new;
+ struct aa_profile *old;
+ struct aa_profile *rename;
+ const char *ns_name;
+};
+
+void aa_load_ent_free(struct aa_load_ent *ent);
+struct aa_load_ent *aa_load_ent_alloc(void);
+
+#define PACKED_FLAG_HAT 1
+#define PACKED_FLAG_DEBUG1 2
+#define PACKED_FLAG_DEBUG2 4
+
+#define PACKED_MODE_ENFORCE 0
+#define PACKED_MODE_COMPLAIN 1
+#define PACKED_MODE_KILL 2
+#define PACKED_MODE_UNCONFINED 3
+
+struct aa_ns;
+
+enum {
+ AAFS_LOADDATA_ABI = 0,
+ AAFS_LOADDATA_REVISION,
+ AAFS_LOADDATA_HASH,
+ AAFS_LOADDATA_DATA,
+ AAFS_LOADDATA_COMPRESSED_SIZE,
+ AAFS_LOADDATA_DIR, /* must be last actual entry */
+ AAFS_LOADDATA_NDENTS /* count of entries */
+};
+
+/*
+ * The AppArmor interface treats data as a type byte followed by the
+ * actual data. The interface has the notion of a named entry
+ * which has a name (AA_NAME typecode followed by name string) followed by
+ * the entry's typecode and data. Named types allow for optional
+ * elements and extensions to be added and tested for without breaking
+ * backwards compatibility.
+ */
+
+enum aa_code {
+ AA_U8,
+ AA_U16,
+ AA_U32,
+ AA_U64,
+ AA_NAME, /* same as string except it is items name */
+ AA_STRING,
+ AA_BLOB,
+ AA_STRUCT,
+ AA_STRUCTEND,
+ AA_LIST,
+ AA_LISTEND,
+ AA_ARRAY,
+ AA_ARRAYEND,
+};
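+
+/*
+ * Encoding sketch (an assumed illustration; apparmor_parser is the
+ * authoritative reference for the format): a named u32 entry "version"
+ * with value 2 would be laid out roughly as:
+ *
+ *	[AA_NAME][u16 len]["version\0"][AA_U32][u32 value 2]
+ */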
+
+/*
+ * aa_ext tracks the read position in the buffer containing the serialized
+ * profile. The
+ * data is copied into a kernel buffer in apparmorfs and then handed off to
+ * the unpack routines.
+ */
+struct aa_ext {
+ void *start;
+ void *end;
+ void *pos; /* pointer to current position in the buffer */
+ u32 version;
+};
+
+/*
+ * struct aa_loaddata - buffer of policy raw_data set
+ *
+ * There is no loaddata ref for being on the ns list, nor a ref from
+ * d_inode(@dentry). When grabbing a ref from these, @ns->lock must be
+ * held and __aa_get_loaddata() must be used, with its return value
+ * checked; if it is NULL the loaddata is already being reaped and
+ * should be considered dead.
+ */
+struct aa_loaddata {
+ struct kref count;
+ struct list_head list;
+ struct work_struct work;
+ struct dentry *dents[AAFS_LOADDATA_NDENTS];
+ struct aa_ns *ns;
+ char *name;
+ size_t size; /* the original size of the payload */
+ size_t compressed_size; /* the compressed size of the payload */
+ long revision; /* the ns policy revision this caused */
+ int abi;
+ unsigned char *hash;
+
+ /* Pointer to payload. If @compressed_size > 0, then this is the
+ * compressed version of the payload, else it is the uncompressed
+ * version (with the size indicated by @size).
+ */
+ char *data;
+};
+
+int aa_unpack(struct aa_loaddata *udata, struct list_head *lh, const char **ns);
+
+/**
+ * __aa_get_loaddata - get a reference count to uncounted data reference
+ * @data: reference to get a count on
+ *
+ * Returns: pointer to reference OR NULL if race is lost and reference is
+ * being reaped.
+ * Requires: @data->ns->lock held, and the return code MUST be checked
+ *
+ * Use only from inode->i_private and @data->list found references
+ */
+static inline struct aa_loaddata *
+__aa_get_loaddata(struct aa_loaddata *data)
+{
+ if (data && kref_get_unless_zero(&(data->count)))
+ return data;
+
+ return NULL;
+}
+
+/**
+ * aa_get_loaddata - get a reference count from a counted data reference
+ * @data: reference to get a count on
+ *
+ * Returns: pointer to reference
+ * Requires: @data to have a valid reference count on it. It is a bug
+ * if the race to reap can be encountered when it is used.
+ */
+static inline struct aa_loaddata *
+aa_get_loaddata(struct aa_loaddata *data)
+{
+ struct aa_loaddata *tmp = __aa_get_loaddata(data);
+
+ AA_BUG(data && !tmp);
+
+ return tmp;
+}
+
+void __aa_loaddata_update(struct aa_loaddata *data, long revision);
+bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r);
+void aa_loaddata_kref(struct kref *kref);
+struct aa_loaddata *aa_loaddata_alloc(size_t size);
+static inline void aa_put_loaddata(struct aa_loaddata *data)
+{
+ if (data)
+ kref_put(&data->count, aa_loaddata_kref);
+}
+
+#if IS_ENABLED(CONFIG_KUNIT)
+bool aa_inbounds(struct aa_ext *e, size_t size);
+size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk);
+bool aa_unpack_X(struct aa_ext *e, enum aa_code code);
+bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name);
+bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name);
+bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name);
+size_t aa_unpack_array(struct aa_ext *e, const char *name);
+size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name);
+int aa_unpack_str(struct aa_ext *e, const char **string, const char *name);
+int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name);
+#endif
+
+#endif /* __POLICY_INTERFACE_H */
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
new file mode 100644
index 000000000..31689437e
--- /dev/null
+++ b/security/apparmor/include/procattr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_PROCATTR_H
+#define __AA_PROCATTR_H
+
+int aa_getprocattr(struct aa_label *label, char **string);
+int aa_setprocattr_changehat(char *args, size_t size, int flags);
+
+#endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
new file mode 100644
index 000000000..961d85d32
--- /dev/null
+++ b/security/apparmor/include/resource.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource limits function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#ifndef __AA_RESOURCE_H
+#define __AA_RESOURCE_H
+
+#include <linux/resource.h>
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_profile;
+
+/* struct aa_rlimit - rlimit settings for the profile
+ * @mask: which hard limits to set
+ * @limits: rlimit values that override task limits
+ *
+ * AppArmor rlimits are used to set confined task rlimits. Only the
+ * limits specified in @mask will be controlled by apparmor.
+ */
+struct aa_rlimit {
+ unsigned int mask;
+ struct rlimit limits[RLIM_NLIMITS];
+};
+
+extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
+
+int aa_map_resource(int resource);
+int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim);
+
+void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
+
+static inline void aa_free_rlimit_rules(struct aa_rlimit *rlims)
+{
+ /* NOP */
+}
+
+#endif /* __AA_RESOURCE_H */
diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h
new file mode 100644
index 000000000..a912a5d5d
--- /dev/null
+++ b/security/apparmor/include/secid.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (secid) definitions
+ *
+ * Copyright 2009-2018 Canonical Ltd.
+ */
+
+#ifndef __AA_SECID_H
+#define __AA_SECID_H
+
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct aa_label;
+
+/* secid value that will not be allocated */
+#define AA_SECID_INVALID 0
+
+/* secid value that matches any other secid */
+#define AA_SECID_WILDCARD 1
+
+/* sysctl to enable displaying mode when converting secid to secctx */
+extern int apparmor_display_secid_mode;
+
+struct aa_label *aa_secid_to_label(u32 secid);
+int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
+int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
+void apparmor_release_secctx(char *secdata, u32 seclen);
+
+
+int aa_alloc_secid(struct aa_label *label, gfp_t gfp);
+void aa_free_secid(u32 secid);
+void aa_secid_update(u32 secid, struct aa_label *label);
+
+#endif /* __AA_SECID_H */
diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
new file mode 100644
index 000000000..cbf7a997e
--- /dev/null
+++ b/security/apparmor/include/sig_names.h
@@ -0,0 +1,101 @@
+#include <linux/signal.h>
+
+#define SIGUNKNOWN 0
+#define MAXMAPPED_SIG 35
+#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
+#define SIGRT_BASE 128
+
+/* Provide a mapping of arch signal to internal signal # for mediation.
+ * Signals that are always an alias (SIGCLD for SIGCHLD, SIGPOLL for
+ * SIGIO) map to the same entry; those that may or may not be defined
+ * on a given arch get a separate entry.
+ */
+static const int sig_map[MAXMAPPED_SIG] = {
+ [0] = MAXMAPPED_SIG, /* existence test */
+ [SIGHUP] = 1,
+ [SIGINT] = 2,
+ [SIGQUIT] = 3,
+ [SIGILL] = 4,
+ [SIGTRAP] = 5, /* -, 5, - */
+ [SIGABRT] = 6, /* SIGIOT: -, 6, - */
+ [SIGBUS] = 7, /* 10, 7, 10 */
+ [SIGFPE] = 8,
+ [SIGKILL] = 9,
+ [SIGUSR1] = 10, /* 30, 10, 16 */
+ [SIGSEGV] = 11,
+ [SIGUSR2] = 12, /* 31, 12, 17 */
+ [SIGPIPE] = 13,
+ [SIGALRM] = 14,
+ [SIGTERM] = 15,
+#ifdef SIGSTKFLT
+ [SIGSTKFLT] = 16, /* -, 16, - */
+#endif
+ [SIGCHLD] = 17, /* 20, 17, 18. SIGCHLD -, -, 18 */
+ [SIGCONT] = 18, /* 19, 18, 25 */
+ [SIGSTOP] = 19, /* 17, 19, 23 */
+ [SIGTSTP] = 20, /* 18, 20, 24 */
+ [SIGTTIN] = 21, /* 21, 21, 26 */
+ [SIGTTOU] = 22, /* 22, 22, 27 */
+ [SIGURG] = 23, /* 16, 23, 21 */
+ [SIGXCPU] = 24, /* 24, 24, 30 */
+ [SIGXFSZ] = 25, /* 25, 25, 31 */
+ [SIGVTALRM] = 26, /* 26, 26, 28 */
+ [SIGPROF] = 27, /* 27, 27, 29 */
+ [SIGWINCH] = 28, /* 28, 28, 20 */
+ [SIGIO] = 29, /* SIGPOLL: 23, 29, 22 */
+ [SIGPWR] = 30, /* 29, 30, 19. SIGINFO 29, -, - */
+#ifdef SIGSYS
+ [SIGSYS] = 31, /* 12, 31, 12. often SIG LOST/UNUSED */
+#endif
+#ifdef SIGEMT
+ [SIGEMT] = 32, /* 7, - , 7 */
+#endif
+#if defined(SIGLOST) && SIGPWR != SIGLOST /* sparc */
+ [SIGLOST] = 33, /* unused on Linux */
+#endif
+#if defined(SIGUNUSED) && \
+ defined(SIGLOST) && defined(SIGSYS) && SIGLOST != SIGSYS
+ [SIGUNUSED] = 34, /* -, 31, - */
+#endif
+};
+
+/* this table is ordered post sig_map[sig] mapping */
+static const char *const sig_names[MAXMAPPED_SIGNAME] = {
+ "unknown",
+ "hup",
+ "int",
+ "quit",
+ "ill",
+ "trap",
+ "abrt",
+ "bus",
+ "fpe",
+ "kill",
+ "usr1",
+ "segv",
+ "usr2",
+ "pipe",
+ "alrm",
+ "term",
+ "stkflt",
+ "chld",
+ "cont",
+ "stop",
+ "stp",
+ "ttin",
+ "ttou",
+ "urg",
+ "xcpu",
+ "xfsz",
+ "vtalrm",
+ "prof",
+ "winch",
+ "io",
+ "pwr",
+ "sys",
+ "emt",
+ "lost",
+ "unused",
+
+ "exists", /* always last existence test mapped to MAXMAPPED_SIG */
+};
+
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
new file mode 100644
index 000000000..13437d62c
--- /dev/null
+++ b/security/apparmor/include/task.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor task related definitions and mediation
+ *
+ * Copyright 2017 Canonical Ltd.
+ */
+
+#ifndef __AA_TASK_H
+#define __AA_TASK_H
+
+static inline struct aa_task_ctx *task_ctx(struct task_struct *task)
+{
+ return task->security + apparmor_blob_sizes.lbs_task;
+}
+
+/*
+ * struct aa_task_ctx - information for current task label change
+ * @nnp: snapshot of label at time of no_new_privs
+ * @onexec: profile to transition to on next exec (MAY BE NULL)
+ * @previous: profile the task may return to (MAY BE NULL)
+ * @token: magic value the task must know for returning to @previous
+ */
+struct aa_task_ctx {
+ struct aa_label *nnp;
+ struct aa_label *onexec;
+ struct aa_label *previous;
+ u64 token;
+};
+
+int aa_replace_current_label(struct aa_label *label);
+int aa_set_current_onexec(struct aa_label *label, bool stack);
+int aa_set_current_hat(struct aa_label *label, u64 token);
+int aa_restore_previous_label(u64 cookie);
+struct aa_label *aa_get_task_label(struct task_struct *task);
+
+/**
+ * aa_free_task_ctx - free a task_ctx
+ * @ctx: task_ctx to free (MAYBE NULL)
+ */
+static inline void aa_free_task_ctx(struct aa_task_ctx *ctx)
+{
+ if (ctx) {
+ aa_put_label(ctx->nnp);
+ aa_put_label(ctx->previous);
+ aa_put_label(ctx->onexec);
+ }
+}
+
+/**
+ * aa_dup_task_ctx - duplicate a task context, incrementing reference counts
+ * @new: a blank task context (NOT NULL)
+ * @old: the task context to copy (NOT NULL)
+ */
+static inline void aa_dup_task_ctx(struct aa_task_ctx *new,
+ const struct aa_task_ctx *old)
+{
+ *new = *old;
+ aa_get_label(new->nnp);
+ aa_get_label(new->previous);
+ aa_get_label(new->onexec);
+}
+
+/**
+ * aa_clear_task_ctx_trans - clear transition tracking info from the ctx
+ * @ctx: task context to clear (NOT NULL)
+ */
+static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
+{
+ AA_BUG(!ctx);
+
+ aa_put_label(ctx->previous);
+ aa_put_label(ctx->onexec);
+ ctx->previous = NULL;
+ ctx->onexec = NULL;
+ ctx->token = 0;
+}
+
+#define AA_PTRACE_TRACE MAY_WRITE
+#define AA_PTRACE_READ MAY_READ
+#define AA_MAY_BE_TRACED AA_MAY_APPEND
+#define AA_MAY_BE_READ AA_MAY_CREATE
+#define PTRACE_PERM_SHIFT 2
+
+#define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \
+ AA_MAY_BE_READ | AA_MAY_BE_TRACED)
+#define AA_SIGNAL_PERM_MASK (MAY_READ | MAY_WRITE)
+
+#define AA_SFS_SIG_MASK "hup int quit ill trap abrt bus fpe kill usr1 " \
+ "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
+ "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
+
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ u32 request);
+
+
+#endif /* __AA_TASK_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
new file mode 100644
index 000000000..3dbbc59d4
--- /dev/null
+++ b/security/apparmor/ipc.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#include <linux/gfp.h>
+
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/cred.h"
+#include "include/policy.h"
+#include "include/ipc.h"
+#include "include/sig_names.h"
+
+
+static inline int map_signal_num(int sig)
+{
+ if (sig > SIGRTMAX)
+ return SIGUNKNOWN;
+ else if (sig >= SIGRTMIN)
+ return sig - SIGRTMIN + SIGRT_BASE;
+ else if (sig < MAXMAPPED_SIG)
+ return sig_map[sig];
+ return SIGUNKNOWN;
+}
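+
+/*
+ * Worked examples (assuming the kernel's SIGRTMIN of 32):
+ *	map_signal_num(SIGKILL)      == sig_map[9] == 9
+ *	map_signal_num(SIGRTMIN + 2) == SIGRT_BASE + 2 == 130
+ *	map_signal_num(SIGRTMAX + 1) == SIGUNKNOWN
+ */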
+
+/**
+ * audit_signal_mask - convert mask to permission string
+ * @mask: permission mask to convert
+ *
+ * Returns: pointer to static string
+ */
+static const char *audit_signal_mask(u32 mask)
+{
+ if (mask & MAY_READ)
+ return "receive";
+ if (mask & MAY_WRITE)
+ return "send";
+ return "";
+}
+
+/**
+ * audit_signal_cb - callback for signal specific audit fields
+ * @ab: audit_buffer (NOT NULL)
+ * @va: audit struct to audit values of (NOT NULL)
+ */
+static void audit_signal_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+ audit_signal_mask(aad(sa)->request));
+ if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+ audit_signal_mask(aad(sa)->denied));
+ }
+ }
+ if (aad(sa)->signal == SIGUNKNOWN)
+ audit_log_format(ab, "signal=unknown(%d)",
+ aad(sa)->unmappedsig);
+ else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
+ audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
+ else
+ audit_log_format(ab, " signal=rtmin+%d",
+ aad(sa)->signal - SIGRT_BASE);
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+}
+
+static int profile_signal_perm(struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+ struct common_audit_data *sa)
+{
+ struct aa_perms perms;
+ unsigned int state;
+
+ if (profile_unconfined(profile) ||
+ !PROFILE_MEDIATES(profile, AA_CLASS_SIGNAL))
+ return 0;
+
+ aad(sa)->peer = peer;
+ /* TODO: secondary cache check <profile, profile, perm> */
+ state = aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_SIGNAL],
+ aad(sa)->signal);
+ aa_label_match(profile, peer, state, false, request, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
+}
+
+int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SIGNAL);
+
+ aad(&sa)->signal = map_signal_num(sig);
+ aad(&sa)->unmappedsig = sig;
+ return xcheck_labels(sender, target, profile,
+ profile_signal_perm(profile, target, MAY_WRITE, &sa),
+ profile_signal_perm(profile, sender, MAY_READ, &sa));
+}
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
new file mode 100644
index 000000000..a67c5897e
--- /dev/null
+++ b/security/apparmor/label.c
@@ -0,0 +1,2163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ */
+
+#include <linux/audit.h>
+#include <linux/seq_file.h>
+#include <linux/sort.h>
+
+#include "include/apparmor.h"
+#include "include/cred.h"
+#include "include/label.h"
+#include "include/policy.h"
+#include "include/secid.h"
+
+
+/*
+ * the aa_label represents the set of profiles confining an object
+ *
+ * Labels maintain a reference count to the set of pointers they reference
+ * Labels are ref counted by
+ * tasks and objects via the security field/security context
+ * code - will take a ref count on a label if it needs the label
+ * beyond what is possible with an rcu_read_lock.
+ * profiles - each profile is a label
+ * secids - a pinned secid will keep a refcount of the label it is
+ * referencing
+ * objects - inodes, files, sockets, ...
+ *
+ * Labels are not ref counted by the label set, so they may be removed and
+ * freed when no longer in use.
+ */
+
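+/* poison values to help catch use of a label or proxy after free */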
+#define PROXY_POISON 97
+#define LABEL_POISON 100
+
+static void free_proxy(struct aa_proxy *proxy)
+{
+ if (proxy) {
+ /* proxy->label will not be updated any more as the proxy is dead */
+ aa_put_label(rcu_dereference_protected(proxy->label, true));
+ memset(proxy, 0, sizeof(*proxy));
+ RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON);
+ kfree(proxy);
+ }
+}
+
+void aa_proxy_kref(struct kref *kref)
+{
+ struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);
+
+ free_proxy(proxy);
+}
+
+struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
+{
+ struct aa_proxy *new;
+
+ new = kzalloc(sizeof(struct aa_proxy), gfp);
+ if (new) {
+ kref_init(&new->count);
+ rcu_assign_pointer(new->label, aa_get_label(label));
+ }
+ return new;
+}
+
+/* requires labels_set(@orig)->lock be held for writing */
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
+{
+ struct aa_label *tmp;
+
+ AA_BUG(!orig);
+ AA_BUG(!new);
+ lockdep_assert_held_write(&labels_set(orig)->lock);
+
+ tmp = rcu_dereference_protected(orig->proxy->label,
+ &labels_ns(orig)->lock);
+ rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
+ orig->flags |= FLAG_STALE;
+ aa_put_label(tmp);
+}
+
+static void __proxy_share(struct aa_label *old, struct aa_label *new)
+{
+ struct aa_proxy *proxy = new->proxy;
+
+ new->proxy = aa_get_proxy(old->proxy);
+ __aa_proxy_redirect(old, new);
+ aa_put_proxy(proxy);
+}
+
+
+/**
+ * ns_cmp - compare ns for label set ordering
+ * @a: ns to compare (NOT NULL)
+ * @b: ns to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
+{
+ int res;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!a->base.hname);
+ AA_BUG(!b->base.hname);
+
+ if (a == b)
+ return 0;
+
+ res = a->level - b->level;
+ if (res)
+ return res;
+
+ return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * profile_cmp - profile comparison for set ordering
+ * @a: profile to compare (NOT NULL)
+ * @b: profile to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
+{
+ int res;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!a->ns);
+ AA_BUG(!b->ns);
+ AA_BUG(!a->base.hname);
+ AA_BUG(!b->base.hname);
+
+ if (a == b || a->base.hname == b->base.hname)
+ return 0;
+ res = ns_cmp(a->ns, b->ns);
+ if (res)
+ return res;
+
+ return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * vec_cmp - label comparison for set ordering
+ * @a: vector of profiles to compare (NOT NULL)
+ * @an: length of @a
+ * @b: vector of profiles to compare (NOT NULL)
+ * @bn: length of @b
+ *
+ * Returns: <0 if @a < @b
+ * ==0 if @a == @b
+ * >0 if @a > @b
+ */
+static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
+{
+ int i;
+
+ AA_BUG(!a);
+ AA_BUG(!*a);
+ AA_BUG(!b);
+ AA_BUG(!*b);
+ AA_BUG(an <= 0);
+ AA_BUG(bn <= 0);
+
+ for (i = 0; i < an && i < bn; i++) {
+ int res = profile_cmp(a[i], b[i]);
+
+ if (res != 0)
+ return res;
+ }
+
+ return an - bn;
+}
+
+static bool vec_is_stale(struct aa_profile **vec, int n)
+{
+ int i;
+
+ AA_BUG(!vec);
+
+ for (i = 0; i < n; i++) {
+ if (profile_is_stale(vec[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static long accum_vec_flags(struct aa_profile **vec, int n)
+{
+ long u = FLAG_UNCONFINED;
+ int i;
+
+ AA_BUG(!vec);
+
+ for (i = 0; i < n; i++) {
+ u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
+ FLAG_STALE);
+ if (!(u & vec[i]->label.flags & FLAG_UNCONFINED))
+ u &= ~FLAG_UNCONFINED;
+ }
+
+ return u;
+}
+
+static int sort_cmp(const void *a, const void *b)
+{
+ return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
+}
+
+/*
+ * assumes vec is sorted
+ * duplicates are dropped and their references put; null termination
+ * at vec[n - dups] is left to the caller (see aa_vec_unique())
+ */
+static inline int unique(struct aa_profile **vec, int n)
+{
+ int i, pos, dups = 0;
+
+ AA_BUG(n < 1);
+ AA_BUG(!vec);
+
+ pos = 0;
+ for (i = 1; i < n; i++) {
+ int res = profile_cmp(vec[pos], vec[i]);
+
+ AA_BUG(res > 0, "vec not sorted");
+ if (res == 0) {
+ /* drop duplicate */
+ aa_put_profile(vec[i]);
+ dups++;
+ continue;
+ }
+ pos++;
+ if (dups)
+ vec[pos] = vec[i];
+ }
+
+ AA_BUG(dups < 0);
+
+ return dups;
+}
+
+/**
+ * aa_vec_unique - canonical sort and unique a list of profiles
+ * @vec: list of refcounted profiles to sort and merge (NOT NULL)
+ * @n: number of refcounted profiles in the list (@n > 0)
+ * @flags: flags controlling termination (eg. VEC_FLAG_TERMINATE)
+ *
+ * Returns: the number of duplicates eliminated == references put
+ *
+ * If @flags & VEC_FLAG_TERMINATE @vec has a null terminator at vec[n], and
+ * will null terminate vec[n - dups]
+ */
+int aa_vec_unique(struct aa_profile **vec, int n, int flags)
+{
+ int i, dups = 0;
+
+ AA_BUG(n < 1);
+ AA_BUG(!vec);
+
+ /* vecs are usually small and in order, have a fallback for larger */
+ if (n > 8) {
+ sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
+ dups = unique(vec, n);
+ goto out;
+ }
+
+ /* insertion sort + unique in one */
+ for (i = 1; i < n; i++) {
+ struct aa_profile *tmp = vec[i];
+ int pos, j;
+
+ for (pos = i - 1 - dups; pos >= 0; pos--) {
+ int res = profile_cmp(vec[pos], tmp);
+
+ if (res == 0) {
+ /* drop duplicate entry */
+ aa_put_profile(tmp);
+ dups++;
+ goto continue_outer;
+ } else if (res < 0)
+ break;
+ }
+ /* pos is at entry < tmp, or index -1. Set to insert pos */
+ pos++;
+
+ for (j = i - dups; j > pos; j--)
+ vec[j] = vec[j - 1];
+ vec[pos] = tmp;
+continue_outer:
+ ;
+ }
+
+ AA_BUG(dups < 0);
+
+out:
+ if (flags & VEC_FLAG_TERMINATE)
+ vec[n - dups] = NULL;
+
+ return dups;
+}
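+
+/*
+ * Illustrative use (hypothetical caller): given a vec of refcounted
+ * profiles [B, A, B] with n == 3,
+ *
+ * dups = aa_vec_unique(vec, 3, VEC_FLAG_TERMINATE);
+ *
+ * returns dups == 1, leaves vec == [A, B, NULL], and has already put
+ * the reference of the dropped duplicate entry.
+ */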
+
+
+void aa_label_destroy(struct aa_label *label)
+{
+ AA_BUG(!label);
+
+ if (!label_isprofile(label)) {
+ struct aa_profile *profile;
+ struct label_it i;
+
+ aa_put_str(label->hname);
+
+ label_for_each(i, label, profile) {
+ aa_put_profile(profile);
+ label->vec[i.i] = (struct aa_profile *)
+ (LABEL_POISON + (long) i.i);
+ }
+ }
+
+ if (label->proxy) {
+ if (rcu_dereference_protected(label->proxy->label, true) == label)
+ rcu_assign_pointer(label->proxy->label, NULL);
+ aa_put_proxy(label->proxy);
+ }
+ aa_free_secid(label->secid);
+
+ label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
+}
+
+void aa_label_free(struct aa_label *label)
+{
+ if (!label)
+ return;
+
+ aa_label_destroy(label);
+ kfree(label);
+}
+
+static void label_free_switch(struct aa_label *label)
+{
+ if (label->flags & FLAG_NS_COUNT)
+ aa_free_ns(labels_ns(label));
+ else if (label_isprofile(label))
+ aa_free_profile(labels_profile(label));
+ else
+ aa_label_free(label);
+}
+
+static void label_free_rcu(struct rcu_head *head)
+{
+ struct aa_label *label = container_of(head, struct aa_label, rcu);
+
+ if (label->flags & FLAG_IN_TREE)
+ (void) aa_label_remove(label);
+ label_free_switch(label);
+}
+
+void aa_label_kref(struct kref *kref)
+{
+ struct aa_label *label = container_of(kref, struct aa_label, count);
+ struct aa_ns *ns = labels_ns(label);
+
+ if (!ns) {
+ /* never live, no rcu callback needed, just using the fn */
+ label_free_switch(label);
+ return;
+ }
+ /* TODO: update labels_profile macro so it works here */
+ AA_BUG(label_isprofile(label) &&
+ on_list_rcu(&label->vec[0]->base.profiles));
+ AA_BUG(label_isprofile(label) &&
+ on_list_rcu(&label->vec[0]->base.list));
+
+ /* TODO: if compound label and not stale add to reclaim cache */
+ call_rcu(&label->rcu, label_free_rcu);
+}
+
+static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
+{
+ if (label != new)
+ /* need to free directly to break circular ref with proxy */
+ aa_label_free(new);
+ else
+ aa_put_label(new);
+}
+
+bool aa_label_init(struct aa_label *label, int size, gfp_t gfp)
+{
+ AA_BUG(!label);
+ AA_BUG(size < 1);
+
+ if (aa_alloc_secid(label, gfp) < 0)
+ return false;
+
+ label->size = size; /* doesn't include null */
+ label->vec[size] = NULL; /* null terminate */
+ kref_init(&label->count);
+ RB_CLEAR_NODE(&label->node);
+
+ return true;
+}
+
+/**
+ * aa_label_alloc - allocate a label with a profile vector of @size length
+ * @size: size of profile vector in the label
+ * @proxy: proxy to use OR NULL to allocate a new one
+ * @gfp: memory allocation type
+ *
+ * Returns: new label
+ * else NULL if failed
+ */
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
+{
+ struct aa_label *new;
+
+ AA_BUG(size < 1);
+
+ /* + 1 for null terminator entry on vec */
+ new = kzalloc(struct_size(new, vec, size + 1), gfp);
+ AA_DEBUG("%s (%p)\n", __func__, new);
+ if (!new)
+ goto fail;
+
+ if (!aa_label_init(new, size, gfp))
+ goto fail;
+
+ if (!proxy) {
+ proxy = aa_alloc_proxy(new, gfp);
+ if (!proxy)
+ goto fail;
+ } else
+ aa_get_proxy(proxy);
+ /* just set new's proxy, don't redirect proxy here if it was passed in */
+ new->proxy = proxy;
+
+ return new;
+
+fail:
+ kfree(new);
+
+ return NULL;
+}
+
+
+/**
+ * label_cmp - label comparison for set ordering
+ * @a: label to compare (NOT NULL)
+ * @b: label to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int label_cmp(struct aa_label *a, struct aa_label *b)
+{
+ AA_BUG(!b);
+
+ if (a == b)
+ return 0;
+
+ return vec_cmp(a->vec, a->size, b->vec, b->size);
+}
+
+/* helper fn for label_for_each_confined */
+int aa_label_next_confined(struct aa_label *label, int i)
+{
+ AA_BUG(!label);
+ AA_BUG(i < 0);
+
+ for (; i < label->size; i++) {
+ if (!profile_unconfined(label->vec[i]))
+ return i;
+ }
+
+ return i;
+}
+
+/**
+ * __aa_label_next_not_in_set - return the next profile of @sub not in @set
+ * @I: label iterator
+ * @set: label to test against
+ * @sub: label to test if it is a subset of @set
+ *
+ * Returns: profile in @sub that is not in @set, with iterator set pos after
+ * else NULL if @sub is a subset of @set
+ */
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+ struct aa_label *set,
+ struct aa_label *sub)
+{
+ AA_BUG(!set);
+ AA_BUG(!I);
+ AA_BUG(I->i < 0);
+ AA_BUG(I->i > set->size);
+ AA_BUG(!sub);
+ AA_BUG(I->j < 0);
+ AA_BUG(I->j > sub->size);
+
+ while (I->j < sub->size && I->i < set->size) {
+ int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);
+
+ if (res == 0) {
+ (I->j)++;
+ (I->i)++;
+ } else if (res > 0)
+ (I->i)++;
+ else
+ return sub->vec[(I->j)++];
+ }
+
+ if (I->j < sub->size)
+ return sub->vec[(I->j)++];
+
+ return NULL;
+}
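+
+/*
+ * Example (illustrative): with @set == [A, C, D] and @sub == [A, B],
+ * the walk matches A in both, then profile_cmp(B, C) < 0, so B is
+ * returned as the first profile of @sub not in @set; a NULL return
+ * over the whole walk means @sub is a subset of @set.
+ */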
+
+/**
+ * aa_label_is_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * Returns: true if @sub is subset of @set
+ * else false
+ */
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
+{
+ struct label_it i = { };
+
+ AA_BUG(!set);
+ AA_BUG(!sub);
+
+ if (sub == set)
+ return true;
+
+ return __aa_label_next_not_in_set(&i, set, sub) == NULL;
+}
+
+/**
+ * aa_label_is_unconfined_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * This checks for subset while taking unconfined into account. If
+ * @sub contains an unconfined profile that does not have a matching
+ * unconfined in @set then this will not cause the test to fail.
+ * Conversely we don't care about an unconfined in @set that is not in
+ * @sub.
+ *
+ * Returns: true if @sub is an unconfined subset of @set
+ * else false
+ */
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub)
+{
+ struct label_it i = { };
+ struct aa_profile *p;
+
+ AA_BUG(!set);
+ AA_BUG(!sub);
+
+ if (sub == set)
+ return true;
+
+ do {
+ p = __aa_label_next_not_in_set(&i, set, sub);
+ if (p && !profile_unconfined(p))
+ break;
+ } while (p);
+
+ return p == NULL;
+}
+
+
+/**
+ * __label_remove - remove @label from the label set
+ * @label: label to remove
+ * @new: label to redirect to
+ *
+ * Requires: labels_set(@label)->lock write_lock
+ * Returns: true if the label was in the tree and removed
+ */
+static bool __label_remove(struct aa_label *label, struct aa_label *new)
+{
+ struct aa_labelset *ls = labels_set(label);
+
+ AA_BUG(!ls);
+ AA_BUG(!label);
+ lockdep_assert_held_write(&ls->lock);
+
+ if (new)
+ __aa_proxy_redirect(label, new);
+
+ if (!label_is_stale(label))
+ __label_make_stale(label);
+
+ if (label->flags & FLAG_IN_TREE) {
+ rb_erase(&label->node, &ls->root);
+ label->flags &= ~FLAG_IN_TREE;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __label_replace - replace @old with @new in label set
+ * @old: label to remove from label set
+ * @new: label to replace @old with
+ *
+ * Requires: labels_set(@old)->lock write_lock
+ * valid ref count be held on @new
+ * Returns: true if @old was in set and replaced by @new
+ *
+ * Note: current implementation requires the label set be ordered in such
+ * a way that @new directly replaces @old's position in the set (ie.
+ * using pointer comparison of the label address would not work)
+ */
+static bool __label_replace(struct aa_label *old, struct aa_label *new)
+{
+ struct aa_labelset *ls = labels_set(old);
+
+ AA_BUG(!ls);
+ AA_BUG(!old);
+ AA_BUG(!new);
+ lockdep_assert_held_write(&ls->lock);
+ AA_BUG(new->flags & FLAG_IN_TREE);
+
+ if (!label_is_stale(old))
+ __label_make_stale(old);
+
+ if (old->flags & FLAG_IN_TREE) {
+ rb_replace_node(&old->node, &new->node, &ls->root);
+ old->flags &= ~FLAG_IN_TREE;
+ new->flags |= FLAG_IN_TREE;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __label_insert - attempt to insert @label into a label set
+ * @ls: set of labels to insert @label into (NOT NULL)
+ * @label: new label to insert (NOT NULL)
+ * @replace: whether insertion should replace existing entry that is not stale
+ *
+ * Requires: @ls->lock
+ * caller to hold a valid ref on @label
+ * if @replace is true @label has a preallocated proxy associated
+ * Returns: @label if successful in inserting @label - with additional refcount
+ * else ref counted equivalent label that is already in the set,
+ * the else condition only happens if @replace is false
+ */
+static struct aa_label *__label_insert(struct aa_labelset *ls,
+ struct aa_label *label, bool replace)
+{
+ struct rb_node **new, *parent = NULL;
+
+ AA_BUG(!ls);
+ AA_BUG(!label);
+ AA_BUG(labels_set(label) != ls);
+ lockdep_assert_held_write(&ls->lock);
+ AA_BUG(label->flags & FLAG_IN_TREE);
+
+ /* Figure out where to put new node */
+ new = &ls->root.rb_node;
+ while (*new) {
+ struct aa_label *this = rb_entry(*new, struct aa_label, node);
+ int result = label_cmp(label, this);
+
+ parent = *new;
+ if (result == 0) {
+ /* !__aa_get_label means queued for destruction,
+ * so replace in place, however the label has
+ * died before the replacement so do not share
+ * the proxy
+ */
+ if (!replace && !label_is_stale(this)) {
+ if (__aa_get_label(this))
+ return this;
+ } else
+ __proxy_share(this, label);
+ AA_BUG(!__label_replace(this, label));
+ return aa_get_label(label);
+ } else if (result < 0)
+ new = &((*new)->rb_left);
+ else /* (result > 0) */
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&label->node, parent, new);
+ rb_insert_color(&label->node, &ls->root);
+ label->flags |= FLAG_IN_TREE;
+
+ return aa_get_label(label);
+}
+
+/**
+ * __vec_find - find label that matches @vec in label set
+ * @vec: vec of profiles to find matching label for (NOT NULL)
+ * @n: length of @vec
+ *
+ * Requires: vec_labelset(@vec, @n) lock held
+ *
+ * Returns: ref counted label if a label matching @vec is in the tree
+ * else NULL if no matching label is in the tree
+ */
+static struct aa_label *__vec_find(struct aa_profile **vec, int n)
+{
+ struct rb_node *node;
+
+ AA_BUG(!vec);
+ AA_BUG(!*vec);
+ AA_BUG(n <= 0);
+
+ node = vec_labelset(vec, n)->root.rb_node;
+ while (node) {
+ struct aa_label *this = rb_entry(node, struct aa_label, node);
+ int result = vec_cmp(this->vec, this->size, vec, n);
+
+ if (result > 0)
+ node = node->rb_left;
+ else if (result < 0)
+ node = node->rb_right;
+ else
+ return __aa_get_label(this);
+ }
+
+ return NULL;
+}
+
+/**
+ * __label_find - find label @label in label set
+ * @label: label to find (NOT NULL)
+ *
+ * Requires: labels_set(@label)->lock held
+ * caller to hold a valid ref on @label
+ *
+ * Returns: ref counted @label if @label is in tree OR
+ * ref counted label that is equiv to @label in tree
+ * else NULL if @label or equiv is not in tree
+ */
+static struct aa_label *__label_find(struct aa_label *label)
+{
+ AA_BUG(!label);
+
+ return __vec_find(label->vec, label->size);
+}
+
+
+/**
+ * aa_label_remove - remove a label from the labelset
+ * @label: label to remove
+ *
+ * Returns: true if @label was removed from the tree
+ * else @label was not in tree so it could not be removed
+ */
+bool aa_label_remove(struct aa_label *label)
+{
+ struct aa_labelset *ls = labels_set(label);
+ unsigned long flags;
+ bool res;
+
+ AA_BUG(!ls);
+
+ write_lock_irqsave(&ls->lock, flags);
+ res = __label_remove(label, ns_unconfined(labels_ns(label)));
+ write_unlock_irqrestore(&ls->lock, flags);
+
+ return res;
+}
+
+/**
+ * aa_label_replace - replace a label @old with a new version @new
+ * @old: label to replace
+ * @new: label replacing @old
+ *
+ * Returns: true if @old was in tree and replaced
+ * else @old was not in tree, and @new was not inserted
+ */
+bool aa_label_replace(struct aa_label *old, struct aa_label *new)
+{
+ unsigned long flags;
+ bool res;
+
+ if (name_is_shared(old, new) && labels_ns(old) == labels_ns(new)) {
+ write_lock_irqsave(&labels_set(old)->lock, flags);
+ if (old->proxy != new->proxy)
+ __proxy_share(old, new);
+ else
+ __aa_proxy_redirect(old, new);
+ res = __label_replace(old, new);
+ write_unlock_irqrestore(&labels_set(old)->lock, flags);
+ } else {
+ struct aa_label *l;
+ struct aa_labelset *ls = labels_set(old);
+
+ write_lock_irqsave(&ls->lock, flags);
+ res = __label_remove(old, new);
+ if (labels_ns(old) != labels_ns(new)) {
+ write_unlock_irqrestore(&ls->lock, flags);
+ ls = labels_set(new);
+ write_lock_irqsave(&ls->lock, flags);
+ }
+ l = __label_insert(ls, new, true);
+ res = (l == new);
+ write_unlock_irqrestore(&ls->lock, flags);
+ aa_put_label(l);
+ }
+
+ return res;
+}
+
+/**
+ * vec_find - find label that matches @vec in the label set
+ * @vec: array of profiles to find equiv label for (NOT NULL)
+ * @n: length of @vec
+ *
+ * Returns: refcounted label if @vec equiv is in tree
+ * else NULL if @vec equiv is not in tree
+ */
+static struct aa_label *vec_find(struct aa_profile **vec, int n)
+{
+ struct aa_labelset *ls;
+ struct aa_label *label;
+ unsigned long flags;
+
+ AA_BUG(!vec);
+ AA_BUG(!*vec);
+ AA_BUG(n <= 0);
+
+ ls = vec_labelset(vec, n);
+ read_lock_irqsave(&ls->lock, flags);
+ label = __vec_find(vec, n);
+ read_unlock_irqrestore(&ls->lock, flags);
+
+ return label;
+}
+
+/* requires sort and merge done first */
+static struct aa_label *vec_create_and_insert_label(struct aa_profile **vec,
+ int len, gfp_t gfp)
+{
+ struct aa_label *label = NULL;
+ struct aa_labelset *ls;
+ unsigned long flags;
+ struct aa_label *new;
+ int i;
+
+ AA_BUG(!vec);
+
+ if (len == 1)
+ return aa_get_label(&vec[0]->label);
+
+ ls = labels_set(&vec[len - 1]->label);
+
+ /* TODO: enable when read side is lockless
+ * check if label exists before taking locks
+ */
+ new = aa_label_alloc(len, NULL, gfp);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ new->vec[i] = aa_get_profile(vec[i]);
+
+ write_lock_irqsave(&ls->lock, flags);
+ label = __label_insert(ls, new, false);
+ write_unlock_irqrestore(&ls->lock, flags);
+ label_free_or_put_new(label, new);
+
+ return label;
+}
+
+struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
+ gfp_t gfp)
+{
+ struct aa_label *label = vec_find(vec, len);
+
+ if (label)
+ return label;
+
+ return vec_create_and_insert_label(vec, len, gfp);
+}
+
+/**
+ * aa_label_find - find label @label in label set
+ * @label: label to find (NOT NULL)
+ *
+ * Requires: caller to hold a valid ref on @label
+ *
+ * Returns: refcounted @label if @label is in tree
+ * refcounted label that is equiv to @label in tree
+ * else NULL if @label or equiv is not in tree
+ */
+struct aa_label *aa_label_find(struct aa_label *label)
+{
+ AA_BUG(!label);
+
+ return vec_find(label->vec, label->size);
+}
+
+
+/**
+ * aa_label_insert - insert label @label into @ls or return existing label
+ * @ls: labelset to insert @label into
+ * @label: label to insert
+ *
+ * Requires: caller to hold a valid ref on @label
+ *
+ * Returns: ref counted @label if successful in inserting @label
+ * else ref counted equivalent label that is already in the set
+ */
+struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label)
+{
+ struct aa_label *l;
+ unsigned long flags;
+
+ AA_BUG(!ls);
+ AA_BUG(!label);
+
+ /* check if label exists before taking lock */
+ if (!label_is_stale(label)) {
+ read_lock_irqsave(&ls->lock, flags);
+ l = __label_find(label);
+ read_unlock_irqrestore(&ls->lock, flags);
+ if (l)
+ return l;
+ }
+
+ write_lock_irqsave(&ls->lock, flags);
+ l = __label_insert(ls, label, false);
+ write_unlock_irqrestore(&ls->lock, flags);
+
+ return l;
+}
+
+
+/**
+ * aa_label_next_in_merge - find the next profile when merging @a and @b
+ * @I: label iterator
+ * @a: label to merge
+ * @b: label to merge
+ *
+ * Returns: next profile
+ * else null if no more profiles
+ */
+struct aa_profile *aa_label_next_in_merge(struct label_it *I,
+ struct aa_label *a,
+ struct aa_label *b)
+{
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!I);
+ AA_BUG(I->i < 0);
+ AA_BUG(I->i > a->size);
+ AA_BUG(I->j < 0);
+ AA_BUG(I->j > b->size);
+
+ if (I->i < a->size) {
+ if (I->j < b->size) {
+ int res = profile_cmp(a->vec[I->i], b->vec[I->j]);
+
+ if (res > 0)
+ return b->vec[(I->j)++];
+ if (res == 0)
+ (I->j)++;
+ }
+
+ return a->vec[(I->i)++];
+ }
+
+ if (I->j < b->size)
+ return b->vec[(I->j)++];
+
+ return NULL;
+}
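+
+/*
+ * Example (illustrative): merging a == [A, C] with b == [B, C] yields
+ * the sequence A, B, C - profiles common to both labels are emitted
+ * once and ordering follows profile_cmp().
+ */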
+
+/**
+ * label_merge_cmp - cmp of @a merging with @b against @z for set ordering
+ * @a: label to merge then compare (NOT NULL)
+ * @b: label to merge then compare (NOT NULL)
+ * @z: label to compare merge against (NOT NULL)
+ *
+ * Assumes: using the most recent versions of @a, @b, and @z
+ *
+ * Returns: <0 if merge of @a and @b < @z
+ * ==0 if merge of @a and @b == @z
+ * >0 if merge of @a and @b > @z
+ */
+static int label_merge_cmp(struct aa_label *a, struct aa_label *b,
+ struct aa_label *z)
+{
+ struct aa_profile *p = NULL;
+ struct label_it i = { };
+ int k;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!z);
+
+ for (k = 0;
+ k < z->size && (p = aa_label_next_in_merge(&i, a, b));
+ k++) {
+ int res = profile_cmp(p, z->vec[k]);
+
+ if (res != 0)
+ return res;
+ }
+
+ if (p)
+ return 1;
+ else if (k < z->size)
+ return -1;
+ return 0;
+}
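+
+/*
+ * Example (illustrative): for a == [A], b == [B], z == [A, B],
+ * aa_label_next_in_merge() yields A then B, each matching z->vec in
+ * order, so label_merge_cmp() returns 0 without ever materializing the
+ * merged label.
+ */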
+
+/**
+ * label_merge_insert - create a new label by merging @a and @b
+ * @new: preallocated label to merge into (NOT NULL)
+ * @a: label to merge with @b (NOT NULL)
+ * @b: label to merge with @a (NOT NULL)
+ *
+ * Requires: preallocated proxy
+ *
+ * Returns: ref counted label either @new if merge is unique
+ * @a if @b is a subset of @a
+ * @b if @a is a subset of @b
+ *
+ * NOTE: will not use @new if the merge results in @new == @a or @b
+ *
+ * Must be used within labelset write lock to avoid racing with
+ * setting labels stale.
+ */
+static struct aa_label *label_merge_insert(struct aa_label *new,
+ struct aa_label *a,
+ struct aa_label *b)
+{
+ struct aa_label *label;
+ struct aa_labelset *ls;
+ struct aa_profile *next;
+ struct label_it i;
+ unsigned long flags;
+ int k = 0, invcount = 0;
+ bool stale = false;
+
+ AA_BUG(!a);
+ AA_BUG(a->size < 0);
+ AA_BUG(!b);
+ AA_BUG(b->size < 0);
+ AA_BUG(!new);
+ AA_BUG(new->size < a->size + b->size);
+
+ label_for_each_in_merge(i, a, b, next) {
+ AA_BUG(!next);
+ if (profile_is_stale(next)) {
+ new->vec[k] = aa_get_newest_profile(next);
+ AA_BUG(!new->vec[k]->label.proxy);
+ AA_BUG(!new->vec[k]->label.proxy->label);
+ if (next->label.proxy != new->vec[k]->label.proxy)
+ invcount++;
+ k++;
+ stale = true;
+ } else
+ new->vec[k++] = aa_get_profile(next);
+ }
+ /* set to actual size which is <= allocated len */
+ new->size = k;
+ new->vec[k] = NULL;
+
+ if (invcount) {
+ new->size -= aa_vec_unique(&new->vec[0], new->size,
+ VEC_FLAG_TERMINATE);
+ /* TODO: deal with reference labels */
+ if (new->size == 1) {
+ label = aa_get_label(&new->vec[0]->label);
+ return label;
+ }
+ } else if (!stale) {
+ /*
+ * merge could be same as a || b, note: it is not possible
+ * for new->size == a->size == b->size unless a == b
+ */
+ if (k == a->size)
+ return aa_get_label(a);
+ else if (k == b->size)
+ return aa_get_label(b);
+ }
+ new->flags |= accum_vec_flags(new->vec, new->size);
+ ls = labels_set(new);
+ write_lock_irqsave(&ls->lock, flags);
+ label = __label_insert(labels_set(new), new, false);
+ write_unlock_irqrestore(&ls->lock, flags);
+
+ return label;
+}
+
+/**
+ * labelset_of_merge - find which labelset a merged label should be inserted
+ * @a: label to merge and insert
+ * @b: label to merge and insert
+ *
+ * Returns: labelset that the merged label should be inserted into
+ */
+static struct aa_labelset *labelset_of_merge(struct aa_label *a,
+ struct aa_label *b)
+{
+ struct aa_ns *nsa = labels_ns(a);
+ struct aa_ns *nsb = labels_ns(b);
+
+ if (ns_cmp(nsa, nsb) <= 0)
+ return &nsa->labels;
+ return &nsb->labels;
+}
+
+/**
+ * __label_find_merge - find label that is equiv to merge of @a and @b
+ * @ls: set of labels to search (NOT NULL)
+ * @a: label to merge with @b (NOT NULL)
+ * @b: label to merge with @a (NOT NULL)
+ *
+ * Requires: ls->lock read_lock held
+ *
+ * Returns: ref counted label that is equiv to merge of @a and @b
+ * else NULL if merge of @a and @b is not in set
+ */
+static struct aa_label *__label_find_merge(struct aa_labelset *ls,
+ struct aa_label *a,
+ struct aa_label *b)
+{
+ struct rb_node *node;
+
+ AA_BUG(!ls);
+ AA_BUG(!a);
+ AA_BUG(!b);
+
+ if (a == b)
+ return __label_find(a);
+
+ node = ls->root.rb_node;
+ while (node) {
+ struct aa_label *this = container_of(node, struct aa_label,
+ node);
+ int result = label_merge_cmp(a, b, this);
+
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return __aa_get_label(this);
+ }
+
+ return NULL;
+}
+
+
+/**
+ * aa_label_find_merge - find label that is equiv to merge of @a and @b
+ * @a: label to merge with @b (NOT NULL)
+ * @b: label to merge with @a (NOT NULL)
+ *
+ * Requires: labels be fully constructed with a valid ns
+ *
+ * Returns: ref counted label that is equiv to merge of @a and @b
+ * else NULL if merge of @a and @b is not in set
+ */
+struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
+{
+ struct aa_labelset *ls;
+ struct aa_label *label, *ar = NULL, *br = NULL;
+ unsigned long flags;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+
+ if (label_is_stale(a))
+ a = ar = aa_get_newest_label(a);
+ if (label_is_stale(b))
+ b = br = aa_get_newest_label(b);
+ ls = labelset_of_merge(a, b);
+ read_lock_irqsave(&ls->lock, flags);
+ label = __label_find_merge(ls, a, b);
+ read_unlock_irqrestore(&ls->lock, flags);
+ aa_put_label(ar);
+ aa_put_label(br);
+
+ return label;
+}
+
+/**
+ * aa_label_merge - attempt to insert new merged label of @a and @b
+ * @ls: set of labels to insert label into (NOT NULL)
+ * @a: label to merge with @b (NOT NULL)
+ * @b: label to merge with @a (NOT NULL)
+ * @gfp: memory allocation type
+ *
+ * Requires: caller to hold valid refs on @a and @b
+ * labels be fully constructed with a valid ns
+ *
+ * Returns: ref counted new label if successful in inserting merge of a & b
+ * else ref counted equivalent label that is already in the set.
+ * else NULL if could not create label (-ENOMEM)
+ */
+struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
+ gfp_t gfp)
+{
+ struct aa_label *label = NULL;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+
+ if (a == b)
+ return aa_get_newest_label(a);
+
+ /* TODO: enable when read side is lockless
+ * check if label exists before taking locks
+ if (!label_is_stale(a) && !label_is_stale(b))
+ label = aa_label_find_merge(a, b);
+ */
+
+ if (!label) {
+ struct aa_label *new;
+
+ a = aa_get_newest_label(a);
+ b = aa_get_newest_label(b);
+
+ /* could use label_merge_len(a, b), but requires double
+ * comparison for small savings
+ */
+ new = aa_label_alloc(a->size + b->size, NULL, gfp);
+ if (!new)
+ goto out;
+
+ label = label_merge_insert(new, a, b);
+ label_free_or_put_new(label, new);
+out:
+ aa_put_label(a);
+ aa_put_label(b);
+ }
+
+ return label;
+}
+
+static inline bool label_is_visible(struct aa_profile *profile,
+ struct aa_label *label)
+{
+ return aa_ns_visible(profile->ns, labels_ns(label), true);
+}
+
+/* match a profile and its associated ns component if needed
+ * Assumes visibility test has already been done.
+ * If a subns profile is not to be matched it should be prescreened
+ * with the visibility test.
+ */
+static inline unsigned int match_component(struct aa_profile *profile,
+ struct aa_profile *tp,
+ unsigned int state)
+{
+ const char *ns_name;
+
+ if (profile->ns == tp->ns)
+ return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
+
+ /* try matching with namespace name and then profile */
+ ns_name = aa_ns_name(profile->ns, tp->ns, true);
+ state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
+ state = aa_dfa_match(profile->policy.dfa, state, ns_name);
+ state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
+ return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
+}
+
+/**
+ * label_compound_match - find perms for full compound label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @state: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: perms struct to set
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for A//&B//&C
+ * @perms should be preinitialized with allperms OR a previous permission
+ * check to be stacked.
+ */
+static int label_compound_match(struct aa_profile *profile,
+ struct aa_label *label,
+ unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ struct aa_profile *tp;
+ struct label_it i;
+
+ /* find first subcomponent that is visible */
+ label_for_each(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, state);
+ if (!state)
+ goto fail;
+ goto next;
+ }
+
+ /* no component visible */
+ *perms = allperms;
+ return 0;
+
+next:
+ label_for_each_cont(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = aa_dfa_match(profile->policy.dfa, state, "//&");
+ state = match_component(profile, tp, state);
+ if (!state)
+ goto fail;
+ }
+ aa_compute_perms(profile->policy.dfa, state, perms);
+ aa_apply_modes_to_perms(profile, perms);
+ if ((perms->allow & request) != request)
+ return -EACCES;
+
+ return 0;
+
+fail:
+ *perms = nullperms;
+ return state;
+}
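+
+/*
+ * Illustrative contrast: for the label A//&B the compound match above
+ * walks the dfa through "A", the literal "//&", then "B", computing a
+ * single permission set for the exact stack, whereas
+ * label_components_match() below matches "A" and "B" individually and
+ * accumulates their permissions.
+ */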
+
+/**
+ * label_components_match - find perms for all subcomponents of a label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @start: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: an initialized perms struct to add accumulation to
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for each of A and B and C
+ * @perms should be preinitialized with allperms OR a previous permission
+ * check to be stacked.
+ */
+static int label_components_match(struct aa_profile *profile,
+ struct aa_label *label, unsigned int start,
+ bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ struct aa_profile *tp;
+ struct label_it i;
+ struct aa_perms tmp;
+ unsigned int state = 0;
+
+ /* find first subcomponent to test */
+ label_for_each(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, start);
+ if (!state)
+ goto fail;
+ goto next;
+ }
+
+ /* no subcomponents visible - no change in perms */
+ return 0;
+
+next:
+ aa_compute_perms(profile->policy.dfa, state, &tmp);
+ aa_apply_modes_to_perms(profile, &tmp);
+ aa_perms_accum(perms, &tmp);
+ label_for_each_cont(i, label, tp) {
+ if (!aa_ns_visible(profile->ns, tp->ns, subns))
+ continue;
+ state = match_component(profile, tp, start);
+ if (!state)
+ goto fail;
+ aa_compute_perms(profile->policy.dfa, state, &tmp);
+ aa_apply_modes_to_perms(profile, &tmp);
+ aa_perms_accum(perms, &tmp);
+ }
+
+ if ((perms->allow & request) != request)
+ return -EACCES;
+
+ return 0;
+
+fail:
+ *perms = nullperms;
+ return -EACCES;
+}
+
+/**
+ * aa_label_match - do a multi-component label match
+ * @profile: profile to match against (NOT NULL)
+ * @label: label to match (NOT NULL)
+ * @state: state to start in
+ * @subns: whether to match subns components
+ * @request: permission request
+ * @perms: Returns computed perms (NOT NULL)
+ *
+ * Returns: the state the match finished in, which may be the non-matching state
+ */
+int aa_label_match(struct aa_profile *profile, struct aa_label *label,
+ unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms)
+{
+ int error = label_compound_match(profile, label, state, subns, request,
+ perms);
+ if (!error)
+ return error;
+
+ *perms = allperms;
+ return label_components_match(profile, label, state, subns, request,
+ perms);
+}
+
+
+/**
+ * aa_update_label_name - update a label to have a stored name
+ * @ns: ns being viewed from (NOT NULL)
+ * @label: label to update (NOT NULL)
+ * @gfp: type of memory allocation
+ *
+ * Requires: labels_set(label) not locked in caller
+ *
+ * note: only updates the label name if it does not have a name already
+ * and if it is in the labelset
+ */
+bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
+{
+ struct aa_labelset *ls;
+ unsigned long flags;
+ char __counted *name;
+ bool res = false;
+
+ AA_BUG(!ns);
+ AA_BUG(!label);
+
+ if (label->hname || labels_ns(label) != ns)
+ return res;
+
+ if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) < 0)
+ return res;
+
+ ls = labels_set(label);
+ write_lock_irqsave(&ls->lock, flags);
+ if (!label->hname && label->flags & FLAG_IN_TREE) {
+ label->hname = name;
+ res = true;
+ } else
+ aa_put_str(name);
+ write_unlock_irqrestore(&ls->lock, flags);
+
+ return res;
+}
+
+/*
+ * cached label name is present and visible
+ * @label->hname only exists if label is namespace hierarchical
+ */
+static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
+ int flags)
+{
+ if (label->hname && (!ns || labels_ns(label) == ns) &&
+ !(flags & ~FLAG_SHOW_MODE))
+ return true;
+
+ return false;
+}
+
+/* helper macro for snprint routines */
+#define update_for_len(total, len, size, str) \
+do { \
+ size_t ulen = len; \
+ \
+ AA_BUG(len < 0); \
+ total += ulen; \
+ ulen = min(ulen, size); \
+ size -= ulen; \
+ str += ulen; \
+} while (0)
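+
+/*
+ * update_for_len implements the usual two-pass snprintf pattern:
+ * @total accumulates the full would-be length while @size and @str
+ * only advance by what actually fit, so callers can size a buffer
+ * with a NULL/0 first pass and print for real on the second pass
+ * (see aa_label_asxprint() below).
+ */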
+
+/**
+ * aa_profile_snxprint - print a profile name to a buffer
+ * @str: buffer to write to. (MAY BE NULL if @size == 0)
+ * @size: size of buffer
+ * @view: namespace profile is being viewed from
+ * @profile: profile to view (NOT NULL)
+ * @flags: flags controlling what is printed (eg. FLAG_SHOW_MODE)
+ * @prev_ns: last ns printed when used in compound print
+ *
+ * Returns: size of name written or would be written if larger than
+ * available buffer
+ *
+ * Note: will not print anything if the profile is not visible
+ */
+static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
+ struct aa_profile *profile, int flags,
+ struct aa_ns **prev_ns)
+{
+ const char *ns_name = NULL;
+
+ AA_BUG(!str && size != 0);
+ AA_BUG(!profile);
+
+ if (!view)
+ view = profiles_ns(profile);
+
+ if (view != profile->ns &&
+ (!prev_ns || (*prev_ns != profile->ns))) {
+ if (prev_ns)
+ *prev_ns = profile->ns;
+ ns_name = aa_ns_name(view, profile->ns,
+ flags & FLAG_VIEW_SUBNS);
+ if (ns_name == aa_hidden_ns_name) {
+ if (flags & FLAG_HIDDEN_UNCONFINED)
+ return snprintf(str, size, "%s", "unconfined");
+ return snprintf(str, size, "%s", ns_name);
+ }
+ }
+
+ if ((flags & FLAG_SHOW_MODE) && profile != profile->ns->unconfined) {
+ const char *modestr = aa_profile_mode_names[profile->mode];
+
+ if (ns_name)
+ return snprintf(str, size, ":%s:%s (%s)", ns_name,
+ profile->base.hname, modestr);
+ return snprintf(str, size, "%s (%s)", profile->base.hname,
+ modestr);
+ }
+
+ if (ns_name)
+ return snprintf(str, size, ":%s:%s", ns_name,
+ profile->base.hname);
+ return snprintf(str, size, "%s", profile->base.hname);
+}
+
+static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
+ int flags)
+{
+ struct aa_profile *profile;
+ struct label_it i;
+ int mode = -1, count = 0;
+
+ label_for_each(i, label, profile) {
+ if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
+ count++;
+ if (profile == profile->ns->unconfined)
+ /* special case unconfined so stacks with
+ * unconfined don't report as mixed. ie.
+ * profile_foo//&:ns1:unconfined (mixed)
+ */
+ continue;
+ if (mode == -1)
+ mode = profile->mode;
+ else if (mode != profile->mode)
+ return "mixed";
+ }
+ }
+
+ if (count == 0)
+ return "-";
+ if (mode == -1)
+ /* everything was unconfined */
+ mode = APPARMOR_UNCONFINED;
+
+ return aa_profile_mode_names[mode];
+}
+
+/* if any visible label is not unconfined the display_mode returns true */
+static inline bool display_mode(struct aa_ns *ns, struct aa_label *label,
+ int flags)
+{
+ if ((flags & FLAG_SHOW_MODE)) {
+ struct aa_profile *profile;
+ struct label_it i;
+
+ label_for_each(i, label, profile) {
+ if (aa_ns_visible(ns, profile->ns,
+ flags & FLAG_VIEW_SUBNS) &&
+ profile != profile->ns->unconfined)
+ return true;
+ }
+ /* only ns->unconfined in set of profiles in ns */
+ return false;
+ }
+
+ return false;
+}
+
+/**
+ * aa_label_snxprint - print a label name to a string buffer
+ * @str: buffer to write to. (MAY BE NULL if @size == 0)
+ * @size: size of buffer
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: flags controlling what is printed (eg. FLAG_SHOW_MODE)
+ *
+ * Returns: size of name written or would be written if larger than
+ * available buffer
+ *
+ * Note: labels do not have to be strictly hierarchical to the ns as
+ * objects may be shared across different namespaces and thus
+ * pickup labeling from each ns. If a particular part of the
+ * label is not visible it will just be excluded. And if none
+ * of the label is visible "---" will be used.
+ */
+int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
+ struct aa_label *label, int flags)
+{
+ struct aa_profile *profile;
+ struct aa_ns *prev_ns = NULL;
+ struct label_it i;
+ int count = 0, total = 0;
+ ssize_t len;
+
+ AA_BUG(!str && size != 0);
+ AA_BUG(!label);
+
+ if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
+ ns = root_ns;
+ len = snprintf(str, size, "_");
+ update_for_len(total, len, size, str);
+ } else if (!ns) {
+ ns = labels_ns(label);
+ }
+
+ label_for_each(i, label, profile) {
+ if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
+ if (count > 0) {
+ len = snprintf(str, size, "//&");
+ update_for_len(total, len, size, str);
+ }
+ len = aa_profile_snxprint(str, size, ns, profile,
+ flags & FLAG_VIEW_SUBNS,
+ &prev_ns);
+ update_for_len(total, len, size, str);
+ count++;
+ }
+ }
+
+ if (count == 0) {
+ if (flags & FLAG_HIDDEN_UNCONFINED)
+ return snprintf(str, size, "%s", "unconfined");
+ return snprintf(str, size, "%s", aa_hidden_ns_name);
+ }
+
+ /* for backwards compat the mode is not displayed when every
+ * visible profile is unconfined, see display_mode()
+ */
+ if (display_mode(ns, label, flags)) {
+ len = snprintf(str, size, " (%s)",
+ label_modename(ns, label, flags));
+ update_for_len(total, len, size, str);
+ }
+
+ return total;
+}
+#undef update_for_len
+
+/**
+ * aa_label_asxprint - allocate a string buffer and print label into it
+ * @strp: Returns - the allocated buffer with the label name. (NOT NULL)
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: flags controlling what label info is printed
+ * @gfp: kernel memory allocation type
+ *
+ * Returns: size of name written
+ * else -errno on failure
+ */
+int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
+ int flags, gfp_t gfp)
+{
+ int size;
+
+ AA_BUG(!strp);
+ AA_BUG(!label);
+
+ size = aa_label_snxprint(NULL, 0, ns, label, flags);
+ if (size < 0)
+ return size;
+
+ *strp = kmalloc(size + 1, gfp);
+ if (!*strp)
+ return -ENOMEM;
+ return aa_label_snxprint(*strp, size + 1, ns, label, flags);
+}
+
+/**
+ * aa_label_acntsxprint - allocate a __counted string buffer and print label
+ * @strp: Returns - the allocated __counted buffer with the label name (NOT NULL)
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: flags controlling what label info is printed
+ * @gfp: kernel memory allocation type
+ *
+ * Returns: size of name written
+ * else -errno on failure
+ */
+int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp)
+{
+ int size;
+
+ AA_BUG(!strp);
+ AA_BUG(!label);
+
+ size = aa_label_snxprint(NULL, 0, ns, label, flags);
+ if (size < 0)
+ return size;
+
+ *strp = aa_str_alloc(size + 1, gfp);
+ if (!*strp)
+ return -ENOMEM;
+ return aa_label_snxprint(*strp, size + 1, ns, label, flags);
+}
+
+
+void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp)
+{
+ const char *str;
+ char *name = NULL;
+ int len;
+
+ AA_BUG(!ab);
+ AA_BUG(!label);
+
+ if (!use_label_hname(ns, label, flags) ||
+ display_mode(ns, label, flags)) {
+ len = aa_label_asxprint(&name, ns, label, flags, gfp);
+ if (len < 0) {
+ AA_DEBUG("label print error");
+ return;
+ }
+ str = name;
+ } else {
+ str = (char *) label->hname;
+ len = strlen(str);
+ }
+ if (audit_string_contains_control(str, len))
+ audit_log_n_hex(ab, str, len);
+ else
+ audit_log_n_string(ab, str, len);
+
+ kfree(name);
+}
+
+void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp)
+{
+ AA_BUG(!f);
+ AA_BUG(!label);
+
+ if (!use_label_hname(ns, label, flags)) {
+ char *str;
+ int len;
+
+ len = aa_label_asxprint(&str, ns, label, flags, gfp);
+ if (len < 0) {
+ AA_DEBUG("label print error");
+ return;
+ }
+ seq_puts(f, str);
+ kfree(str);
+ } else if (display_mode(ns, label, flags))
+ seq_printf(f, "%s (%s)", label->hname,
+ label_modename(ns, label, flags));
+ else
+ seq_puts(f, label->hname);
+}
+
+void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
+ gfp_t gfp)
+{
+ AA_BUG(!label);
+
+ if (!use_label_hname(ns, label, flags)) {
+ char *str;
+ int len;
+
+ len = aa_label_asxprint(&str, ns, label, flags, gfp);
+ if (len < 0) {
+ AA_DEBUG("label print error");
+ return;
+ }
+ pr_info("%s", str);
+ kfree(str);
+ } else if (display_mode(ns, label, flags))
+ pr_info("%s (%s)", label->hname,
+ label_modename(ns, label, flags));
+ else
+ pr_info("%s", label->hname);
+}
+
+void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp)
+{
+ struct aa_ns *ns = aa_get_current_ns();
+
+ aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp);
+ aa_put_ns(ns);
+}
+
+void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp)
+{
+ struct aa_ns *ns = aa_get_current_ns();
+
+ aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp);
+ aa_put_ns(ns);
+}
+
+void aa_label_printk(struct aa_label *label, gfp_t gfp)
+{
+ struct aa_ns *ns = aa_get_current_ns();
+
+ aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp);
+ aa_put_ns(ns);
+}
+
+static int label_count_strn_entries(const char *str, size_t n)
+{
+ const char *end = str + n;
+ const char *split;
+ int count = 1;
+
+ AA_BUG(!str);
+
+ for (split = aa_label_strn_split(str, end - str);
+ split;
+ split = aa_label_strn_split(str, end - str)) {
+ count++;
+ str = split + 3;
+ }
+
+ return count;
+}
+
+/*
+ * ensure stacks with components like
+ * :ns:A//&B
+ * have :ns: applied to both 'A' and 'B' by making the lookup relative
+ * to the base if the lookup specifies an ns, else making the stacked lookup
+ * relative to the last embedded ns in the string.
+ */
+static struct aa_profile *fqlookupn_profile(struct aa_label *base,
+ struct aa_label *currentbase,
+ const char *str, size_t n)
+{
+ const char *first = skipn_spaces(str, n);
+
+ if (first && *first == ':')
+ return aa_fqlookupn_profile(base, str, n);
+
+ return aa_fqlookupn_profile(currentbase, str, n);
+}
+
+/**
+ * aa_label_strn_parse - parse, validate and convert a text string to a label
+ * @base: base label to use for lookups (NOT NULL)
+ * @str: null terminated text string (NOT NULL)
+ * @n: length of str to parse, will stop at \0 if encountered before n
+ * @gfp: allocation type
+ * @create: true if should create compound labels if they don't exist
+ * @force_stack: true if should stack even if no leading &
+ *
+ * Returns: the matching refcounted label if present
+ * else ERR_PTR(-errno) on failure
+ */
+struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
+ size_t n, gfp_t gfp, bool create,
+ bool force_stack)
+{
+ DEFINE_VEC(profile, vec);
+ struct aa_label *label, *currbase = base;
+ int i, len, stack = 0, error;
+ const char *end = str + n;
+ const char *split;
+
+ AA_BUG(!base);
+ AA_BUG(!str);
+
+ str = skipn_spaces(str, n);
+ if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
+ base != &root_ns->unconfined->label))
+ return ERR_PTR(-EINVAL);
+
+ len = label_count_strn_entries(str, end - str);
+ if (*str == '&' || force_stack) {
+ /* stack on top of base */
+ stack = base->size;
+ len += stack;
+ if (*str == '&')
+ str++;
+ }
+
+ error = vec_setup(profile, vec, len, gfp);
+ if (error)
+ return ERR_PTR(error);
+
+ for (i = 0; i < stack; i++)
+ vec[i] = aa_get_profile(base->vec[i]);
+
+ for (split = aa_label_strn_split(str, end - str), i = stack;
+ split && i < len; i++) {
+ vec[i] = fqlookupn_profile(base, currbase, str, split - str);
+ if (!vec[i])
+ goto fail;
+ /*
+ * if component specified a new ns it becomes the new base
+ * so that subsequent lookups are relative to it
+ */
+ if (vec[i]->ns != labels_ns(currbase))
+ currbase = &vec[i]->label;
+ str = split + 3;
+ split = aa_label_strn_split(str, end - str);
+ }
+ /* last element doesn't have a split */
+ if (i < len) {
+ vec[i] = fqlookupn_profile(base, currbase, str, end - str);
+ if (!vec[i])
+ goto fail;
+ }
+ if (len == 1)
+ /* no need to free vec as len < LOCAL_VEC_ENTRIES */
+ return &vec[0]->label;
+
+ len -= aa_vec_unique(vec, len, VEC_FLAG_TERMINATE);
+ /* TODO: deal with reference labels */
+ if (len == 1) {
+ label = aa_get_label(&vec[0]->label);
+ goto out;
+ }
+
+ if (create)
+ label = aa_vec_find_or_create_label(vec, len, gfp);
+ else
+ label = vec_find(vec, len);
+ if (!label)
+ goto fail;
+
+out:
+ /* use adjusted len from after vec_unique, not original */
+ vec_cleanup(profile, vec, len);
+ return label;
+
+fail:
+ label = ERR_PTR(-ENOENT);
+ goto out;
+}
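+
+/*
+ * Illustrative examples (hypothetical inputs): with @base in the
+ * current ns, "A//&B" parses to the compound label A//&B, a leading
+ * '&' (or @force_stack) stacks the parsed profiles on top of @base's
+ * profiles, and ":ns1:A//&B" looks up both A and B relative to ns1
+ * via fqlookupn_profile().
+ */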
+
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+ gfp_t gfp, bool create, bool force_stack)
+{
+ return aa_label_strn_parse(base, str, strlen(str), gfp, create,
+ force_stack);
+}
+
+/**
+ * aa_labelset_destroy - remove all labels from the label set
+ * @ls: label set to cleanup (NOT NULL)
+ *
+ * Labels that are removed from the set may still exist beyond the set
+ * being destroyed depending on their reference counting
+ */
+void aa_labelset_destroy(struct aa_labelset *ls)
+{
+ struct rb_node *node;
+ unsigned long flags;
+
+ AA_BUG(!ls);
+
+ write_lock_irqsave(&ls->lock, flags);
+ for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
+ struct aa_label *this = rb_entry(node, struct aa_label, node);
+
+ if (labels_ns(this) != root_ns)
+ __label_remove(this,
+ ns_unconfined(labels_ns(this)->parent));
+ else
+ __label_remove(this, NULL);
+ }
+ write_unlock_irqrestore(&ls->lock, flags);
+}
+
+/**
+ * aa_labelset_init - initialize a label set
+ * @ls: labelset to init (NOT NULL)
+ */
+void aa_labelset_init(struct aa_labelset *ls)
+{
+ AA_BUG(!ls);
+
+ rwlock_init(&ls->lock);
+ ls->root = RB_ROOT;
+}
+
+static struct aa_label *labelset_next_stale(struct aa_labelset *ls)
+{
+ struct aa_label *label;
+ struct rb_node *node;
+ unsigned long flags;
+
+ AA_BUG(!ls);
+
+ read_lock_irqsave(&ls->lock, flags);
+
+ __labelset_for_each(ls, node) {
+ label = rb_entry(node, struct aa_label, node);
+ if ((label_is_stale(label) ||
+ vec_is_stale(label->vec, label->size)) &&
+ __aa_get_label(label))
+ goto out;
+ }
+ label = NULL;
+
+out:
+ read_unlock_irqrestore(&ls->lock, flags);
+
+ return label;
+}
+
+/**
+ * __label_update - insert updated version of @label into labelset
+ * @label: the label to update/replace
+ *
+ * Returns: new label that is up to date
+ * else NULL on failure
+ *
+ * Requires: @ns lock be held
+ *
+ * Note: worst case is the stale @label does not get updated and has
+ * to be updated at a later time.
+ */
+static struct aa_label *__label_update(struct aa_label *label)
+{
+ struct aa_label *new, *tmp;
+ struct aa_labelset *ls;
+ unsigned long flags;
+ int i, invcount = 0;
+
+ AA_BUG(!label);
+ AA_BUG(!mutex_is_locked(&labels_ns(label)->lock));
+
+ new = aa_label_alloc(label->size, label->proxy, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ /*
+ * holding the ns_lock stops profile replacement, removal, and
+ * label updates, but label merging and removal can still occur
+ */
+ ls = labels_set(label);
+ write_lock_irqsave(&ls->lock, flags);
+ for (i = 0; i < label->size; i++) {
+ AA_BUG(!label->vec[i]);
+ new->vec[i] = aa_get_newest_profile(label->vec[i]);
+ AA_BUG(!new->vec[i]);
+ AA_BUG(!new->vec[i]->label.proxy);
+ AA_BUG(!new->vec[i]->label.proxy->label);
+ if (new->vec[i]->label.proxy != label->vec[i]->label.proxy)
+ invcount++;
+ }
+
+ /* a component of the stale label was removed/renamed from its labelset */
+ if (invcount) {
+ new->size -= aa_vec_unique(&new->vec[0], new->size,
+ VEC_FLAG_TERMINATE);
+ /* TODO: deal with reference labels */
+ if (new->size == 1) {
+ tmp = aa_get_label(&new->vec[0]->label);
+ AA_BUG(tmp == label);
+ goto remove;
+ }
+ if (labels_set(label) != labels_set(new)) {
+ write_unlock_irqrestore(&ls->lock, flags);
+ tmp = aa_label_insert(labels_set(new), new);
+ write_lock_irqsave(&ls->lock, flags);
+ goto remove;
+ }
+ } else
+ AA_BUG(labels_ns(label) != labels_ns(new));
+
+ tmp = __label_insert(labels_set(label), new, true);
+remove:
+ /* ensure label is removed, and redirected correctly */
+ __label_remove(label, tmp);
+ write_unlock_irqrestore(&ls->lock, flags);
+ label_free_or_put_new(tmp, new);
+
+ return tmp;
+}
+
+/**
+ * __labelset_update - update labels in @ns
+ * @ns: namespace to update labels in (NOT NULL)
+ *
+ * Requires: @ns lock be held
+ *
+ * Walk the labelset ensuring that all labels are up to date and valid
+ * Any label that has a stale component is marked stale and replaced
+ * by an updated version.
+ *
+ * If failures happen due to memory pressure then stale labels will
+ * be left in place until the next pass.
+ */
+static void __labelset_update(struct aa_ns *ns)
+{
+ struct aa_label *label;
+
+ AA_BUG(!ns);
+ AA_BUG(!mutex_is_locked(&ns->lock));
+
+ do {
+ label = labelset_next_stale(&ns->labels);
+ if (label) {
+ struct aa_label *l = __label_update(label);
+
+ aa_put_label(l);
+ aa_put_label(label);
+ }
+ } while (label);
+}
+
+/**
+ * __aa_labelset_update_subtree - update all labels with a stale component
+ * @ns: ns to start update at (NOT NULL)
+ *
+ * Requires: @ns lock be held
+ *
+ * Updates labels with stale components in @ns and any children namespaces.
+ */
+void __aa_labelset_update_subtree(struct aa_ns *ns)
+{
+ struct aa_ns *child;
+
+ AA_BUG(!ns);
+ AA_BUG(!mutex_is_locked(&ns->lock));
+
+ __labelset_update(ns);
+
+ list_for_each_entry(child, &ns->sub_ns, base.list) {
+ mutex_lock_nested(&child->lock, child->level);
+ __aa_labelset_update_subtree(child);
+ mutex_unlock(&child->lock);
+ }
+}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
new file mode 100644
index 000000000..1c72a6110
--- /dev/null
+++ b/security/apparmor/lib.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains basic common functions used in AppArmor
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include "include/audit.h"
+#include "include/apparmor.h"
+#include "include/lib.h"
+#include "include/perms.h"
+#include "include/policy.h"
+
+struct aa_perms nullperms;
+struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
+ .quiet = ALL_PERMS_MASK,
+ .hide = ALL_PERMS_MASK };
+
+/**
+ * aa_split_fqname - split a fqname into a profile and namespace name
+ * @fqname: a fully qualified name in namespace profile format (NOT NULL)
+ * @ns_name: pointer to portion of the string containing the ns name (NOT NULL)
+ *
+ * Returns: profile name or NULL if one is not specified
+ *
+ * Split a namespace name from a profile name (see policy.c for naming
+ * description). If a portion of the name is missing it returns NULL for
+ * that portion.
+ *
+ * NOTE: may modify the @fqname string. The pointers returned point
+ * into the @fqname string.
+ */
+char *aa_split_fqname(char *fqname, char **ns_name)
+{
+ char *name = strim(fqname);
+
+ *ns_name = NULL;
+ if (name[0] == ':') {
+ char *split = strchr(&name[1], ':');
+ *ns_name = skip_spaces(&name[1]);
+ if (split) {
+ /* overwrite ':' with \0 */
+ *split++ = 0;
+ if (strncmp(split, "//", 2) == 0)
+ split += 2;
+ name = skip_spaces(split);
+ } else
+ /* a ns name without a following profile is allowed */
+ name = NULL;
+ }
+ if (name && *name == 0)
+ name = NULL;
+
+ return name;
+}
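+
+/*
+ * Example (illustrative): ":ns:profile" yields *ns_name == "ns" and
+ * returns "profile"; ":ns:" yields *ns_name == "ns" and returns NULL;
+ * "profile" leaves *ns_name NULL and returns "profile". The second
+ * ':' is overwritten with a NUL in place.
+ */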
+
+/**
+ * skipn_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ * @n: maximum number of characters to examine
+ *
+ * Returns a pointer to the first non-whitespace character in @str,
+ * else NULL if the first @n characters are all whitespace
+ */
+const char *skipn_spaces(const char *str, size_t n)
+{
+ for (; n && isspace(*str); --n)
+ ++str;
+ if (n)
+ return str;
+ return NULL;
+}
+
+const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
+ size_t *ns_len)
+{
+ const char *end = fqname + n;
+ const char *name = skipn_spaces(fqname, n);
+
+ *ns_name = NULL;
+ *ns_len = 0;
+
+ if (!name)
+ return NULL;
+
+ if (name[0] == ':') {
+ char *split = strnchr(&name[1], end - &name[1], ':');
+ *ns_name = skipn_spaces(&name[1], end - &name[1]);
+ if (!*ns_name)
+ return NULL;
+ if (split) {
+ *ns_len = split - *ns_name;
+ if (*ns_len == 0)
+ *ns_name = NULL;
+ split++;
+ if (end - split > 1 && strncmp(split, "//", 2) == 0)
+ split += 2;
+ name = skipn_spaces(split, end - split);
+ } else {
+ /* a ns name without a following profile is allowed */
+ name = NULL;
+ *ns_len = end - *ns_name;
+ }
+ }
+ if (name && *name == 0)
+ name = NULL;
+
+ return name;
+}
+
+/**
+ * aa_info_message - log a non-profile related status message
+ * @str: message to log
+ */
+void aa_info_message(const char *str)
+{
+ if (audit_enabled) {
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
+
+ aad(&sa)->info = str;
+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
+ }
+ printk(KERN_INFO "AppArmor: %s\n", str);
+}
+
+__counted char *aa_str_alloc(int size, gfp_t gfp)
+{
+ struct counted_str *str;
+
+ str = kmalloc(struct_size(str, name, size), gfp);
+ if (!str)
+ return NULL;
+
+ kref_init(&str->count);
+ return str->name;
+}
+
+void aa_str_kref(struct kref *kref)
+{
+ kfree(container_of(kref, struct counted_str, count));
+}
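+
+/*
+ * Layout note (descriptive): aa_str_alloc() hands back str->name, the
+ * flexible array member of struct counted_str, so when the last
+ * reference is put the owning counted_str is recovered with
+ * container_of() and freed by aa_str_kref().
+ */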
+
+
+const char aa_file_perm_chrs[] = "xwracd km l ";
+const char *aa_file_perm_names[] = {
+ "exec",
+ "write",
+ "read",
+ "append",
+
+ "create",
+ "delete",
+ "open",
+ "rename",
+
+ "setattr",
+ "getattr",
+ "setcred",
+ "getcred",
+
+ "chmod",
+ "chown",
+ "chgrp",
+ "lock",
+
+ "mmap",
+ "mprot",
+ "link",
+ "snapshot",
+
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+
+ "stack",
+ "change_onexec",
+ "change_profile",
+ "change_hat",
+};
+
+/**
+ * aa_perm_mask_to_str - convert a perm mask to its short string
+ * @str: character buffer to store string in (at least 10 characters)
+ * @str_size: size of the @str buffer
+ * @chrs: NUL-terminated character buffer of permission characters
+ * @mask: permission mask to convert
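+ *
+ * For example, with @chrs == aa_file_perm_chrs, a @mask with only the
+ * read and write bits set yields the string "wr".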
+ */
+void aa_perm_mask_to_str(char *str, size_t str_size, const char *chrs, u32 mask)
+{
+ unsigned int i, perm = 1;
+ size_t num_chrs = strlen(chrs);
+
+ for (i = 0; i < num_chrs; perm <<= 1, i++) {
+ if (mask & perm) {
+ /* Ensure that one byte is left for NUL-termination */
+ if (WARN_ON_ONCE(str_size <= 1))
+ break;
+
+ *str++ = chrs[i];
+ str_size--;
+ }
+ }
+ *str = '\0';
+}
+
+void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
+ u32 mask)
+{
+ const char *fmt = "%s";
+ unsigned int i, perm = 1;
+ bool prev = false;
+
+ for (i = 0; i < 32; perm <<= 1, i++) {
+ if (mask & perm) {
+ audit_log_format(ab, fmt, names[i]);
+ if (!prev) {
+ prev = true;
+ fmt = " %s";
+ }
+ }
+ }
+}
+
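+/**
+ * aa_audit_perm_mask - log a permission mask as a quoted string
+ * @ab: audit buffer (NOT NULL)
+ * @mask: permission mask to log
+ * @chrs: character table for the bits selected by @chrsmask (MAY BE NULL)
+ * @chrsmask: subset of @mask to log as single characters
+ * @names: name table for the bits selected by @namesmask (MAY BE NULL)
+ * @namesmask: subset of @mask to log as full names
+ *
+ * E.g. the logged value may look like "wr setattr".
+ */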
+void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+ u32 chrsmask, const char * const *names, u32 namesmask)
+{
+ char str[33];
+
+ audit_log_format(ab, "\"");
+ if ((mask & chrsmask) && chrs) {
+ aa_perm_mask_to_str(str, sizeof(str), chrs, mask & chrsmask);
+ mask &= ~chrsmask;
+ audit_log_format(ab, "%s", str);
+ if (mask & namesmask)
+ audit_log_format(ab, " ");
+ }
+ if ((mask & namesmask) && names)
+ aa_audit_perm_names(ab, names, mask & namesmask);
+ audit_log_format(ab, "\"");
+}
+
+/**
+ * aa_audit_perms_cb - generic callback fn for auditing perms
+ * @ab: audit buffer (NOT NULL)
+ * @va: common audit data containing the values to audit (NOT NULL)
+ */
+static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->request) {
+ audit_log_format(ab, " requested_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+ if (aad(sa)->denied) {
+ audit_log_format(ab, "denied_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+}
+
+/**
+ * aa_apply_modes_to_perms - apply namespace and profile flags to perms
+ * @profile: profile that perms were computed from
+ * @perms: perms to apply mode modifiers to
+ *
+ * TODO: split into profile and ns based flags for when accumulating perms
+ */
+void aa_apply_modes_to_perms(struct aa_profile *profile, struct aa_perms *perms)
+{
+ switch (AUDIT_MODE(profile)) {
+ case AUDIT_ALL:
+ perms->audit = ALL_PERMS_MASK;
+ fallthrough;
+ case AUDIT_NOQUIET:
+ perms->quiet = 0;
+ break;
+ case AUDIT_QUIET:
+ perms->audit = 0;
+ fallthrough;
+ case AUDIT_QUIET_DENIED:
+ perms->quiet = ALL_PERMS_MASK;
+ break;
+ }
+
+ if (KILL_MODE(profile))
+ perms->kill = ALL_PERMS_MASK;
+ else if (COMPLAIN_MODE(profile))
+ perms->complain = ALL_PERMS_MASK;
+/*
+ * TODO:
+ * else if (PROMPT_MODE(profile))
+ * perms->prompt = ALL_PERMS_MASK;
+ */
+}
+
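+/*
+ * Helpers that relocate policydb permission bits into the aa_perms bit
+ * layout: map_other() moves bits 0-1 to bits 8-9 (SETATTR/GETATTR), bits
+ * 2-4 to bits 20-22 (ACCEPT/BIND/LISTEN), and bits 5-6 to bits 24-25
+ * (SETOPT/GETOPT); map_xbits() moves bit 0 to bit 7 and bits 1-6 to
+ * bits 10-15.
+ */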
+static u32 map_other(u32 x)
+{
+ return ((x & 0x3) << 8) | /* SETATTR/GETATTR */
+ ((x & 0x1c) << 18) | /* ACCEPT/BIND/LISTEN */
+ ((x & 0x60) << 19); /* SETOPT/GETOPT */
+}
+
+static u32 map_xbits(u32 x)
+{
+ return ((x & 0x1) << 7) |
+ ((x & 0x7e) << 9);
+}
+
+void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
+ struct aa_perms *perms)
+{
+ /* This mapping is convoluted due to history.
+ * v1-v4: only file perms
+ * v5: added policydb which dropped in perm user conditional to
+ * gain new perm bits, but had to map around the xbits because
+ * the userspace compiler was still munging them.
+ * v9: adds using the xbits in policydb because the compiler now
+ * supports treating policydb permission bits differently.
+ * Unfortunately there is no way to force auditing on the
+ * perms represented by the xbits.
+ */
+ *perms = (struct aa_perms) {
+ .allow = dfa_user_allow(dfa, state) |
+ map_xbits(dfa_user_xbits(dfa, state)),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state) |
+ map_xbits(dfa_other_xbits(dfa, state)),
+ };
+
+ /* for v5-v9 perm mapping in the policydb, the other set is used
+ * to extend the general perm set
+ */
+ perms->allow |= map_other(dfa_other_allow(dfa, state));
+ perms->audit |= map_other(dfa_other_audit(dfa, state));
+ perms->quiet |= map_other(dfa_other_quiet(dfa, state));
+}
+
+/**
+ * aa_perms_accum_raw - accumulate perms without masking off overlapping perms
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
+ */
+void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend)
+{
+ accum->deny |= addend->deny;
+ accum->allow &= addend->allow & ~addend->deny;
+ accum->audit |= addend->audit & addend->allow;
+ accum->quiet &= addend->quiet & ~addend->allow;
+ accum->kill |= addend->kill & ~addend->allow;
+ accum->stop |= addend->stop & ~addend->allow;
+ accum->complain |= addend->complain & ~addend->allow & ~addend->deny;
+ accum->cond |= addend->cond & ~addend->allow & ~addend->deny;
+ accum->hide &= addend->hide & ~addend->allow;
+ accum->prompt |= addend->prompt & ~addend->allow & ~addend->deny;
+}
+
+/**
+ * aa_perms_accum - accumulate perms, masking off overlapping perms
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
+ *
+ * Unlike aa_perms_accum_raw(), the masking here is done against the
+ * already accumulated @accum->deny and @accum->allow, so each step
+ * tightens against the running result rather than only against @addend.
+ */
+void aa_perms_accum(struct aa_perms *accum, struct aa_perms *addend)
+{
+ accum->deny |= addend->deny;
+ accum->allow &= addend->allow & ~accum->deny;
+ accum->audit |= addend->audit & accum->allow;
+ accum->quiet &= addend->quiet & ~accum->allow;
+ accum->kill |= addend->kill & ~accum->allow;
+ accum->stop |= addend->stop & ~accum->allow;
+ accum->complain |= addend->complain & ~accum->allow & ~accum->deny;
+ accum->cond |= addend->cond & ~accum->allow & ~accum->deny;
+ accum->hide &= addend->hide & ~accum->allow;
+ accum->prompt |= addend->prompt & ~accum->allow & ~accum->deny;
+}
+
+void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label,
+ int type, u32 request, struct aa_perms *perms)
+{
+ /* TODO: doesn't yet handle extended types */
+ unsigned int state;
+
+ state = aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_LABEL],
+ type);
+ aa_label_match(profile, label, state, false, request, perms);
+}
+
+
+/* currently unused */
+int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+ struct common_audit_data *sa)
+{
+ struct aa_perms perms;
+
+ aad(sa)->label = &profile->label;
+ aad(sa)->peer = &target->label;
+ aad(sa)->request = request;
+
+ aa_profile_match_label(profile, &target->label, type, request, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ *deny |= request & perms.deny;
+ return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
+}
+
+/**
+ * aa_check_perms - do audit mode selection based on perms set
+ * @profile: profile being checked
+ * @perms: perms computed for the request
+ * @request: requested perms
+ * @sa: initialized audit structure (MAY BE NULL if not auditing)
+ * @cb: callback fn for type specific fields (MAY BE NULL)
+ *
+ * Returns: 0 if permission granted, else error code
+ *
+ * Note: profile audit modes need to be set before calling by setting the
+ * perm masks appropriately.
+ *
+ * If not auditing then complain mode is not enabled and the
+ * error code will indicate whether there was an explicit deny
+ * with a positive value.
+ */
+int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ u32 request, struct common_audit_data *sa,
+ void (*cb)(struct audit_buffer *, void *))
+{
+ int type, error;
+ u32 denied = request & (~perms->allow | perms->deny);
+
+ if (likely(!denied)) {
+ /* mask off perms that are not being force audited */
+ request &= perms->audit;
+ if (!request || !sa)
+ return 0;
+
+ type = AUDIT_APPARMOR_AUDIT;
+ error = 0;
+ } else {
+ error = -EACCES;
+
+ if (denied & perms->kill)
+ type = AUDIT_APPARMOR_KILL;
+ else if (denied == (denied & perms->complain))
+ type = AUDIT_APPARMOR_ALLOWED;
+ else
+ type = AUDIT_APPARMOR_DENIED;
+
+ if (denied == (denied & perms->hide))
+ error = -ENOENT;
+
+ denied &= ~perms->quiet;
+ if (!sa || !denied)
+ return error;
+ }
+
+ if (sa) {
+ aad(sa)->label = &profile->label;
+ aad(sa)->request = request;
+ aad(sa)->denied = denied;
+ aad(sa)->error = error;
+ aa_audit_msg(type, sa, cb);
+ }
+
+ if (type == AUDIT_APPARMOR_ALLOWED)
+ error = 0;
+
+ return error;
+}
+
+
+/**
+ * aa_policy_init - initialize a policy structure
+ * @policy: policy to initialize (NOT NULL)
+ * @prefix: prefix to prepend to the policy name, if any (MAY BE NULL)
+ * @name: name of the policy, init will make a copy of it (NOT NULL)
+ * @gfp: allocation mode
+ *
+ * Note: this fn creates a copy of strings passed in
+ *
+ * Returns: true if policy init successful
+ */
+bool aa_policy_init(struct aa_policy *policy, const char *prefix,
+ const char *name, gfp_t gfp)
+{
+ char *hname;
+
+ /* freed by policy_free */
+ if (prefix) {
+ hname = aa_str_alloc(strlen(prefix) + strlen(name) + 3, gfp);
+ if (hname)
+ sprintf(hname, "%s//%s", prefix, name);
+ } else {
+ hname = aa_str_alloc(strlen(name) + 1, gfp);
+ if (hname)
+ strcpy(hname, name);
+ }
+ if (!hname)
+ return false;
+ policy->hname = hname;
+ /* base.name is a substring of fqname */
+ policy->name = basename(policy->hname);
+ INIT_LIST_HEAD(&policy->list);
+ INIT_LIST_HEAD(&policy->profiles);
+
+ return true;
+}
+
+/**
+ * aa_policy_destroy - free the elements referenced by @policy
+ * @policy: policy that is to have its elements freed (NOT NULL)
+ */
+void aa_policy_destroy(struct aa_policy *policy)
+{
+ AA_BUG(on_list_rcu(&policy->profiles));
+ AA_BUG(on_list_rcu(&policy->list));
+
+ /* don't free name as it's a subset of hname */
+ aa_put_str(policy->hname);
+}
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
new file mode 100644
index 000000000..1e2f40db1
--- /dev/null
+++ b/security/apparmor/lsm.c
@@ -0,0 +1,1929 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor LSM hooks.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/lsm_hooks.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/ptrace.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/audit.h>
+#include <linux/user_namespace.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/zlib.h>
+#include <net/sock.h>
+#include <uapi/linux/mount.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/cred.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/net.h"
+#include "include/path.h"
+#include "include/label.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+#include "include/procattr.h"
+#include "include/mount.h"
+#include "include/secid.h"
+
+/* Flag indicating whether initialization completed */
+int apparmor_initialized;
+
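+/*
+ * Free pathname buffers are kept on aa_global_buffers; the list_head
+ * member of the union overlays the start of the buffer storage itself,
+ * so idle buffers carry no extra memory overhead.
+ */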
+union aa_buffer {
+ struct list_head list;
+ char buffer[1];
+};
+
+#define RESERVE_COUNT 2
+static int reserve_count = RESERVE_COUNT;
+static int buffer_count;
+
+static LIST_HEAD(aa_global_buffers);
+static DEFINE_SPINLOCK(aa_buffers_lock);
+
+/*
+ * LSM hook functions
+ */
+
+/*
+ * put the associated labels
+ */
+static void apparmor_cred_free(struct cred *cred)
+{
+ aa_put_label(cred_label(cred));
+ set_cred_label(cred, NULL);
+}
+
+/*
+ * allocate the apparmor part of blank credentials
+ */
+static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ set_cred_label(cred, NULL);
+ return 0;
+}
+
+/*
+ * prepare new cred label for modification by prepare_cred block
+ */
+static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
+ gfp_t gfp)
+{
+ set_cred_label(new, aa_get_newest_label(cred_label(old)));
+ return 0;
+}
+
+/*
+ * transfer the apparmor data to a blank set of creds
+ */
+static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
+{
+ set_cred_label(new, aa_get_newest_label(cred_label(old)));
+}
+
+static void apparmor_task_free(struct task_struct *task)
+{
+ aa_free_task_ctx(task_ctx(task));
+}
+
+static int apparmor_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ struct aa_task_ctx *new = task_ctx(task);
+
+ aa_dup_task_ctx(new, task_ctx(current));
+
+ return 0;
+}
+
+static int apparmor_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ struct aa_label *tracer, *tracee;
+ int error;
+
+ tracer = __begin_current_label_crit_section();
+ tracee = aa_get_task_label(child);
+ error = aa_may_ptrace(tracer, tracee,
+ (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
+ : AA_PTRACE_TRACE);
+ aa_put_label(tracee);
+ __end_current_label_crit_section(tracer);
+
+ return error;
+}
+
+static int apparmor_ptrace_traceme(struct task_struct *parent)
+{
+ struct aa_label *tracer, *tracee;
+ int error;
+
+ tracee = __begin_current_label_crit_section();
+ tracer = aa_get_task_label(parent);
+ error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
+ aa_put_label(tracer);
+ __end_current_label_crit_section(tracee);
+
+ return error;
+}
+
+/* Derived from security/commoncap.c:cap_capget */
+static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+ struct aa_label *label;
+ const struct cred *cred;
+
+ rcu_read_lock();
+ cred = __task_cred(target);
+ label = aa_get_newest_cred_label(cred);
+
+ /*
+ * cap_capget is stacked ahead of this and will
+ * initialize effective and permitted.
+ */
+ if (!unconfined(label)) {
+ struct aa_profile *profile;
+ struct label_it i;
+
+ label_for_each_confined(i, label, profile) {
+ if (COMPLAIN_MODE(profile))
+ continue;
+ *effective = cap_intersect(*effective,
+ profile->caps.allow);
+ *permitted = cap_intersect(*permitted,
+ profile->caps.allow);
+ }
+ }
+ rcu_read_unlock();
+ aa_put_label(label);
+
+ return 0;
+}
+
+static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ label = aa_get_newest_cred_label(cred);
+ if (!unconfined(label))
+ error = aa_capable(label, cap, opts);
+ aa_put_label(label);
+
+ return error;
+}
+
+/**
+ * common_perm - basic common permission check wrapper fn for paths
+ * @op: operation being checked
+ * @path: path to check permission of (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request (NOT NULL)
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm(const char *op, const struct path *path, u32 mask,
+ struct path_cond *cond)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+ error = aa_path_perm(op, label, path, 0, mask, cond);
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+/**
+ * common_perm_cond - common permission wrapper around inode cond
+ * @op: operation being checked
+ * @path: location to check (NOT NULL)
+ * @mask: requested permissions mask
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_cond(const char *op, const struct path *path, u32 mask)
+{
+ struct user_namespace *mnt_userns = mnt_user_ns(path->mnt);
+ struct path_cond cond = {
+ i_uid_into_mnt(mnt_userns, d_backing_inode(path->dentry)),
+ d_backing_inode(path->dentry)->i_mode
+ };
+
+ if (!path_mediated_fs(path->dentry))
+ return 0;
+
+ return common_perm(op, path, mask, &cond);
+}
+
+/**
+ * common_perm_dir_dentry - common permission wrapper when path is dir, dentry
+ * @op: operation being checked
+ * @dir: directory of the dentry (NOT NULL)
+ * @dentry: dentry to check (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request (NOT NULL)
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_dir_dentry(const char *op, const struct path *dir,
+ struct dentry *dentry, u32 mask,
+ struct path_cond *cond)
+{
+ struct path path = { .mnt = dir->mnt, .dentry = dentry };
+
+ return common_perm(op, &path, mask, cond);
+}
+
+/**
+ * common_perm_rm - common permission wrapper for operations doing rm
+ * @op: operation being checked
+ * @dir: directory that the dentry is in (NOT NULL)
+ * @dentry: dentry being rm'd (NOT NULL)
+ * @mask: requested permission mask
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_rm(const char *op, const struct path *dir,
+ struct dentry *dentry, u32 mask)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct user_namespace *mnt_userns = mnt_user_ns(dir->mnt);
+ struct path_cond cond = { };
+
+ if (!inode || !path_mediated_fs(dentry))
+ return 0;
+
+ cond.uid = i_uid_into_mnt(mnt_userns, inode);
+ cond.mode = inode->i_mode;
+
+ return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+/**
+ * common_perm_create - common permission wrapper for operations doing create
+ * @op: operation being checked
+ * @dir: directory that dentry will be created in (NOT NULL)
+ * @dentry: dentry to create (NOT NULL)
+ * @mask: request permission mask
+ * @mode: created file mode
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_create(const char *op, const struct path *dir,
+ struct dentry *dentry, u32 mask, umode_t mode)
+{
+ struct path_cond cond = { current_fsuid(), mode };
+
+ if (!path_mediated_fs(dir->dentry))
+ return 0;
+
+ return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+static int apparmor_path_unlink(const struct path *dir, struct dentry *dentry)
+{
+ return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mkdir(const struct path *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
+ S_IFDIR);
+}
+
+static int apparmor_path_rmdir(const struct path *dir, struct dentry *dentry)
+{
+ return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
+}
+
+static int apparmor_path_truncate(const struct path *path)
+{
+ return common_perm_cond(OP_TRUNC, path, MAY_WRITE | AA_MAY_SETATTR);
+}
+
+static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+{
+ return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
+ S_IFLNK);
+}
+
+static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ if (!path_mediated_fs(old_dentry))
+ return 0;
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label))
+ error = aa_path_link(label, old_dentry, new_dir, new_dentry);
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry,
+ const unsigned int flags)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ if (!path_mediated_fs(old_dentry))
+ return 0;
+ if ((flags & RENAME_EXCHANGE) && !path_mediated_fs(new_dentry))
+ return 0;
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ struct user_namespace *mnt_userns = mnt_user_ns(old_dir->mnt);
+ struct path old_path = { .mnt = old_dir->mnt,
+ .dentry = old_dentry };
+ struct path new_path = { .mnt = new_dir->mnt,
+ .dentry = new_dentry };
+ struct path_cond cond = {
+ i_uid_into_mnt(mnt_userns, d_backing_inode(old_dentry)),
+ d_backing_inode(old_dentry)->i_mode
+ };
+
+ if (flags & RENAME_EXCHANGE) {
+ struct path_cond cond_exchange = {
+ i_uid_into_mnt(mnt_userns, d_backing_inode(new_dentry)),
+ d_backing_inode(new_dentry)->i_mode
+ };
+
+ error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond_exchange);
+ if (!error)
+ error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond_exchange);
+ }
+
+ if (!error)
+ error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond);
+ if (!error)
+ error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond);
+
+ }
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_path_chmod(const struct path *path, umode_t mode)
+{
+ return common_perm_cond(OP_CHMOD, path, AA_MAY_CHMOD);
+}
+
+static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
+{
+ return common_perm_cond(OP_CHOWN, path, AA_MAY_CHOWN);
+}
+
+static int apparmor_inode_getattr(const struct path *path)
+{
+ return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
+}
+
+static int apparmor_file_open(struct file *file)
+{
+ struct aa_file_ctx *fctx = file_ctx(file);
+ struct aa_label *label;
+ int error = 0;
+
+ if (!path_mediated_fs(file->f_path.dentry))
+ return 0;
+
+ /* If in exec, permission is handled by bprm hooks.
+ * Cache permissions granted by the previous exec check, with
+ * implicit read and executable mmap which are required to
+ * actually execute the image.
+ */
+ if (current->in_execve) {
+ fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
+ return 0;
+ }
+
+ label = aa_get_newest_cred_label(file->f_cred);
+ if (!unconfined(label)) {
+ struct user_namespace *mnt_userns = file_mnt_user_ns(file);
+ struct inode *inode = file_inode(file);
+ struct path_cond cond = {
+ i_uid_into_mnt(mnt_userns, inode),
+ inode->i_mode
+ };
+
+ error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
+ aa_map_file_to_perms(file), &cond);
+ /* todo cache full allowed permissions set and state */
+ fctx->allow = aa_map_file_to_perms(file);
+ }
+ aa_put_label(label);
+
+ return error;
+}
+
+static int apparmor_file_alloc_security(struct file *file)
+{
+ struct aa_file_ctx *ctx = file_ctx(file);
+ struct aa_label *label = begin_current_label_crit_section();
+
+ spin_lock_init(&ctx->lock);
+ rcu_assign_pointer(ctx->label, aa_get_label(label));
+ end_current_label_crit_section(label);
+ return 0;
+}
+
+static void apparmor_file_free_security(struct file *file)
+{
+ struct aa_file_ctx *ctx = file_ctx(file);
+
+ if (ctx)
+ aa_put_label(rcu_access_pointer(ctx->label));
+}
+
+static int common_file_perm(const char *op, struct file *file, u32 mask,
+ bool in_atomic)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ /* don't reaudit files closed during inheritance */
+ if (file->f_path.dentry == aa_null.dentry)
+ return -EACCES;
+
+ label = __begin_current_label_crit_section();
+ error = aa_file_perm(op, label, file, mask, in_atomic);
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_file_receive(struct file *file)
+{
+ return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
+ false);
+}
+
+static int apparmor_file_permission(struct file *file, int mask)
+{
+ return common_file_perm(OP_FPERM, file, mask, false);
+}
+
+static int apparmor_file_lock(struct file *file, unsigned int cmd)
+{
+ u32 mask = AA_MAY_LOCK;
+
+ if (cmd == F_WRLCK)
+ mask |= MAY_WRITE;
+
+ return common_file_perm(OP_FLOCK, file, mask, false);
+}
+
+static int common_mmap(const char *op, struct file *file, unsigned long prot,
+ unsigned long flags, bool in_atomic)
+{
+ int mask = 0;
+
+ if (!file || !file_ctx(file))
+ return 0;
+
+ if (prot & PROT_READ)
+ mask |= MAY_READ;
+ /*
+ * Private mappings don't require write perms since they don't
+ * write back to the files
+ */
+ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
+ mask |= MAY_WRITE;
+ if (prot & PROT_EXEC)
+ mask |= AA_EXEC_MMAP;
+
+ return common_file_perm(op, file, mask, in_atomic);
+}
+
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ return common_mmap(OP_FMMAP, file, prot, flags, true);
+}
+
+static int apparmor_file_mprotect(struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+{
+ return common_mmap(OP_FMPROT, vma->vm_file, prot,
+ !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
+ false);
+}
+
+static int apparmor_sb_mount(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ /* Discard magic */
+ if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+ flags &= ~MS_MGC_MSK;
+
+ flags &= ~AA_MS_IGNORE_MASK;
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ if (flags & MS_REMOUNT)
+ error = aa_remount(label, path, flags, data);
+ else if (flags & MS_BIND)
+ error = aa_bind_mount(label, path, dev_name, flags);
+ else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
+ MS_UNBINDABLE))
+ error = aa_mount_change_type(label, path, flags);
+ else if (flags & MS_MOVE)
+ error = aa_move_mount(label, path, dev_name);
+ else
+ error = aa_new_mount(label, dev_name, path, type,
+ flags, data);
+ }
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+ error = aa_umount(label, mnt, flags);
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_sb_pivotroot(const struct path *old_path,
+ const struct path *new_path)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ label = aa_get_current_label();
+ if (!unconfined(label))
+ error = aa_pivotroot(label, old_path, new_path);
+ aa_put_label(label);
+
+ return error;
+}
+
+static int apparmor_getprocattr(struct task_struct *task, const char *name,
+ char **value)
+{
+ int error = -ENOENT;
+ /* released below */
+ const struct cred *cred = get_task_cred(task);
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct aa_label *label = NULL;
+
+ if (strcmp(name, "current") == 0)
+ label = aa_get_newest_label(cred_label(cred));
+ else if (strcmp(name, "prev") == 0 && ctx->previous)
+ label = aa_get_newest_label(ctx->previous);
+ else if (strcmp(name, "exec") == 0 && ctx->onexec)
+ label = aa_get_newest_label(ctx->onexec);
+ else
+ error = -EINVAL;
+
+ if (label)
+ error = aa_getprocattr(label, value);
+
+ aa_put_label(label);
+ put_cred(cred);
+
+ return error;
+}
+
+static int apparmor_setprocattr(const char *name, void *value,
+ size_t size)
+{
+ char *command, *largs = NULL, *args = value;
+ size_t arg_size;
+ int error;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SETPROCATTR);
+
+ if (size == 0)
+ return -EINVAL;
+
+ /* AppArmor requires that the buffer be NUL-terminated for now */
+ if (args[size - 1] != '\0') {
+ /* null terminate */
+ largs = args = kmalloc(size + 1, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ memcpy(args, value, size);
+ args[size] = '\0';
+ }
+
+ error = -EINVAL;
+ args = strim(args);
+ command = strsep(&args, " ");
+ if (!args)
+ goto out;
+ args = skip_spaces(args);
+ if (!*args)
+ goto out;
+
+ arg_size = size - (args - (largs ? largs : (char *) value));
+ if (strcmp(name, "current") == 0) {
+ if (strcmp(command, "changehat") == 0) {
+ error = aa_setprocattr_changehat(args, arg_size,
+ AA_CHANGE_NOFLAGS);
+ } else if (strcmp(command, "permhat") == 0) {
+ error = aa_setprocattr_changehat(args, arg_size,
+ AA_CHANGE_TEST);
+ } else if (strcmp(command, "changeprofile") == 0) {
+ error = aa_change_profile(args, AA_CHANGE_NOFLAGS);
+ } else if (strcmp(command, "permprofile") == 0) {
+ error = aa_change_profile(args, AA_CHANGE_TEST);
+ } else if (strcmp(command, "stack") == 0) {
+ error = aa_change_profile(args, AA_CHANGE_STACK);
+ } else
+ goto fail;
+ } else if (strcmp(name, "exec") == 0) {
+ if (strcmp(command, "exec") == 0)
+ error = aa_change_profile(args, AA_CHANGE_ONEXEC);
+ else if (strcmp(command, "stack") == 0)
+ error = aa_change_profile(args, (AA_CHANGE_ONEXEC |
+ AA_CHANGE_STACK));
+ else
+ goto fail;
+ } else
+ /* only support the "current" and "exec" process attributes */
+ goto fail;
+
+ if (!error)
+ error = size;
+out:
+ kfree(largs);
+ return error;
+
+fail:
+ aad(&sa)->label = begin_current_label_crit_section();
+ aad(&sa)->info = name;
+ aad(&sa)->error = error = -EINVAL;
+ aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+ end_current_label_crit_section(aad(&sa)->label);
+ goto out;
+}
+
+/**
+ * apparmor_bprm_committing_creds - do task cleanup on committing new creds
+ * @bprm: binprm for the exec (NOT NULL)
+ */
+static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct aa_label *label = aa_current_raw_label();
+ struct aa_label *new_label = cred_label(bprm->cred);
+
+ /* bail out if unconfined or not changing profile */
+ if ((new_label->proxy == label->proxy) ||
+ (unconfined(new_label)))
+ return;
+
+ aa_inherit_files(bprm->cred, current->files);
+
+ current->pdeath_signal = 0;
+
+ /* reset soft limits and set hard limits for the new label */
+ __aa_transition_rlimits(label, new_label);
+}
+
+/**
+ * apparmor_bprm_committed_creds - do cleanup after new creds committed
+ * @bprm: binprm for the exec (NOT NULL)
+ */
+static void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
+{
+ /* clear out temporary/transitional state from the context */
+ aa_clear_task_ctx_trans(task_ctx(current));
+}
+
+static void apparmor_current_getsecid_subj(u32 *secid)
+{
+ struct aa_label *label = aa_get_current_label();
+ *secid = label->secid;
+ aa_put_label(label);
+}
+
+static void apparmor_task_getsecid_obj(struct task_struct *p, u32 *secid)
+{
+ struct aa_label *label = aa_get_task_label(p);
+ *secid = label->secid;
+ aa_put_label(label);
+}
+
+static int apparmor_task_setrlimit(struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+{
+ struct aa_label *label = __begin_current_label_crit_section();
+ int error = 0;
+
+ if (!unconfined(label))
+ error = aa_task_setrlimit(label, task, resource, new_rlim);
+ __end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+{
+ struct aa_label *cl, *tl;
+ int error;
+
+ if (cred) {
+ /*
+ * Dealing with USB IO specific behavior
+ */
+ cl = aa_get_newest_cred_label(cred);
+ tl = aa_get_task_label(target);
+ error = aa_may_signal(cl, tl, sig);
+ aa_put_label(cl);
+ aa_put_label(tl);
+ return error;
+ }
+
+ cl = __begin_current_label_crit_section();
+ tl = aa_get_task_label(target);
+ error = aa_may_signal(cl, tl, sig);
+ aa_put_label(tl);
+ __end_current_label_crit_section(cl);
+
+ return error;
+}
+
+/**
+ * apparmor_sk_alloc_security - allocate and attach the sk_security field
+ */
+static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
+{
+ struct aa_sk_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), flags);
+ if (!ctx)
+ return -ENOMEM;
+
+ SK_CTX(sk) = ctx;
+
+ return 0;
+}
+
+/**
+ * apparmor_sk_free_security - free the sk_security field
+ */
+static void apparmor_sk_free_security(struct sock *sk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ SK_CTX(sk) = NULL;
+ aa_put_label(ctx->label);
+ aa_put_label(ctx->peer);
+ kfree(ctx);
+}
+
+/**
+ * apparmor_sk_clone_security - clone the sk_security field
+ */
+static void apparmor_sk_clone_security(const struct sock *sk,
+ struct sock *newsk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *new = SK_CTX(newsk);
+
+ if (new->label)
+ aa_put_label(new->label);
+ new->label = aa_get_label(ctx->label);
+
+ if (new->peer)
+ aa_put_label(new->peer);
+ new->peer = aa_get_label(ctx->peer);
+}
+
+/**
+ * apparmor_socket_create - check perms before creating a new socket
+ */
+static int apparmor_socket_create(int family, int type, int protocol, int kern)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ AA_BUG(in_interrupt());
+
+ label = begin_current_label_crit_section();
+ if (!(kern || unconfined(label)))
+ error = af_select(family,
+ create_perm(label, family, type, protocol),
+ aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
+ family, type, protocol));
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+/**
+ * apparmor_socket_post_create - setup the per-socket security struct
+ *
+ * Note:
+ * - kernel sockets currently labeled unconfined but we may want to
+ * move to a special kernel label
+ * - socket may not have sk here if created with sock_create_lite or
+ * sock_alloc. These should be accept cases which will be handled in
+ * sock_graft.
+ */
+static int apparmor_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ struct aa_label *label;
+
+ if (kern)
+ label = aa_get_label(kernel_t);
+ else
+ label = aa_get_current_label();
+
+ if (sock->sk) {
+ struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
+
+ aa_put_label(ctx->label);
+ ctx->label = aa_get_label(label);
+ }
+ aa_put_label(label);
+
+ return 0;
+}
+
+/**
+ * apparmor_socket_bind - check perms before bind addr to socket
+ */
+static int apparmor_socket_bind(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!address);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ bind_perm(sock, address, addrlen),
+ aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
+}
+
+/**
+ * apparmor_socket_connect - check perms before connecting @sock to @address
+ */
+static int apparmor_socket_connect(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!address);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ connect_perm(sock, address, addrlen),
+ aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
+}
+
+/**
+ * apparmor_socket_listen - check perms before allowing listen
+ */
+static int apparmor_socket_listen(struct socket *sock, int backlog)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ listen_perm(sock, backlog),
+ aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
+}
+
+/**
+ * apparmor_socket_accept - check perms before accepting a new connection.
+ *
+ * Note: while @newsock is created and has some information, the accept
+ * has not been done.
+ */
+static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!newsock);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ accept_perm(sock, newsock),
+ aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk));
+}
+
+static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!msg);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ msg_perm(op, request, sock, msg, size),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_sendmsg - check perms before sending msg to another socket
+ */
+static int apparmor_socket_sendmsg(struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
+}
+
+/**
+ * apparmor_socket_recvmsg - check perms before receiving a message
+ */
+static int apparmor_socket_recvmsg(struct socket *sock,
+ struct msghdr *msg, int size, int flags)
+{
+ return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
+}
+
+/* revalidation, get/set attr, shutdown */
+static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ sock_perm(op, request, sock),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_getsockname - check perms before getting the local address
+ */
+static int apparmor_socket_getsockname(struct socket *sock)
+{
+ return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
+}
+
+/**
+ * apparmor_socket_getpeername - check perms before getting remote address
+ */
+static int apparmor_socket_getpeername(struct socket *sock)
+{
+ return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
+}
+
+/* revalidation, get/set attr, opt */
+static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
+ int level, int optname)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ opt_perm(op, request, sock, level, optname),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_getsockopt - check perms before getting socket options
+ */
+static int apparmor_socket_getsockopt(struct socket *sock, int level,
+ int optname)
+{
+ return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
+ level, optname);
+}
+
+/**
+ * apparmor_socket_setsockopt - check perms before setting socket options
+ */
+static int apparmor_socket_setsockopt(struct socket *sock, int level,
+ int optname)
+{
+ return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
+ level, optname);
+}
+
+/**
+ * apparmor_socket_shutdown - check perms before shutting down @sock conn
+ */
+static int apparmor_socket_shutdown(struct socket *sock, int how)
+{
+ return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
+}
+
+#ifdef CONFIG_NETWORK_SECMARK
+/**
+ * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
+ *
+ * Note: can not sleep may be called with locks held
+ *
+ * dont want protocol specific in __skb_recv_datagram()
+ * to deny an incoming connection socket_sock_rcv_skb()
+ */
+static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!skb->secmark)
+ return 0;
+
+ return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
+ skb->secmark, sk);
+}
+#endif
+
+
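+/* return @sk's peer label if one has been set, else ERR_PTR(-ENOPROTOOPT) */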
+static struct aa_label *sk_peer_label(struct sock *sk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (ctx->peer)
+ return ctx->peer;
+
+ return ERR_PTR(-ENOPROTOOPT);
+}
+
+/**
+ * apparmor_socket_getpeersec_stream - get security context of peer
+ *
+ * Note: for TCP this is only valid if using IPsec or CIPSO on the LAN
+ */
+static int apparmor_socket_getpeersec_stream(struct socket *sock,
+ char __user *optval,
+ int __user *optlen,
+ unsigned int len)
+{
+ char *name;
+ int slen, error = 0;
+ struct aa_label *label;
+ struct aa_label *peer;
+
+ label = begin_current_label_crit_section();
+ peer = sk_peer_label(sock->sk);
+ if (IS_ERR(peer)) {
+ error = PTR_ERR(peer);
+ goto done;
+ }
+ slen = aa_label_asxprint(&name, labels_ns(label), peer,
+ FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+ FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
+ /* don't include terminating \0 in slen, it breaks some apps */
+ if (slen < 0) {
+ error = -ENOMEM;
+ } else {
+ if (slen > len) {
+ error = -ERANGE;
+ } else if (copy_to_user(optval, name, slen)) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (put_user(slen, optlen))
+ error = -EFAULT;
+out:
+ kfree(name);
+
+ }
+
+done:
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+/**
+ * apparmor_socket_getpeersec_dgram - get security label of packet
+ * @sock: the peer socket
+ * @skb: packet data
+ * @secid: pointer to where to put the secid of the packet
+ *
+ * Sets the netlabel socket state on sk from parent
+ */
+static int apparmor_socket_getpeersec_dgram(struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+{
+ /* TODO: requires secid support */
+ return -ENOPROTOOPT;
+}
+
+/**
+ * apparmor_sock_graft - Initialize newly created socket
+ * @sk: child sock
+ * @parent: parent socket
+ *
+ * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
+ * just set sk security information off of current creating process label
+ * Labeling of sk for accept case - probably should be sock based
+ * instead of task, because of the case where an implicitly labeled
+ * socket is shared by different tasks.
+ */
+static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!ctx->label)
+ ctx->label = aa_get_current_label();
+}
+
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!skb->secmark)
+ return 0;
+
+ return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT,
+ skb->secmark, sk);
+}
+#endif
+
+/*
+ * The cred blob is a pointer to, not an instance of, an aa_label.
+ */
+struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct aa_label *),
+ .lbs_file = sizeof(struct aa_file_ctx),
+ .lbs_task = sizeof(struct aa_task_ctx),
+};
+
+static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
+ LSM_HOOK_INIT(capget, apparmor_capget),
+ LSM_HOOK_INIT(capable, apparmor_capable),
+
+ LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
+ LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
+
+ LSM_HOOK_INIT(path_link, apparmor_path_link),
+ LSM_HOOK_INIT(path_unlink, apparmor_path_unlink),
+ LSM_HOOK_INIT(path_symlink, apparmor_path_symlink),
+ LSM_HOOK_INIT(path_mkdir, apparmor_path_mkdir),
+ LSM_HOOK_INIT(path_rmdir, apparmor_path_rmdir),
+ LSM_HOOK_INIT(path_mknod, apparmor_path_mknod),
+ LSM_HOOK_INIT(path_rename, apparmor_path_rename),
+ LSM_HOOK_INIT(path_chmod, apparmor_path_chmod),
+ LSM_HOOK_INIT(path_chown, apparmor_path_chown),
+ LSM_HOOK_INIT(path_truncate, apparmor_path_truncate),
+ LSM_HOOK_INIT(inode_getattr, apparmor_inode_getattr),
+
+ LSM_HOOK_INIT(file_open, apparmor_file_open),
+ LSM_HOOK_INIT(file_receive, apparmor_file_receive),
+ LSM_HOOK_INIT(file_permission, apparmor_file_permission),
+ LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security),
+ LSM_HOOK_INIT(file_free_security, apparmor_file_free_security),
+ LSM_HOOK_INIT(mmap_file, apparmor_mmap_file),
+ LSM_HOOK_INIT(file_mprotect, apparmor_file_mprotect),
+ LSM_HOOK_INIT(file_lock, apparmor_file_lock),
+
+ LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
+ LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
+
+ LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
+ LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
+ LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
+
+ LSM_HOOK_INIT(socket_create, apparmor_socket_create),
+ LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
+ LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
+ LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
+ LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
+ LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
+ LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
+ LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
+ LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
+ LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
+ LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
+ LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
+ LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
+#ifdef CONFIG_NETWORK_SECMARK
+ LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
+#endif
+ LSM_HOOK_INIT(socket_getpeersec_stream,
+ apparmor_socket_getpeersec_stream),
+ LSM_HOOK_INIT(socket_getpeersec_dgram,
+ apparmor_socket_getpeersec_dgram),
+ LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
+#ifdef CONFIG_NETWORK_SECMARK
+ LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request),
+#endif
+
+ LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
+ LSM_HOOK_INIT(cred_free, apparmor_cred_free),
+ LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
+ LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer),
+
+ LSM_HOOK_INIT(bprm_creds_for_exec, apparmor_bprm_creds_for_exec),
+ LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
+ LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
+
+ LSM_HOOK_INIT(task_free, apparmor_task_free),
+ LSM_HOOK_INIT(task_alloc, apparmor_task_alloc),
+ LSM_HOOK_INIT(current_getsecid_subj, apparmor_current_getsecid_subj),
+ LSM_HOOK_INIT(task_getsecid_obj, apparmor_task_getsecid_obj),
+ LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
+ LSM_HOOK_INIT(task_kill, apparmor_task_kill),
+
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(audit_rule_init, aa_audit_rule_init),
+ LSM_HOOK_INIT(audit_rule_known, aa_audit_rule_known),
+ LSM_HOOK_INIT(audit_rule_match, aa_audit_rule_match),
+ LSM_HOOK_INIT(audit_rule_free, aa_audit_rule_free),
+#endif
+
+ LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx),
+ LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid),
+ LSM_HOOK_INIT(release_secctx, apparmor_release_secctx),
+};
+
+/*
+ * AppArmor sysfs module parameters
+ */
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp);
+static int param_get_aabool(char *buffer, const struct kernel_param *kp);
+#define param_check_aabool param_check_bool
+static const struct kernel_param_ops param_ops_aabool = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = param_set_aabool,
+ .get = param_get_aabool
+};
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp);
+static int param_get_aauint(char *buffer, const struct kernel_param *kp);
+#define param_check_aauint param_check_uint
+static const struct kernel_param_ops param_ops_aauint = {
+ .set = param_set_aauint,
+ .get = param_get_aauint
+};
+
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp);
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp);
+#define param_check_aacompressionlevel param_check_int
+static const struct kernel_param_ops param_ops_aacompressionlevel = {
+ .set = param_set_aacompressionlevel,
+ .get = param_get_aacompressionlevel
+};
+
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
+#define param_check_aalockpolicy param_check_bool
+static const struct kernel_param_ops param_ops_aalockpolicy = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = param_set_aalockpolicy,
+ .get = param_get_aalockpolicy
+};
+
+static int param_set_audit(const char *val, const struct kernel_param *kp);
+static int param_get_audit(char *buffer, const struct kernel_param *kp);
+
+static int param_set_mode(const char *val, const struct kernel_param *kp);
+static int param_get_mode(char *buffer, const struct kernel_param *kp);
+
+/* Flag values, also controllable via /sys/module/apparmor/parameters.
+ * We define special types because we want to do additional mediation.
+ */
+
+/* AppArmor global enforcement switch - complain, enforce, kill */
+enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
+module_param_call(mode, param_set_mode, param_get_mode,
+ &aa_g_profile_mode, S_IRUSR | S_IWUSR);
+
+/* whether policy verification hashing is enabled */
+bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
+#endif
+
+/* whether policy exactly as loaded is retained for debug and checkpointing */
+bool aa_g_export_binary = IS_ENABLED(CONFIG_SECURITY_APPARMOR_EXPORT_BINARY);
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+module_param_named(export_binary, aa_g_export_binary, aabool, 0600);
+#endif
+
+/* policy loaddata compression level */
+int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
+module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
+ aacompressionlevel, 0400);
+
+/* Debug mode */
+bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
+module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
+
+/* Audit mode */
+enum audit_mode aa_g_audit;
+module_param_call(audit, param_set_audit, param_get_audit,
+ &aa_g_audit, S_IRUSR | S_IWUSR);
+
+/* Determines if audit header is included in audited messages. This
+ * provides more context if the audit daemon is not running
+ */
+bool aa_g_audit_header = true;
+module_param_named(audit_header, aa_g_audit_header, aabool,
+ S_IRUSR | S_IWUSR);
+
+/* lock out loading/removal of policy
+ * TODO: add boot-time loading of policy, which would be the only way
+ * to load policy if lock_policy is set
+ */
+bool aa_g_lock_policy;
+module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
+ S_IRUSR | S_IWUSR);
+
+/* Syscall logging mode */
+bool aa_g_logsyscall;
+module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
+
+/* Maximum pathname length before accesses will start getting rejected */
+unsigned int aa_g_path_max = 2 * PATH_MAX;
+module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
+
+/* Determines how paranoid loading of policy is and how much verification
+ * on the loaded policy is done.
+ * DEPRECATED: read only, as strict checking of load is always done now
+ * that non-root users (user namespaces) can load policy.
+ */
+bool aa_g_paranoid_load = IS_ENABLED(CONFIG_SECURITY_APPARMOR_PARANOID_LOAD);
+module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
+
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
+#define param_check_aaintbool param_check_int
+static const struct kernel_param_ops param_ops_aaintbool = {
+ .set = param_set_aaintbool,
+ .get = param_get_aaintbool
+};
+/* Boot time disable flag */
+static int apparmor_enabled __lsm_ro_after_init = 1;
+module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
+
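+/*
+ * Handle the "apparmor=" kernel command line parameter, e.g. booting
+ * with "apparmor=0" disables AppArmor before initialization.
+ */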
+static int __init apparmor_enabled_setup(char *str)
+{
+ unsigned long enabled;
+ int error = kstrtoul(str, 0, &enabled);
+
+ if (!error)
+ apparmor_enabled = enabled ? 1 : 0;
+ return 1;
+}
+
+__setup("apparmor=", apparmor_enabled_setup);
+
+/* set global flag turning off the ability to load policy */
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+ return param_set_bool(val, kp);
+}
+
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_bool(buffer, kp);
+}
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+ return param_set_bool(val, kp);
+}
+
+static int param_get_aabool(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_bool(buffer, kp);
+}
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp)
+{
+ int error;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ /* file is ro but enforce 2nd line check */
+ if (apparmor_initialized)
+ return -EPERM;
+
+ error = param_set_uint(val, kp);
+ aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
+ pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
+
+ return error;
+}
+
+static int param_get_aauint(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_uint(buffer, kp);
+}
+
+/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
+{
+ struct kernel_param kp_local;
+ bool value;
+ int error;
+
+ if (apparmor_initialized)
+ return -EPERM;
+
+ /* Create local copy, with arg pointing to bool type. */
+ value = !!*((int *)kp->arg);
+ memcpy(&kp_local, kp, sizeof(kp_local));
+ kp_local.arg = &value;
+
+ error = param_set_bool(val, &kp_local);
+ if (!error)
+ *((int *)kp->arg) = *((bool *)kp_local.arg);
+ return error;
+}
+
+/*
+ * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
+ * 1/0, this converts the "int that is actually bool" back to bool for
+ * display in the /sys filesystem, while keeping it "int" for the LSM
+ * infrastructure.
+ */
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
+{
+ struct kernel_param kp_local;
+ bool value;
+
+ /* Create local copy, with arg pointing to bool type. */
+ value = !!*((int *)kp->arg);
+ memcpy(&kp_local, kp, sizeof(kp_local));
+ kp_local.arg = &value;
+
+ return param_get_bool(buffer, &kp_local);
+}
+
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp)
+{
+ int error;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized)
+ return -EPERM;
+
+ error = param_set_int(val, kp);
+
+ aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
+ Z_NO_COMPRESSION,
+ Z_BEST_COMPRESSION);
+ pr_info("AppArmor: policy rawdata compression level set to %u\n",
+ aa_g_rawdata_compression_level);
+
+ return error;
+}
+
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_int(buffer, kp);
+}
+
+static int param_get_audit(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
+}
+
+static int param_set_audit(const char *val, const struct kernel_param *kp)
+{
+ int i;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (!val)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+
+ i = match_string(audit_mode_names, AUDIT_MAX_INDEX, val);
+ if (i < 0)
+ return -EINVAL;
+
+ aa_g_audit = i;
+ return 0;
+}
+
+static int param_get_mode(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+
+ return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
+}
+
+static int param_set_mode(const char *val, const struct kernel_param *kp)
+{
+ int i;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (!val)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+
+ i = match_string(aa_profile_mode_names, APPARMOR_MODE_NAMES_MAX_INDEX,
+ val);
+ if (i < 0)
+ return -EINVAL;
+
+ aa_g_profile_mode = i;
+ return 0;
+}
+
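+/**
+ * aa_get_buffer - get a pathname buffer from the free list or allocate one
+ * @in_atomic: whether the caller is in a context that can not sleep
+ *
+ * Returns: a buffer of aa_g_path_max bytes, or NULL on allocation failure
+ */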
+char *aa_get_buffer(bool in_atomic)
+{
+ union aa_buffer *aa_buf;
+ bool try_again = true;
+ gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+retry:
+ spin_lock(&aa_buffers_lock);
+ if (buffer_count > reserve_count ||
+ (in_atomic && !list_empty(&aa_global_buffers))) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ buffer_count--;
+ spin_unlock(&aa_buffers_lock);
+ return &aa_buf->buffer[0];
+ }
+ if (in_atomic) {
+ /*
+ * out of reserve buffers and in atomic context so increase
+ * how many buffers to keep in reserve
+ */
+ reserve_count++;
+ flags = GFP_ATOMIC;
+ }
+ spin_unlock(&aa_buffers_lock);
+
+ if (!in_atomic)
+ might_sleep();
+ aa_buf = kmalloc(aa_g_path_max, flags);
+ if (!aa_buf) {
+ if (try_again) {
+ try_again = false;
+ goto retry;
+ }
+ pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
+ return NULL;
+ }
+ return &aa_buf->buffer[0];
+}
+
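+/**
+ * aa_put_buffer - return a buffer obtained with aa_get_buffer() to the pool
+ * @buf: buffer to return (MAY BE NULL)
+ */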
+void aa_put_buffer(char *buf)
+{
+ union aa_buffer *aa_buf;
+
+ if (!buf)
+ return;
+ aa_buf = container_of(buf, union aa_buffer, buffer[0]);
+
+ spin_lock(&aa_buffers_lock);
+ list_add(&aa_buf->list, &aa_global_buffers);
+ buffer_count++;
+ spin_unlock(&aa_buffers_lock);
+}
+
+/*
+ * AppArmor init functions
+ */
+
+/**
+ * set_init_ctx - set a task context and profile on the first task.
+ *
+ * TODO: allow setting an alternate profile than unconfined
+ */
+static int __init set_init_ctx(void)
+{
+ struct cred *cred = (__force struct cred *)current->real_cred;
+
+ set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
+
+ return 0;
+}
+
+static void destroy_buffers(void)
+{
+ union aa_buffer *aa_buf;
+
+ spin_lock(&aa_buffers_lock);
+ while (!list_empty(&aa_global_buffers)) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ spin_unlock(&aa_buffers_lock);
+ kfree(aa_buf);
+ spin_lock(&aa_buffers_lock);
+ }
+ spin_unlock(&aa_buffers_lock);
+}
+
+static int __init alloc_buffers(void)
+{
+ union aa_buffer *aa_buf;
+ int i, num;
+
+ /*
+ * A function may require two buffers at once. Usually the buffers are
+ * used for a short period of time and are shared. On a UP kernel two
+ * buffers should be enough; with more CPUs it is possible that more
+ * buffers will be used simultaneously. The preallocated pool may grow.
+ * This preallocation also has the side effect that AppArmor will be
+ * disabled early at boot if aa_g_path_max is extremely high.
+ */
+ if (num_online_cpus() > 1)
+ num = 4 + RESERVE_COUNT;
+ else
+ num = 2 + RESERVE_COUNT;
+
+ for (i = 0; i < num; i++) {
+
+ aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!aa_buf) {
+ destroy_buffers();
+ return -ENOMEM;
+ }
+ aa_put_buffer(&aa_buf->buffer[0]);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SYSCTL
+static int apparmor_dointvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ if (!aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+ if (!apparmor_enabled)
+ return -EINVAL;
+
+ return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+
+static struct ctl_path apparmor_sysctl_path[] = {
+ { .procname = "kernel", },
+ { }
+};
+
+static struct ctl_table apparmor_sysctl_table[] = {
+ {
+ .procname = "unprivileged_userns_apparmor_policy",
+ .data = &unprivileged_userns_apparmor_policy,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = apparmor_dointvec,
+ },
+ {
+ .procname = "apparmor_display_secid_mode",
+ .data = &apparmor_display_secid_mode,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = apparmor_dointvec,
+ },
+
+ { }
+};
+
+static int __init apparmor_init_sysctl(void)
+{
+ return register_sysctl_paths(apparmor_sysctl_path,
+ apparmor_sysctl_table) ? 0 : -ENOMEM;
+}
+#else
+static inline int apparmor_init_sysctl(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SYSCTL */
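+
+/*
+ * With CONFIG_SYSCTL enabled the two entries above are registered under
+ * the "kernel" sysctl path, i.e. as
+ * /proc/sys/kernel/unprivileged_userns_apparmor_policy and
+ * /proc/sys/kernel/apparmor_display_secid_mode; both reads and writes
+ * pass through apparmor_dointvec() and so require policy admin
+ * capability.
+ */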
+
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK)
+static unsigned int apparmor_ip_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct aa_sk_ctx *ctx;
+ struct sock *sk;
+
+ if (!skb->secmark)
+ return NF_ACCEPT;
+
+ sk = skb_to_full_sk(skb);
+ if (sk == NULL)
+ return NF_ACCEPT;
+
+ ctx = SK_CTX(sk);
+ if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
+ skb->secmark, sk))
+ return NF_ACCEPT;
+
+ return NF_DROP_ERR(-ECONNREFUSED);
+}
+
+static const struct nf_hook_ops apparmor_nf_ops[] = {
+ {
+ .hook = apparmor_ip_postroute,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .hook = apparmor_ip_postroute,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+#endif
+};
+
+static int __net_init apparmor_nf_register(struct net *net)
+{
+ return nf_register_net_hooks(net, apparmor_nf_ops,
+ ARRAY_SIZE(apparmor_nf_ops));
+}
+
+static void __net_exit apparmor_nf_unregister(struct net *net)
+{
+ nf_unregister_net_hooks(net, apparmor_nf_ops,
+ ARRAY_SIZE(apparmor_nf_ops));
+}
+
+static struct pernet_operations apparmor_net_ops = {
+ .init = apparmor_nf_register,
+ .exit = apparmor_nf_unregister,
+};
+
+static int __init apparmor_nf_ip_init(void)
+{
+ int err;
+
+ if (!apparmor_enabled)
+ return 0;
+
+ err = register_pernet_subsys(&apparmor_net_ops);
+ if (err)
+ panic("Apparmor: register_pernet_subsys: error %d\n", err);
+
+ return 0;
+}
+__initcall(apparmor_nf_ip_init);
+#endif
+
+static int __init apparmor_init(void)
+{
+ int error;
+
+ error = aa_setup_dfa_engine();
+ if (error) {
+ AA_ERROR("Unable to setup dfa engine\n");
+ goto alloc_out;
+ }
+
+ error = aa_alloc_root_ns();
+ if (error) {
+ AA_ERROR("Unable to allocate default profile namespace\n");
+ goto alloc_out;
+ }
+
+ error = apparmor_init_sysctl();
+ if (error) {
+ AA_ERROR("Unable to register sysctls\n");
+ goto alloc_out;
+
+ }
+
+ error = alloc_buffers();
+ if (error) {
+ AA_ERROR("Unable to allocate work buffers\n");
+ goto alloc_out;
+ }
+
+ error = set_init_ctx();
+ if (error) {
+ AA_ERROR("Failed to set context on init task\n");
+ aa_free_root_ns();
+ goto buffers_out;
+ }
+ security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks),
+ "apparmor");
+
+ /* Report that AppArmor successfully initialized */
+ apparmor_initialized = 1;
+ if (aa_g_profile_mode == APPARMOR_COMPLAIN)
+ aa_info_message("AppArmor initialized: complain mode enabled");
+ else if (aa_g_profile_mode == APPARMOR_KILL)
+ aa_info_message("AppArmor initialized: kill mode enabled");
+ else
+ aa_info_message("AppArmor initialized");
+
+ return error;
+
+buffers_out:
+ destroy_buffers();
+alloc_out:
+ aa_destroy_aafs();
+ aa_teardown_dfa_engine();
+
+ apparmor_enabled = false;
+ return error;
+}
+
+DEFINE_LSM(apparmor) = {
+ .name = "apparmor",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .enabled = &apparmor_enabled,
+ .blobs = &apparmor_blob_sizes,
+ .init = apparmor_init,
+};
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
new file mode 100644
index 000000000..3e9e1eaf9
--- /dev/null
+++ b/security/apparmor/match.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor dfa based regular expression matching engine
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+
+#include "include/lib.h"
+#include "include/match.h"
+
+#define base_idx(X) ((X) & 0xffffff)
+
+static char nulldfa_src[] = {
+ #include "nulldfa.in"
+};
+struct aa_dfa *nulldfa;
+
+static char stacksplitdfa_src[] = {
+ #include "stacksplitdfa.in"
+};
+struct aa_dfa *stacksplitdfa;
+
+int aa_setup_dfa_engine(void)
+{
+ int error;
+
+ nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
+ TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32));
+ if (IS_ERR(nulldfa)) {
+ error = PTR_ERR(nulldfa);
+ nulldfa = NULL;
+ return error;
+ }
+
+ stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
+ sizeof(stacksplitdfa_src),
+ TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32));
+ if (IS_ERR(stacksplitdfa)) {
+ aa_put_dfa(nulldfa);
+ nulldfa = NULL;
+ error = PTR_ERR(stacksplitdfa);
+ stacksplitdfa = NULL;
+ return error;
+ }
+
+ return 0;
+}
+
+void aa_teardown_dfa_engine(void)
+{
+ aa_put_dfa(stacksplitdfa);
+ aa_put_dfa(nulldfa);
+}
+
+/**
+ * unpack_table - unpack a dfa table (one of accept, default, base, next, or check)
+ * @blob: data to unpack (NOT NULL)
+ * @bsize: size of blob
+ *
+ * Returns: pointer to table else NULL on failure
+ *
+ * NOTE: must be freed by kvfree (not kfree)
+ */
+static struct table_header *unpack_table(char *blob, size_t bsize)
+{
+ struct table_header *table = NULL;
+ struct table_header th;
+ size_t tsize;
+
+ if (bsize < sizeof(struct table_header))
+ goto out;
+
+ /* loaded td_id's start at 1, subtract 1 now to avoid doing
+ * it every time we use td_id as an index
+ */
+ th.td_id = be16_to_cpu(*(__be16 *) (blob)) - 1;
+ if (th.td_id > YYTD_ID_MAX)
+ goto out;
+ th.td_flags = be16_to_cpu(*(__be16 *) (blob + 2));
+ th.td_lolen = be32_to_cpu(*(__be32 *) (blob + 8));
+ blob += sizeof(struct table_header);
+
+ if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
+ th.td_flags == YYTD_DATA8))
+ goto out;
+
+ /* if we have a table it must have some entries */
+ if (th.td_lolen == 0)
+ goto out;
+ tsize = table_size(th.td_lolen, th.td_flags);
+ if (bsize < tsize)
+ goto out;
+
+ table = kvzalloc(tsize, GFP_KERNEL);
+ if (table) {
+ table->td_id = th.td_id;
+ table->td_flags = th.td_flags;
+ table->td_lolen = th.td_lolen;
+ if (th.td_flags == YYTD_DATA8)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u8, u8, byte_to_byte);
+ else if (th.td_flags == YYTD_DATA16)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u16, __be16, be16_to_cpu);
+ else if (th.td_flags == YYTD_DATA32)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u32, __be32, be32_to_cpu);
+ else
+ goto fail;
+ /* if table was vmalloced make sure the page tables are synced
+ * before it is used, as it goes live to all cpus.
+ */
+ if (is_vmalloc_addr(table))
+ vm_unmap_aliases();
+ }
+
+out:
+ return table;
+fail:
+ kvfree(table);
+ return NULL;
+}
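+
+/*
+ * On-disk layout consumed by unpack_table() above (all fields
+ * big-endian; offsets in bytes from the start of the table blob):
+ *
+ * 0: u16 td_id table id (1-based on disk, made 0-based here)
+ * 2: u16 td_flags element size (YYTD_DATA8/16/32)
+ * 8: u32 td_lolen number of elements
+ *
+ * Bytes 4-7 are not read here, and the element data follows the
+ * sizeof(struct table_header) bytes of header.
+ */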
+
+/**
+ * verify_table_headers - verify that the table headers are as expected
+ * @tables: array of dfa tables to check (NOT NULL)
+ * @flags: flags controlling what types of accept tables are acceptable
+ *
+ * Assumes dfa has gone through the first pass verification done by unpacking
+ * NOTE: this does not validate accept table values
+ *
+ * Returns: %0 else error code on failure to verify
+ */
+static int verify_table_headers(struct table_header **tables, int flags)
+{
+ size_t state_count, trans_count;
+ int error = -EPROTO;
+
+ /* check that required tables exist */
+ if (!(tables[YYTD_ID_DEF] && tables[YYTD_ID_BASE] &&
+ tables[YYTD_ID_NXT] && tables[YYTD_ID_CHK]))
+ goto out;
+
+ /* accept.size == default.size == base.size */
+ state_count = tables[YYTD_ID_BASE]->td_lolen;
+ if (ACCEPT1_FLAGS(flags)) {
+ if (!tables[YYTD_ID_ACCEPT])
+ goto out;
+ if (state_count != tables[YYTD_ID_ACCEPT]->td_lolen)
+ goto out;
+ }
+ if (ACCEPT2_FLAGS(flags)) {
+ if (!tables[YYTD_ID_ACCEPT2])
+ goto out;
+ if (state_count != tables[YYTD_ID_ACCEPT2]->td_lolen)
+ goto out;
+ }
+ if (state_count != tables[YYTD_ID_DEF]->td_lolen)
+ goto out;
+
+ /* next.size == chk.size */
+ trans_count = tables[YYTD_ID_NXT]->td_lolen;
+ if (trans_count != tables[YYTD_ID_CHK]->td_lolen)
+ goto out;
+
+ /* if equivalence classes then its table size must be 256 */
+ if (tables[YYTD_ID_EC] && tables[YYTD_ID_EC]->td_lolen != 256)
+ goto out;
+
+ error = 0;
+out:
+ return error;
+}
+
+/**
+ * verify_dfa - verify that transitions and states in the tables are in bounds.
+ * @dfa: dfa to test (NOT NULL)
+ *
+ * Assumes dfa has gone through the first pass verification done by unpacking
+ * NOTE: this does not validate accept table values
+ *
+ * Returns: %0 else error code on failure to verify
+ */
+static int verify_dfa(struct aa_dfa *dfa)
+{
+ size_t i, state_count, trans_count;
+ int error = -EPROTO;
+
+ state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+ trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+ if (state_count == 0)
+ goto out;
+ for (i = 0; i < state_count; i++) {
+ if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
+ (DEFAULT_TABLE(dfa)[i] >= state_count))
+ goto out;
+ if (BASE_TABLE(dfa)[i] & MATCH_FLAGS_INVALID) {
+ pr_err("AppArmor DFA state with invalid match flags");
+ goto out;
+ }
+ if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE)) {
+ if (!(dfa->flags & YYTH_FLAG_DIFF_ENCODE)) {
+ pr_err("AppArmor DFA diff encoded transition state without header flag");
+ goto out;
+ }
+ }
+ if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_OOB_TRANSITION)) {
+ if (base_idx(BASE_TABLE(dfa)[i]) < dfa->max_oob) {
+ pr_err("AppArmor DFA out of bad transition out of range");
+ goto out;
+ }
+ if (!(dfa->flags & YYTH_FLAG_OOB_TRANS)) {
+ pr_err("AppArmor DFA out of bad transition state without header flag");
+ goto out;
+ }
+ }
+ if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
+ pr_err("AppArmor DFA next/check upper bounds error\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < trans_count; i++) {
+ if (NEXT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (CHECK_TABLE(dfa)[i] >= state_count)
+ goto out;
+ }
+
+ /* Now that all the other tables are verified, verify diffencoding */
+ for (i = 0; i < state_count; i++) {
+ size_t j, k;
+
+ for (j = i;
+ (BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE) &&
+ !(BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE);
+ j = k) {
+ k = DEFAULT_TABLE(dfa)[j];
+ if (j == k)
+ goto out;
+ if (k < j)
+ break; /* already verified */
+ BASE_TABLE(dfa)[j] |= MARK_DIFF_ENCODE;
+ }
+ }
+ error = 0;
+
+out:
+ return error;
+}
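+
+/*
+ * Background for the final loop in verify_dfa(): a state flagged
+ * MATCH_FLAG_DIFF_ENCODE stores only the transitions that differ from
+ * its default state, so lookups that miss in next/check fall through
+ * to DEFAULT_TABLE. The loop follows each chain
+ *
+ * i -> DEFAULT_TABLE(i) -> DEFAULT_TABLE(DEFAULT_TABLE(i)) -> ...
+ *
+ * marking visited states with MARK_DIFF_ENCODE, and rejects a state
+ * whose default points back at itself (j == k), which would loop
+ * forever at match time.
+ */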
+
+/**
+ * dfa_free - free a dfa allocated by aa_dfa_unpack
+ * @dfa: the dfa to free (MAYBE NULL)
+ *
+ * Requires: reference count to dfa == 0
+ */
+static void dfa_free(struct aa_dfa *dfa)
+{
+ if (dfa) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
+ kvfree(dfa->tables[i]);
+ dfa->tables[i] = NULL;
+ }
+ kfree(dfa);
+ }
+}
+
+/**
+ * aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa)
+ * @kref: kref callback for freeing of a dfa (NOT NULL)
+ */
+void aa_dfa_free_kref(struct kref *kref)
+{
+ struct aa_dfa *dfa = container_of(kref, struct aa_dfa, count);
+
+ dfa_free(dfa);
+}
+
+/**
+ * aa_dfa_unpack - unpack the binary tables of a serialized dfa
+ * @blob: aligned serialized stream of data to unpack (NOT NULL)
+ * @size: size of data to unpack
+ * @flags: flags controlling what types of accept tables are acceptable
+ *
+ * Unpack a dfa that has been serialized. To find information on the dfa
+ * format look in Documentation/admin-guide/LSM/apparmor.rst
+ * Assumes the dfa @blob stream has been aligned on an 8 byte boundary
+ *
+ * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
+ */
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
+{
+ int hsize;
+ int error = -ENOMEM;
+ char *data = blob;
+ struct table_header *table = NULL;
+ struct aa_dfa *dfa = kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
+
+ if (!dfa)
+ goto fail;
+
+ kref_init(&dfa->count);
+
+ error = -EPROTO;
+
+ /* get dfa table set header */
+ if (size < sizeof(struct table_set_header))
+ goto fail;
+
+ if (ntohl(*(__be32 *) data) != YYTH_MAGIC)
+ goto fail;
+
+ hsize = ntohl(*(__be32 *) (data + 4));
+ if (size < hsize)
+ goto fail;
+
+ dfa->flags = ntohs(*(__be16 *) (data + 12));
+ if (dfa->flags & ~(YYTH_FLAGS))
+ goto fail;
+
+ /*
+ * TODO: needed for dfa to support more than 1 oob
+ * if (dfa->flags & YYTH_FLAGS_OOB_TRANS) {
+ * if (hsize < 16 + 4)
+ * goto fail;
+ * dfa->max_oob = ntol(*(__be32 *) (data + 16));
+ * if (dfa->max <= MAX_OOB_SUPPORTED) {
+ * pr_err("AppArmor DFA OOB greater than supported\n");
+ * goto fail;
+ * }
+ * }
+ */
+ dfa->max_oob = 1;
+
+ data += hsize;
+ size -= hsize;
+
+ while (size > 0) {
+ table = unpack_table(data, size);
+ if (!table)
+ goto fail;
+
+ switch (table->td_id) {
+ case YYTD_ID_ACCEPT:
+ if (!(table->td_flags & ACCEPT1_FLAGS(flags)))
+ goto fail;
+ break;
+ case YYTD_ID_ACCEPT2:
+ if (!(table->td_flags & ACCEPT2_FLAGS(flags)))
+ goto fail;
+ break;
+ case YYTD_ID_BASE:
+ if (table->td_flags != YYTD_DATA32)
+ goto fail;
+ break;
+ case YYTD_ID_DEF:
+ case YYTD_ID_NXT:
+ case YYTD_ID_CHK:
+ if (table->td_flags != YYTD_DATA16)
+ goto fail;
+ break;
+ case YYTD_ID_EC:
+ if (table->td_flags != YYTD_DATA8)
+ goto fail;
+ break;
+ default:
+ goto fail;
+ }
+ /* check for duplicate table entry */
+ if (dfa->tables[table->td_id])
+ goto fail;
+ dfa->tables[table->td_id] = table;
+ data += table_size(table->td_lolen, table->td_flags);
+ size -= table_size(table->td_lolen, table->td_flags);
+ table = NULL;
+ }
+ error = verify_table_headers(dfa->tables, flags);
+ if (error)
+ goto fail;
+
+ if (flags & DFA_FLAG_VERIFY_STATES) {
+ error = verify_dfa(dfa);
+ if (error)
+ goto fail;
+ }
+
+ return dfa;
+
+fail:
+ kvfree(table);
+ dfa_free(dfa);
+ return ERR_PTR(error);
+}
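+
+/*
+ * Minimal usage sketch for aa_dfa_unpack(), mirroring
+ * aa_setup_dfa_engine() above ("blob" and "size" are placeholders for
+ * a caller-provided serialized dfa):
+ *
+ * struct aa_dfa *dfa = aa_dfa_unpack(blob, size,
+ * TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ * TO_ACCEPT2_FLAG(YYTD_DATA32));
+ * if (IS_ERR(dfa))
+ * return PTR_ERR(dfa);
+ * ...
+ * aa_put_dfa(dfa); (drops the kref taken by kref_init())
+ */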
+
+#define match_char(state, def, base, next, check, C) \
+do { \
+ u32 b = (base)[(state)]; \
+ unsigned int pos = base_idx(b) + (C); \
+ if ((check)[pos] != (state)) { \
+ (state) = (def)[(state)]; \
+ if (b & MATCH_FLAG_DIFF_ENCODE) \
+ continue; \
+ break; \
+ } \
+ (state) = (next)[pos]; \
+ break; \
+} while (1)
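+
+/*
+ * Walk-through of match_char() with illustrative table contents:
+ * suppose state == 3, base[3] == 0x100 (no flag bits set) and the
+ * input byte C == 0x61 ('a'). Then pos = 0x100 + 0x61 = 0x161; if
+ * check[0x161] == 3 the transition belongs to state 3 and state
+ * becomes next[0x161]. Otherwise state falls back to def[3], and if
+ * base[3] had MATCH_FLAG_DIFF_ENCODE set the same byte is retried
+ * from the default state via the "continue" of the enclosing loop.
+ */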
+
+/**
+ * aa_dfa_match_len - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the string of bytes to match against the dfa (NOT NULL)
+ * @len: length of the string of bytes to match
+ *
+ * aa_dfa_match_len will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * This function will happily match against the 0 byte and only finishes
+ * when @len bytes of input are consumed.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int len)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ for (; len; len--)
+ match_char(state, def, base, next, check,
+ equiv[(u8) *str++]);
+ } else {
+ /* default is direct to next state */
+ for (; len; len--)
+ match_char(state, def, base, next, check, (u8) *str++);
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_match - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ *
+ * aa_dfa_match will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+ const char *str)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str)
+ match_char(state, def, base, next, check,
+ equiv[(u8) *str++]);
+ } else {
+ /* default is direct to next state */
+ while (*str)
+ match_char(state, def, base, next, check, (u8) *str++);
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_next - step one character to the next state in the dfa
+ * @dfa: the dfa to traverse (NOT NULL)
+ * @state: the state to start in
+ * @c: the input character to transition on
+ *
+ * aa_dfa_next will step through the dfa by one input character @c
+ *
+ * Returns: state reached after input @c
+ */
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ match_char(state, def, base, next, check, equiv[(u8) c]);
+ } else
+ match_char(state, def, base, next, check, (u8) c);
+
+ return state;
+}
+
+unsigned int aa_dfa_outofband_transition(struct aa_dfa *dfa, unsigned int state)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ u32 b = (base)[(state)];
+
+ if (!(b & MATCH_FLAG_OOB_TRANSITION))
+ return DFA_NOMATCH;
+
+ /* No Equivalence class remapping for outofband transitions */
+ match_char(state, def, base, next, check, -1);
+
+ return state;
+}
+
+/**
+ * aa_dfa_match_until - traverse @dfa until accept state or end of input
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ * @retpos: first character in str after match OR end of string
+ *
+ * aa_dfa_match_until will match @str against the dfa until it reaches an
+ * accepting state or runs out of input, and return the state it finished
+ * matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: state reached when matching stopped (accept state or end of input)
+ */
+unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, const char **retpos)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ u32 *accept = ACCEPT_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (accept[state])
+ break;
+ }
+ } else {
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (accept[state])
+ break;
+ }
+ }
+
+ *retpos = str;
+ return state;
+}
+
+/**
+ * aa_dfa_matchn_until - traverse @dfa until accept or @n bytes consumed
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the string of bytes to match against the dfa (NOT NULL)
+ * @n: length of the string of bytes to match
+ * @retpos: first character in str after match OR str + n
+ *
+ * aa_dfa_matchn_until will match @str against the dfa until it reaches an
+ * accepting state or @n bytes have been consumed. The final state can be
+ * used to look up the accepting label, or as the start state of a
+ * continuing match.
+ *
+ * This function will happily match against the 0 byte and only finishes
+ * when @n bytes of input are consumed or an accept state is reached.
+ *
+ * Returns: state reached when matching stopped (accept state or @n consumed)
+ */
+unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int n, const char **retpos)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ u32 *accept = ACCEPT_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ *retpos = NULL;
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ for (; n; n--) {
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (accept[state])
+ break;
+ }
+ } else {
+ /* default is direct to next state */
+ for (; n; n--) {
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (accept[state])
+ break;
+ }
+ }
+
+ *retpos = str;
+ return state;
+}
+
+#define inc_wb_pos(wb) \
+do { \
+ wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
+ wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
+} while (0)
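+
+/*
+ * The masking in inc_wb_pos() is only a correct wrap when
+ * WB_HISTORY_SIZE is a power of two; e.g. with WB_HISTORY_SIZE == 8,
+ * pos 7 + 1 wraps to 0, making the workbuf a fixed-size ring of the
+ * most recently visited states that is_loop() below scans backwards.
+ */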
+
+/* For DFAs that don't support extended tagging of states */
+static bool is_loop(struct match_workbuf *wb, unsigned int state,
+ unsigned int *adjust)
+{
+ unsigned int pos = wb->pos;
+ unsigned int i;
+
+ if (wb->history[pos] < state)
+ return false;
+
+ for (i = 0; i <= wb->len; i++) {
+ if (wb->history[pos] == state) {
+ *adjust = i;
+ return true;
+ }
+ if (pos == 0)
+ pos = WB_HISTORY_SIZE;
+ pos--;
+ }
+
+ *adjust = i;
+ return true;
+}
+
+static unsigned int leftmatch_fb(struct aa_dfa *dfa, unsigned int start,
+ const char *str, struct match_workbuf *wb,
+ unsigned int *count)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ AA_BUG(!dfa);
+ AA_BUG(!str);
+ AA_BUG(!wb);
+ AA_BUG(!count);
+
+ *count = 0;
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str) {
+ unsigned int adjust;
+
+ wb->history[wb->pos] = state;
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (is_loop(wb, state, &adjust)) {
+ state = aa_dfa_match(dfa, state, str);
+ *count -= adjust;
+ goto out;
+ }
+ inc_wb_pos(wb);
+ (*count)++;
+ }
+ } else {
+ /* default is direct to next state */
+ while (*str) {
+ unsigned int adjust;
+
+ wb->history[wb->pos] = state;
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (is_loop(wb, state, &adjust)) {
+ state = aa_dfa_match(dfa, state, str);
+ *count -= adjust;
+ goto out;
+ }
+ inc_wb_pos(wb);
+ (*count)++;
+ }
+ }
+
+out:
+ if (!state)
+ *count = 0;
+ return state;
+}
+
+/**
+ * aa_dfa_leftmatch - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ * @count: set to the count of characters consumed by the longest left match
+ *
+ * aa_dfa_leftmatch will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
+ const char *str, unsigned int *count)
+{
+ DEFINE_MATCH_WB(wb);
+
+ /* TODO: match for extended state dfas */
+
+ return leftmatch_fb(dfa, start, str, &wb, count);
+}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
new file mode 100644
index 000000000..f61247241
--- /dev/null
+++ b/security/apparmor/mount.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor mediation of mount operations
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <uapi/linux/mount.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/domain.h"
+#include "include/file.h"
+#include "include/match.h"
+#include "include/mount.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+
+static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
+{
+ if (flags & MS_RDONLY)
+ audit_log_format(ab, "ro");
+ else
+ audit_log_format(ab, "rw");
+ if (flags & MS_NOSUID)
+ audit_log_format(ab, ", nosuid");
+ if (flags & MS_NODEV)
+ audit_log_format(ab, ", nodev");
+ if (flags & MS_NOEXEC)
+ audit_log_format(ab, ", noexec");
+ if (flags & MS_SYNCHRONOUS)
+ audit_log_format(ab, ", sync");
+ if (flags & MS_REMOUNT)
+ audit_log_format(ab, ", remount");
+ if (flags & MS_MANDLOCK)
+ audit_log_format(ab, ", mand");
+ if (flags & MS_DIRSYNC)
+ audit_log_format(ab, ", dirsync");
+ if (flags & MS_NOATIME)
+ audit_log_format(ab, ", noatime");
+ if (flags & MS_NODIRATIME)
+ audit_log_format(ab, ", nodiratime");
+ if (flags & MS_BIND)
+ audit_log_format(ab, flags & MS_REC ? ", rbind" : ", bind");
+ if (flags & MS_MOVE)
+ audit_log_format(ab, ", move");
+ if (flags & MS_SILENT)
+ audit_log_format(ab, ", silent");
+ if (flags & MS_POSIXACL)
+ audit_log_format(ab, ", acl");
+ if (flags & MS_UNBINDABLE)
+ audit_log_format(ab, flags & MS_REC ? ", runbindable" :
+ ", unbindable");
+ if (flags & MS_PRIVATE)
+ audit_log_format(ab, flags & MS_REC ? ", rprivate" :
+ ", private");
+ if (flags & MS_SLAVE)
+ audit_log_format(ab, flags & MS_REC ? ", rslave" :
+ ", slave");
+ if (flags & MS_SHARED)
+ audit_log_format(ab, flags & MS_REC ? ", rshared" :
+ ", shared");
+ if (flags & MS_RELATIME)
+ audit_log_format(ab, ", relatime");
+ if (flags & MS_I_VERSION)
+ audit_log_format(ab, ", iversion");
+ if (flags & MS_STRICTATIME)
+ audit_log_format(ab, ", strictatime");
+ if (flags & MS_NOUSER)
+ audit_log_format(ab, ", nouser");
+}
+
+/**
+ * audit_cb - call back for mount specific audit fields
+ * @ab: audit_buffer (NOT NULL)
+ * @va: audit struct to audit values of (NOT NULL)
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->mnt.type) {
+ audit_log_format(ab, " fstype=");
+ audit_log_untrustedstring(ab, aad(sa)->mnt.type);
+ }
+ if (aad(sa)->mnt.src_name) {
+ audit_log_format(ab, " srcname=");
+ audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
+ }
+ if (aad(sa)->mnt.trans) {
+ audit_log_format(ab, " trans=");
+ audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
+ }
+ if (aad(sa)->mnt.flags) {
+ audit_log_format(ab, " flags=\"");
+ audit_mnt_flags(ab, aad(sa)->mnt.flags);
+ audit_log_format(ab, "\"");
+ }
+ if (aad(sa)->mnt.data) {
+ audit_log_format(ab, " options=");
+ audit_log_untrustedstring(ab, aad(sa)->mnt.data);
+ }
+}
+
+/**
+ * audit_mount - handle the auditing of mount operations
+ * @profile: the profile being enforced (NOT NULL)
+ * @op: operation being mediated (NOT NULL)
+ * @name: name of object being mediated (MAYBE NULL)
+ * @src_name: src_name of object being mediated (MAYBE NULL)
+ * @type: type of filesystem (MAYBE NULL)
+ * @trans: name of trans (MAYBE NULL)
+ * @flags: filesystem independent mount flags
+ * @data: filesystem mount flags
+ * @request: permissions requested
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @info: extra information message (MAYBE NULL)
+ * @error: 0 if operation allowed else failure error code
+ *
+ * Returns: %0 or error on failure
+ */
+static int audit_mount(struct aa_profile *profile, const char *op,
+ const char *name, const char *src_name,
+ const char *type, const char *trans,
+ unsigned long flags, const void *data, u32 request,
+ struct aa_perms *perms, const char *info, int error)
+{
+ int audit_type = AUDIT_APPARMOR_AUTO;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, op);
+
+ if (likely(!error)) {
+ u32 mask = perms->audit;
+
+ if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+ mask = 0xffff;
+
+ /* mask off perms that are not being force audited */
+ request &= mask;
+
+ if (likely(!request))
+ return 0;
+ audit_type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ /* only report permissions that were denied */
+ request = request & ~perms->allow;
+
+ if (request & perms->kill)
+ audit_type = AUDIT_APPARMOR_KILL;
+
+ /* quiet known rejects, assumes quiet and kill do not overlap */
+ if ((request & perms->quiet) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+ request &= ~perms->quiet;
+
+ if (!request)
+ return error;
+ }
+
+ aad(&sa)->name = name;
+ aad(&sa)->mnt.src_name = src_name;
+ aad(&sa)->mnt.type = type;
+ aad(&sa)->mnt.trans = trans;
+ aad(&sa)->mnt.flags = flags;
+ if (data && (perms->audit & AA_AUDIT_DATA))
+ aad(&sa)->mnt.data = data;
+ aad(&sa)->info = info;
+ aad(&sa)->error = error;
+
+ return aa_audit(audit_type, profile, &sa, audit_cb);
+}
+
+/**
+ * match_mnt_flags - Do an ordered match on mount flags
+ * @dfa: dfa to match against
+ * @state: state to start in
+ * @flags: mount flags to match against
+ *
+ * Mount flags are encoded as an ordered match. This is done instead of
+ * checking against a simple bitmask, to allow for logical operations
+ * on the flags.
+ *
+ * Returns: next state after flags match
+ */
+static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state,
+ unsigned long flags)
+{
+ unsigned int i;
+
+ for (i = 0; i <= 31 ; ++i) {
+ if ((1 << i) & flags)
+ state = aa_dfa_next(dfa, state, i + 1);
+ }
+
+ return state;
+}
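+
+/*
+ * Example of the ordered encoding above: for flags == MS_RDONLY |
+ * MS_NOSUID (bits 0 and 1), match_mnt_flags() feeds the dfa the input
+ * bytes 1 then 2, in ascending bit order. Bit i is always presented
+ * as byte i + 1 so that byte 0 stays free for the null transition
+ * used to separate fields.
+ */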
+
+/**
+ * compute_mnt_perms - compute mount permission associated with @state
+ * @dfa: dfa to match against (NOT NULL)
+ * @state: state match finished in
+ *
+ * Returns: mount permissions
+ */
+static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
+ unsigned int state)
+{
+ struct aa_perms perms = {
+ .allow = dfa_user_allow(dfa, state),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state),
+ };
+
+ return perms;
+}
+
+static const char * const mnt_info_table[] = {
+ "match succeeded",
+ "failed mntpnt match",
+ "failed srcname match",
+ "failed type match",
+ "failed flags match",
+ "failed data match",
+ "failed perms check"
+};
+
+/*
+ * Returns 0 on success, else the index of the element the match failed
+ * in; this indexes into mnt_info_table above.
+ */
+static int do_match_mnt(struct aa_dfa *dfa, unsigned int start,
+ const char *mntpnt, const char *devname,
+ const char *type, unsigned long flags,
+ void *data, bool binary, struct aa_perms *perms)
+{
+ unsigned int state;
+
+ AA_BUG(!dfa);
+ AA_BUG(!perms);
+
+ state = aa_dfa_match(dfa, start, mntpnt);
+ state = aa_dfa_null_transition(dfa, state);
+ if (!state)
+ return 1;
+
+ if (devname)
+ state = aa_dfa_match(dfa, state, devname);
+ state = aa_dfa_null_transition(dfa, state);
+ if (!state)
+ return 2;
+
+ if (type)
+ state = aa_dfa_match(dfa, state, type);
+ state = aa_dfa_null_transition(dfa, state);
+ if (!state)
+ return 3;
+
+ state = match_mnt_flags(dfa, state, flags);
+ if (!state)
+ return 4;
+ *perms = compute_mnt_perms(dfa, state);
+ if (perms->allow & AA_MAY_MOUNT)
+ return 0;
+
+ /* only match data if not binary and the DFA flags that data is expected */
+ if (data && !binary && (perms->allow & AA_MNT_CONT_MATCH)) {
+ state = aa_dfa_null_transition(dfa, state);
+ if (!state)
+ return 4;
+
+ state = aa_dfa_match(dfa, state, data);
+ if (!state)
+ return 5;
+ *perms = compute_mnt_perms(dfa, state);
+ if (perms->allow & AA_MAY_MOUNT)
+ return 0;
+ }
+
+ /* failed at perms check, don't confuse with flags match */
+ return 6;
+}
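+
+/*
+ * Shape of the input consumed by do_match_mnt(), with \0 marking the
+ * null transitions between fields:
+ *
+ * mntpnt \0 devname \0 type \0 <flag bytes> [ \0 data ]
+ *
+ * e.g. a hypothetical mount of /dev/sda1 on /mnt as ext4 matches
+ * "/mnt" \0 "/dev/sda1" \0 "ext4" \0 then one byte per set flag; the
+ * data field is only matched when the perms accumulated so far carry
+ * AA_MNT_CONT_MATCH.
+ */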
+
+
+static int path_flags(struct aa_profile *profile, const struct path *path)
+{
+ AA_BUG(!profile);
+ AA_BUG(!path);
+
+ return profile->path_flags |
+ (S_ISDIR(path->dentry->d_inode->i_mode) ? PATH_IS_DIR : 0);
+}
+
+/**
+ * match_mnt_path_str - handle path matching for mount
+ * @profile: the confining profile
+ * @mntpath: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+ * @devname: string for the devname/src_name (MAY BE NULL OR ERRPTR)
+ * @type: string for the dev type (MAYBE NULL)
+ * @flags: mount flags to match
+ * @data: fs mount data (MAYBE NULL)
+ * @binary: whether @data is binary
+ * @devinfo: error str if (IS_ERR(@devname))
+ *
+ * Returns: 0 on success else error
+ */
+static int match_mnt_path_str(struct aa_profile *profile,
+ const struct path *mntpath, char *buffer,
+ const char *devname, const char *type,
+ unsigned long flags, void *data, bool binary,
+ const char *devinfo)
+{
+ struct aa_perms perms = { };
+ const char *mntpnt = NULL, *info = NULL;
+ int pos, error;
+
+ AA_BUG(!profile);
+ AA_BUG(!mntpath);
+ AA_BUG(!buffer);
+
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
+ error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
+ &mntpnt, &info, profile->disconnected);
+ if (error)
+ goto audit;
+ if (IS_ERR(devname)) {
+ error = PTR_ERR(devname);
+ devname = NULL;
+ info = devinfo;
+ goto audit;
+ }
+
+ error = -EACCES;
+ pos = do_match_mnt(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_MOUNT],
+ mntpnt, devname, type, flags, data, binary, &perms);
+ if (pos) {
+ info = mnt_info_table[pos];
+ goto audit;
+ }
+ error = 0;
+
+audit:
+ return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
+ flags, data, AA_MAY_MOUNT, &perms, info, error);
+}
+
+/**
+ * match_mnt - handle path matching for mount
+ * @profile: the confining profile
+ * @path: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+ * @devpath: path devname/src_name (MAYBE NULL)
+ * @devbuffer: buffer to be used to lookup devname/src_name
+ * @type: string for the dev type (MAYBE NULL)
+ * @flags: mount flags to match
+ * @data: fs mount data (MAYBE NULL)
+ * @binary: whether @data is binary
+ *
+ * Returns: 0 on success else error
+ */
+static int match_mnt(struct aa_profile *profile, const struct path *path,
+ char *buffer, const struct path *devpath, char *devbuffer,
+ const char *type, unsigned long flags, void *data,
+ bool binary)
+{
+ const char *devname = NULL, *info = NULL;
+ int error = -EACCES;
+
+ AA_BUG(!profile);
+ AA_BUG(devpath && !devbuffer);
+
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
+ if (devpath) {
+ error = aa_path_name(devpath, path_flags(profile, devpath),
+ devbuffer, &devname, &info,
+ profile->disconnected);
+ if (error)
+ devname = ERR_PTR(error);
+ }
+
+ return match_mnt_path_str(profile, path, buffer, devname, type, flags,
+ data, binary, info);
+}
+
+int aa_remount(struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ bool binary;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
+
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+ match_mnt(profile, path, buffer, NULL, NULL, NULL,
+ flags, data, binary));
+ aa_put_buffer(buffer);
+
+ return error;
+}
+
+int aa_bind_mount(struct aa_label *label, const struct path *path,
+ const char *dev_name, unsigned long flags)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL, *old_buffer = NULL;
+ struct path old_path;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ if (!dev_name || !*dev_name)
+ return -EINVAL;
+
+ flags &= MS_REC | MS_BIND;
+
+ error = kern_path(dev_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
+ if (error)
+ return error;
+
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !old_buffer)
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+ match_mnt(profile, path, buffer, &old_path, old_buffer,
+ NULL, flags, NULL, false));
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
+ path_put(&old_path);
+
+ return error;
+}
+
+int aa_mount_change_type(struct aa_label *label, const struct path *path,
+ unsigned long flags)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ /* These are the flags allowed by do_change_type() */
+ flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
+ MS_UNBINDABLE);
+
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+ match_mnt(profile, path, buffer, NULL, NULL, NULL,
+ flags, NULL, false));
+ aa_put_buffer(buffer);
+
+ return error;
+}
+
+int aa_move_mount(struct aa_label *label, const struct path *path,
+ const char *orig_name)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL, *old_buffer = NULL;
+ struct path old_path;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ if (!orig_name || !*orig_name)
+ return -EINVAL;
+
+ error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
+ if (error)
+ return error;
+
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !old_buffer)
+ goto out;
+ error = fn_for_each_confined(label, profile,
+ match_mnt(profile, path, buffer, &old_path, old_buffer,
+ NULL, MS_MOVE, NULL, false));
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
+ path_put(&old_path);
+
+ return error;
+}
+
+int aa_new_mount(struct aa_label *label, const char *dev_name,
+ const struct path *path, const char *type, unsigned long flags,
+ void *data)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL, *dev_buffer = NULL;
+ bool binary = true;
+ int error;
+ int requires_dev = 0;
+ struct path tmp_path, *dev_path = NULL;
+
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ if (type) {
+ struct file_system_type *fstype;
+
+ fstype = get_fs_type(type);
+ if (!fstype)
+ return -ENODEV;
+ binary = fstype->fs_flags & FS_BINARY_MOUNTDATA;
+ requires_dev = fstype->fs_flags & FS_REQUIRES_DEV;
+ put_filesystem(fstype);
+
+ if (requires_dev) {
+ if (!dev_name || !*dev_name)
+ return -ENOENT;
+
+ error = kern_path(dev_name, LOOKUP_FOLLOW, &tmp_path);
+ if (error)
+ return error;
+ dev_path = &tmp_path;
+ }
+ }
+
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (dev_path) {
+ dev_buffer = aa_get_buffer(false);
+ if (!dev_buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = fn_for_each_confined(label, profile,
+ match_mnt(profile, path, buffer, dev_path, dev_buffer,
+ type, flags, data, binary));
+ } else {
+ error = fn_for_each_confined(label, profile,
+ match_mnt_path_str(profile, path, buffer, dev_name,
+ type, flags, data, binary, NULL));
+ }
+
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(dev_buffer);
+ if (dev_path)
+ path_put(dev_path);
+
+ return error;
+}
+
+static int profile_umount(struct aa_profile *profile, const struct path *path,
+ char *buffer)
+{
+ struct aa_perms perms = { };
+ const char *name = NULL, *info = NULL;
+ unsigned int state;
+ int error;
+
+ AA_BUG(!profile);
+ AA_BUG(!path);
+
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
+ error = aa_path_name(path, path_flags(profile, path), buffer, &name,
+ &info, profile->disconnected);
+ if (error)
+ goto audit;
+
+ state = aa_dfa_match(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_MOUNT],
+ name);
+ perms = compute_mnt_perms(profile->policy.dfa, state);
+ if (AA_MAY_UMOUNT & ~perms.allow)
+ error = -EACCES;
+
+audit:
+ return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
+ AA_MAY_UMOUNT, &perms, info, error);
+}
+
+int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+{
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ int error;
+ struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
+
+ AA_BUG(!label);
+ AA_BUG(!mnt);
+
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+
+ error = fn_for_each_confined(label, profile,
+ profile_umount(profile, &path, buffer));
+ aa_put_buffer(buffer);
+
+ return error;
+}
+
+/* helper fn for transition on pivotroot
+ *
+ * Returns: label for transition or ERR_PTR. Does not return NULL
+ */
+static struct aa_label *build_pivotroot(struct aa_profile *profile,
+ const struct path *new_path,
+ char *new_buffer,
+ const struct path *old_path,
+ char *old_buffer)
+{
+ const char *old_name, *new_name = NULL, *info = NULL;
+ const char *trans_name = NULL;
+ struct aa_perms perms = { };
+ unsigned int state;
+ int error;
+
+ AA_BUG(!profile);
+ AA_BUG(!new_path);
+ AA_BUG(!old_path);
+
+ if (profile_unconfined(profile) ||
+ !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return aa_get_newest_label(&profile->label);
+
+ error = aa_path_name(old_path, path_flags(profile, old_path),
+ old_buffer, &old_name, &info,
+ profile->disconnected);
+ if (error)
+ goto audit;
+ error = aa_path_name(new_path, path_flags(profile, new_path),
+ new_buffer, &new_name, &info,
+ profile->disconnected);
+ if (error)
+ goto audit;
+
+ error = -EACCES;
+ state = aa_dfa_match(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_MOUNT],
+ new_name);
+ state = aa_dfa_null_transition(profile->policy.dfa, state);
+ state = aa_dfa_match(profile->policy.dfa, state, old_name);
+ perms = compute_mnt_perms(profile->policy.dfa, state);
+
+ if (AA_MAY_PIVOTROOT & perms.allow)
+ error = 0;
+
+audit:
+ error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
+ NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
+ &perms, info, error);
+ if (error)
+ return ERR_PTR(error);
+
+ return aa_get_newest_label(&profile->label);
+}
+
+int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+ const struct path *new_path)
+{
+ struct aa_profile *profile;
+ struct aa_label *target = NULL;
+ char *old_buffer = NULL, *new_buffer = NULL, *info = NULL;
+ int error;
+
+ AA_BUG(!label);
+ AA_BUG(!old_path);
+ AA_BUG(!new_path);
+
+ old_buffer = aa_get_buffer(false);
+ new_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!old_buffer || !new_buffer)
+ goto out;
+ target = fn_label_build(label, profile, GFP_KERNEL,
+ build_pivotroot(profile, new_path, new_buffer,
+ old_path, old_buffer));
+ if (!target) {
+ info = "label build failed";
+ error = -ENOMEM;
+ goto fail;
+ } else if (!IS_ERR(target)) {
+ error = aa_replace_current_label(target);
+ if (error) {
+ /* TODO: audit target */
+ aa_put_label(target);
+ goto out;
+ }
+ aa_put_label(target);
+ } else
+ /* already audited error */
+ error = PTR_ERR(target);
+out:
+ aa_put_buffer(old_buffer);
+ aa_put_buffer(new_buffer);
+
+ return error;
+
+fail:
+ /* TODO: add back in auditing of new_name and old_name */
+ error = fn_for_each(label, profile,
+ audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
+ NULL /* old_name */,
+ NULL, NULL,
+ 0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
+ error));
+ goto out;
+}
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
new file mode 100644
index 000000000..7efe4d172
--- /dev/null
+++ b/security/apparmor/net.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ */
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/label.h"
+#include "include/net.h"
+#include "include/policy.h"
+#include "include/secid.h"
+
+#include "net_names.h"
+
+
+struct aa_sfs_entry aa_sfs_entry_network[] = {
+ AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
+ { }
+};
+
+static const char * const net_mask_names[] = {
+ "unknown",
+ "send",
+ "receive",
+ "unknown",
+
+ "create",
+ "shutdown",
+ "connect",
+ "unknown",
+
+ "setattr",
+ "getattr",
+ "setcred",
+ "getcred",
+
+ "chmod",
+ "chown",
+ "chgrp",
+ "lock",
+
+ "mmap",
+ "mprot",
+ "unknown",
+ "unknown",
+
+ "accept",
+ "bind",
+ "listen",
+ "unknown",
+
+ "setopt",
+ "getopt",
+ "unknown",
+ "unknown",
+
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+};
+
+
+/* audit callback for net specific fields */
+void audit_net_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (address_family_names[sa->u.net->family])
+ audit_log_format(ab, " family=\"%s\"",
+ address_family_names[sa->u.net->family]);
+ else
+ audit_log_format(ab, " family=\"unknown(%d)\"",
+ sa->u.net->family);
+ if (sock_type_names[aad(sa)->net.type])
+ audit_log_format(ab, " sock_type=\"%s\"",
+ sock_type_names[aad(sa)->net.type]);
+ else
+ audit_log_format(ab, " sock_type=\"unknown(%d)\"",
+ aad(sa)->net.type);
+ audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
+
+ if (aad(sa)->request & NET_PERMS_MASK) {
+ audit_log_format(ab, " requested_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+
+ if (aad(sa)->denied & NET_PERMS_MASK) {
+ audit_log_format(ab, " denied_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+ }
+ }
+ if (aad(sa)->peer) {
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+}
+
+/* Generic af perm */
+int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ u32 request, u16 family, int type)
+{
+ struct aa_perms perms = { };
+ unsigned int state;
+ __be16 buffer[2];
+
+ AA_BUG(family >= AF_MAX);
+ AA_BUG(type < 0 || type >= SOCK_MAX);
+
+ if (profile_unconfined(profile))
+ return 0;
+ state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
+ if (!state)
+ return 0;
+
+ buffer[0] = cpu_to_be16(family);
+ buffer[1] = cpu_to_be16((u16) type);
+ state = aa_dfa_match_len(profile->policy.dfa, state, (char *) &buffer,
+ 4);
+ aa_compute_perms(profile->policy.dfa, state, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+
+ return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+}
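+
+/*
+ * The family/type pair above is matched as four big-endian bytes; for
+ * an illustrative AF_INET (2) SOCK_STREAM (1) socket the dfa input is
+ * the byte sequence 0x00 0x02 0x00 0x01.
+ */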
+
+int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+ int type, int protocol)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
+
+ return fn_for_each_confined(label, profile,
+ aa_profile_af_perm(profile, &sa, request, family,
+ type));
+}
+
+static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+ struct sock *sk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+ int error = 0;
+
+ AA_BUG(!label);
+ AA_BUG(!sk);
+
+ if (ctx->label != kernel_t && !unconfined(label)) {
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(sa, op, sk);
+
+ error = fn_for_each_confined(label, profile,
+ aa_profile_af_sk_perm(profile, &sa, request, sk));
+ }
+
+ return error;
+}
+
+int aa_sk_perm(const char *op, u32 request, struct sock *sk)
+{
+ struct aa_label *label;
+ int error;
+
+ AA_BUG(!sk);
+ AA_BUG(in_interrupt());
+
+ /* TODO: switch to begin_current_label ???? */
+ label = begin_current_label_crit_section();
+ error = aa_label_sk_perm(label, op, request, sk);
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+
+int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+ struct socket *sock)
+{
+ AA_BUG(!label);
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+
+ return aa_label_sk_perm(label, op, request, sock->sk);
+}
+
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_secmark_init(struct aa_secmark *secmark)
+{
+ struct aa_label *label;
+
+ if (secmark->label[0] == '*') {
+ secmark->secid = AA_SECID_WILDCARD;
+ return 0;
+ }
+
+ label = aa_label_strn_parse(&root_ns->unconfined->label,
+ secmark->label, strlen(secmark->label),
+ GFP_ATOMIC, false, false);
+
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
+ secmark->secid = label->secid;
+
+ return 0;
+}
+
+static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+ struct common_audit_data *sa)
+{
+ int i, ret;
+ struct aa_perms perms = { };
+
+ if (profile->secmark_count == 0)
+ return 0;
+
+ for (i = 0; i < profile->secmark_count; i++) {
+ if (!profile->secmark[i].secid) {
+ ret = apparmor_secmark_init(&profile->secmark[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (profile->secmark[i].secid == secid ||
+ profile->secmark[i].secid == AA_SECID_WILDCARD) {
+ if (profile->secmark[i].deny)
+ perms.deny = ALL_PERMS_MASK;
+ else
+ perms.allow = ALL_PERMS_MASK;
+
+ if (profile->secmark[i].audit)
+ perms.audit = ALL_PERMS_MASK;
+ }
+ }
+
+ aa_apply_modes_to_perms(profile, &perms);
+
+ return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+}
+
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, const struct sock *sk)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(sa, op, sk);
+
+ return fn_for_each_confined(label, profile,
+ aa_secmark_perm(profile, request, secid,
+ &sa));
+}
+#endif
diff --git a/security/apparmor/nulldfa.in b/security/apparmor/nulldfa.in
new file mode 100644
index 000000000..095f42a24
--- /dev/null
+++ b/security/apparmor/nulldfa.in
@@ -0,0 +1,107 @@
+0x1B, 0x5E, 0x78, 0x3D, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x04,
+0x90, 0x00, 0x00, 0x6E, 0x6F, 0x74, 0x66, 0x6C, 0x65, 0x78, 0x00,
+0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x04, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
new file mode 100644
index 000000000..45ec994b5
--- /dev/null
+++ b/security/apparmor/path.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions for pathnames
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/magic.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/path.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs_struct.h>
+
+#include "include/apparmor.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+/* modified from dcache.c */
+static int prepend(char **buffer, int buflen, const char *str, int namelen)
+{
+ buflen -= namelen;
+ if (buflen < 0)
+ return -ENAMETOOLONG;
+ *buffer -= namelen;
+ memcpy(*buffer, str, namelen);
+ return 0;
+}
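+
+/*
+ * Illustrative sketch of the prepend() contract (values hypothetical):
+ *
+ *	char buf[8];
+ *	char *p = buf + 8;		- write head starts one past the end
+ *	prepend(&p, 8, "ls", 2);	- p now points at "ls" in buf's tail
+ *	prepend(&p, p - buf, "/", 1);	- p now points at "/ls"
+ *
+ * If the remaining length would go negative, prepend() returns
+ * -ENAMETOOLONG without touching the buffer.
+ */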
+
+#define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
+
+/* If the path is not connected to the expected root (the sysctl case
+ * is handled by the caller), remove any leading / that __d_path may
+ * have returned and fail with -EACCES, unless we were either
+ * - specifically directed to connect the path, or
+ * - in a chroot doing chroot relative paths where the path resolves
+ *   to the namespace root (it would be connected outside of the
+ *   chroot) and we were specifically directed to connect such paths
+ *   to the namespace root.
+ */
+static int disconnect(const struct path *path, char *buf, char **name,
+ int flags, const char *disconnected)
+{
+ int error = 0;
+
+ if (!(flags & PATH_CONNECT_PATH) &&
+ !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
+ our_mnt(path->mnt))) {
+ /* disconnected path, don't return pathname starting
+ * with '/'
+ */
+ error = -EACCES;
+ if (**name == '/')
+ *name = *name + 1;
+ } else {
+ if (**name != '/')
+ /* CONNECT_PATH with missing root */
+ error = prepend(name, *name - buf, "/", 1);
+ if (!error && disconnected)
+ error = prepend(name, *name - buf, disconnected,
+ strlen(disconnected));
+ }
+
+ return error;
+}
+
+/**
+ * d_namespace_path - lookup a name associated with a given path
+ * @path: path to lookup (NOT NULL)
+ * @buf: buffer to store path to (NOT NULL)
+ * @name: Returns - pointer to start of path name within @buf (NOT NULL)
+ * @flags: flags controlling path lookup
+ * @disconnected: string to prefix to disconnected paths
+ *
+ * Handle path name lookup.
+ *
+ * Returns: %0 else error code if path lookup fails
+ * On success the path name is returned in @name, which points to
+ * a position within @buf
+ */
+static int d_namespace_path(const struct path *path, char *buf, char **name,
+ int flags, const char *disconnected)
+{
+ char *res;
+ int error = 0;
+ int connected = 1;
+ int isdir = (flags & PATH_IS_DIR) ? 1 : 0;
+ int buflen = aa_g_path_max - isdir;
+
+ if (path->mnt->mnt_flags & MNT_INTERNAL) {
+ /* it's not mounted anywhere */
+ res = dentry_path(path->dentry, buf, buflen);
+ *name = res;
+ if (IS_ERR(res)) {
+ *name = buf;
+ return PTR_ERR(res);
+ }
+ if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+ strncmp(*name, "/sys/", 5) == 0) {
+ /* TODO: convert over to using a per namespace
+ * control instead of hard coded /proc
+ */
+ error = prepend(name, *name - buf, "/proc", 5);
+ goto out;
+ } else
+ error = disconnect(path, buf, name, flags,
+ disconnected);
+ goto out;
+ }
+
+	/* resolve paths relative to chroot? */
+ if (flags & PATH_CHROOT_REL) {
+ struct path root;
+ get_fs_root(current->fs, &root);
+ res = __d_path(path, &root, buf, buflen);
+ path_put(&root);
+ } else {
+ res = d_absolute_path(path, buf, buflen);
+ if (!our_mnt(path->mnt))
+ connected = 0;
+ }
+
+ /* handle error conditions - and still allow a partial path to
+ * be returned.
+ */
+ if (!res || IS_ERR(res)) {
+ if (PTR_ERR(res) == -ENAMETOOLONG) {
+ error = -ENAMETOOLONG;
+ *name = buf;
+ goto out;
+ }
+ connected = 0;
+ res = dentry_path_raw(path->dentry, buf, buflen);
+ if (IS_ERR(res)) {
+ error = PTR_ERR(res);
+ *name = buf;
+ goto out;
+ }
+ } else if (!our_mnt(path->mnt))
+ connected = 0;
+
+ *name = res;
+
+ if (!connected)
+ error = disconnect(path, buf, name, flags, disconnected);
+
+ /* Handle two cases:
+ * 1. A deleted dentry && profile is not allowing mediation of deleted
+ * 2. On some filesystems, newly allocated dentries appear to the
+ * security_path hooks as a deleted dentry except without an inode
+ * allocated.
+ */
+ if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
+ !(flags & (PATH_MEDIATE_DELETED | PATH_DELEGATE_DELETED))) {
+ error = -ENOENT;
+ goto out;
+ }
+
+out:
+ /*
+ * Append "/" to the pathname. The root directory is a special
+ * case; it already ends in slash.
+ */
+ if (!error && isdir && ((*name)[1] != '\0' || (*name)[0] != '/'))
+ strcpy(&buf[aa_g_path_max - 2], "/");
+
+ return error;
+}
+
+/**
+ * aa_path_name - get the pathname into a buffer, ensuring dir / is appended
+ * @path: path the file (NOT NULL)
+ * @flags: flags controlling path name generation
+ * @buffer: buffer to put name in (NOT NULL)
+ * @name: Returns - the generated path name if !error (NOT NULL)
+ * @info: Returns - information on why the path lookup failed (MAYBE NULL)
+ * @disconnected: string to prepend to disconnected paths
+ *
+ * @name is a pointer to the beginning of the pathname (which usually differs
+ * from the beginning of the buffer), or NULL. If there is an error @name
+ * may contain a partial or invalid name that can be used for audit purposes,
+ * but it can not be used for mediation.
+ *
+ * We need PATH_IS_DIR to indicate whether the file is a directory or not
+ * because the file may not yet exist, and so we cannot check the inode's
+ * file type.
+ *
+ * Returns: %0 else error code if the name could not be retrieved
+ */
+int aa_path_name(const struct path *path, int flags, char *buffer,
+ const char **name, const char **info, const char *disconnected)
+{
+ char *str = NULL;
+ int error = d_namespace_path(path, buffer, &str, flags, disconnected);
+
+ if (info && error) {
+ if (error == -ENOENT)
+ *info = "Failed name lookup - deleted entry";
+ else if (error == -EACCES)
+ *info = "Failed name lookup - disconnected path";
+ else if (error == -ENAMETOOLONG)
+ *info = "Failed name lookup - name too long";
+ else
+ *info = "Failed name lookup";
+ }
+
+ *name = str;
+
+ return error;
+}
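+
+/*
+ * Minimal illustrative caller sketch (hypothetical, not a real caller):
+ *
+ *	char *buf = kmalloc(aa_g_path_max, GFP_KERNEL);
+ *	const char *name, *info = NULL;
+ *	int error = aa_path_name(&file->f_path, profile->path_flags, buf,
+ *				 &name, &info, profile->disconnected);
+ *
+ * On success @name points into @buf (usually not at its start); on
+ * failure @name may still hold a partial name usable only for audit.
+ */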
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
new file mode 100644
index 000000000..c7b84fb56
--- /dev/null
+++ b/security/apparmor/policy.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy manipulation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * AppArmor policy is based around profiles, which contain the rules a
+ * task is confined by. Every task in the system has a profile attached
+ * to it determined either by matching "unconfined" tasks against the
+ * visible set of profiles or by following a profile's attachment rules.
+ *
+ * Each profile exists in a profile namespace which is a container of
+ * visible profiles. Each namespace contains a special "unconfined" profile,
+ * which doesn't enforce any confinement on a task beyond DAC.
+ *
+ * Namespace and profile names can be written together in either
+ * of two syntaxes.
+ * :namespace:profile - used by kernel interfaces for easy detection
+ * namespace://profile - used by policy
+ *
+ * Profile names can not start with : or @ or ^ and may not contain \0
+ *
+ * Reserved profile names
+ * unconfined - special automatically generated unconfined profile
+ * inherit - special name to indicate profile inheritance
+ * null-XXXX-YYYY - special automatically generated learning profiles
+ *
+ * Namespace names may not start with / or @ and may not contain \0 or :
+ * Reserved namespace names
+ * user-XXXX - user defined profiles
+ *
+ * a // in a profile or namespace name indicates a hierarchical name with the
+ * name before the // being the parent and the name after the child.
+ *
+ * Profile and namespace hierarchies serve two different but similar purposes.
+ * The namespace contains the set of visible profiles that are considered
+ * for attachment. The hierarchy of namespaces allows for virtualizing
+ * the namespace so that for example a chroot can have its own set of profiles
+ * which may define some local user namespaces.
+ * The profile hierarchy serves two distinct purposes,
+ * - it allows for sub profiles or hats, which allows an application to run
+ *   subprograms under its own profile with different restrictions than
+ *   itself, and not have it use the system profile.
+ *   eg. if a mail program starts an editor, the policy might make the
+ *   restrictions on the editor tighter than those on the mail program,
+ *   and definitely different than general editor restrictions
+ * - it allows for binary hierarchy of profiles, so that execution history
+ * is preserved. This feature isn't exploited by AppArmor reference policy
+ * but is allowed. NOTE: this is currently suboptimal because profile
+ * aliasing is not yet implemented, so a profile for each
+ * level must be defined.
+ * eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started
+ * from /bin/bash
+ *
+ * A profile or namespace name that can contain one or more // separators
+ * is referred to as an hname (hierarchical).
+ * eg. /bin/bash//bin/ls
+ *
+ * An fqname is a name that may contain both namespace and profile hnames.
+ * eg. :ns:/bin/bash//bin/ls
+ *
+ * NOTES:
+ * - locking of profile lists is currently fairly coarse. All profile
+ * lists within a namespace use the namespace lock.
+ * FIXME: move profile lists to using rcu_lists
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/cred.h>
+#include <linux/rculist.h>
+#include <linux/user_namespace.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/cred.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+#include "include/policy_unpack.h"
+#include "include/resource.h"
+
+int unprivileged_userns_apparmor_policy = 1;
+
+const char *const aa_profile_mode_names[] = {
+ "enforce",
+ "complain",
+ "kill",
+ "unconfined",
+};
+
+
+/**
+ * __add_profile - add a profile to list and label tree
+ * @list: list to add it to (NOT NULL)
+ * @profile: the profile to add (NOT NULL)
+ *
+ * refcount @profile, should be put by __list_remove_profile
+ *
+ * Requires: namespace lock be held, or list not be shared
+ */
+static void __add_profile(struct list_head *list, struct aa_profile *profile)
+{
+ struct aa_label *l;
+
+ AA_BUG(!list);
+ AA_BUG(!profile);
+ AA_BUG(!profile->ns);
+ AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
+ list_add_rcu(&profile->base.list, list);
+ /* get list reference */
+ aa_get_profile(profile);
+ l = aa_label_insert(&profile->ns->labels, &profile->label);
+ AA_BUG(l != &profile->label);
+ aa_put_label(l);
+}
+
+/**
+ * __list_remove_profile - remove a profile from the list it is on
+ * @profile: the profile to remove (NOT NULL)
+ *
+ * Remove a profile from the list. Warning: generally removal should
+ * be done with __replace_profile as most profile removals are
+ * replacements to the unconfined profile.
+ *
+ * put @profile list refcount
+ *
+ * Requires: namespace lock be held, or list never to have been live
+ */
+static void __list_remove_profile(struct aa_profile *profile)
+{
+ AA_BUG(!profile);
+ AA_BUG(!profile->ns);
+ AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
+ list_del_rcu(&profile->base.list);
+ aa_put_profile(profile);
+}
+
+/**
+ * __remove_profile - remove old profile, and children
+ * @profile: profile to be replaced (NOT NULL)
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __remove_profile(struct aa_profile *profile)
+{
+ AA_BUG(!profile);
+ AA_BUG(!profile->ns);
+ AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
+ /* release any children lists first */
+ __aa_profile_list_release(&profile->base.profiles);
+ /* released by free_profile */
+ aa_label_remove(&profile->label);
+ __aafs_profile_rmdir(profile);
+ __list_remove_profile(profile);
+}
+
+/**
+ * __aa_profile_list_release - remove all profiles on the list and put refs
+ * @head: list of profiles (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+void __aa_profile_list_release(struct list_head *head)
+{
+ struct aa_profile *profile, *tmp;
+ list_for_each_entry_safe(profile, tmp, head, base.list)
+ __remove_profile(profile);
+}
+
+/**
+ * aa_free_data - free a data blob
+ * @ptr: data to free
+ * @arg: unused
+ */
+static void aa_free_data(void *ptr, void *arg)
+{
+ struct aa_data *data = ptr;
+
+ kfree_sensitive(data->data);
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+}
+
+/**
+ * aa_free_profile - free a profile
+ * @profile: the profile to free (MAYBE NULL)
+ *
+ * Free a profile, its hats and null_profile. All references to the profile,
+ * its hats and null_profile must have been put.
+ *
+ * If the profile was referenced from a task context, free_profile() will
+ * be called from an rcu callback routine, so we must not sleep here.
+ */
+void aa_free_profile(struct aa_profile *profile)
+{
+ struct rhashtable *rht;
+ int i;
+
+ AA_DEBUG("%s(%p)\n", __func__, profile);
+
+ if (!profile)
+ return;
+
+ /* free children profiles */
+ aa_policy_destroy(&profile->base);
+ aa_put_profile(rcu_access_pointer(profile->parent));
+
+ aa_put_ns(profile->ns);
+ kfree_sensitive(profile->rename);
+ kfree_sensitive(profile->disconnected);
+
+ aa_free_file_rules(&profile->file);
+ aa_free_cap_rules(&profile->caps);
+ aa_free_rlimit_rules(&profile->rlimits);
+
+ for (i = 0; i < profile->xattr_count; i++)
+ kfree_sensitive(profile->xattrs[i]);
+ kfree_sensitive(profile->xattrs);
+ for (i = 0; i < profile->secmark_count; i++)
+ kfree_sensitive(profile->secmark[i].label);
+ kfree_sensitive(profile->secmark);
+ kfree_sensitive(profile->dirname);
+ aa_put_dfa(profile->xmatch);
+ aa_put_dfa(profile->policy.dfa);
+
+ if (profile->data) {
+ rht = profile->data;
+ profile->data = NULL;
+ rhashtable_free_and_destroy(rht, aa_free_data, NULL);
+ kfree_sensitive(rht);
+ }
+
+ kfree_sensitive(profile->hash);
+ aa_put_loaddata(profile->rawdata);
+ aa_label_destroy(&profile->label);
+
+ kfree_sensitive(profile);
+}
+
+/**
+ * aa_alloc_profile - allocate, initialize and return a new profile
+ * @hname: name of the profile (NOT NULL)
+ * @proxy: proxy to use OR NULL if to allocate a new one
+ * @gfp: allocation type
+ *
+ * Returns: refcount profile or NULL on failure
+ */
+struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
+ gfp_t gfp)
+{
+ struct aa_profile *profile;
+
+ /* freed by free_profile - usually through aa_put_profile */
+ profile = kzalloc(struct_size(profile, label.vec, 2), gfp);
+ if (!profile)
+ return NULL;
+
+ if (!aa_policy_init(&profile->base, NULL, hname, gfp))
+ goto fail;
+ if (!aa_label_init(&profile->label, 1, gfp))
+ goto fail;
+
+ /* update being set needed by fs interface */
+ if (!proxy) {
+ proxy = aa_alloc_proxy(&profile->label, gfp);
+ if (!proxy)
+ goto fail;
+ } else
+ aa_get_proxy(proxy);
+ profile->label.proxy = proxy;
+
+ profile->label.hname = profile->base.hname;
+ profile->label.flags |= FLAG_PROFILE;
+ profile->label.vec[0] = profile;
+
+ /* refcount released by caller */
+ return profile;
+
+fail:
+ aa_free_profile(profile);
+
+ return NULL;
+}
+
+/* TODO: profile accounting - setup in remove */
+
+/**
+ * __strn_find_child - find a profile on @head list using substring of @name
+ * @head: list to search (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ * @len: length of @name substring to match
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__strn_find_child(struct list_head *head,
+ const char *name, int len)
+{
+ return (struct aa_profile *)__policy_strn_find(head, name, len);
+}
+
+/**
+ * __find_child - find a profile on @head list with a name matching @name
+ * @head: list to search (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__find_child(struct list_head *head, const char *name)
+{
+ return __strn_find_child(head, name, strlen(name));
+}
+
+/**
+ * aa_find_child - find a profile by @name in @parent
+ * @parent: profile to search (NOT NULL)
+ * @name: profile name to search for (NOT NULL)
+ *
+ * Returns: a refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
+{
+ struct aa_profile *profile;
+
+ rcu_read_lock();
+ do {
+ profile = __find_child(&parent->base.profiles, name);
+ } while (profile && !aa_get_profile_not0(profile));
+ rcu_read_unlock();
+
+ /* refcount released by caller */
+ return profile;
+}
+
+/**
+ * __lookup_parent - lookup the parent of a profile of name @hname
+ * @ns: namespace to lookup profile in (NOT NULL)
+ * @hname: hierarchical profile name to find parent of (NOT NULL)
+ *
+ * Looks up the parent of a fully qualified profile name. The profile
+ * that matches @hname does not need to exist; in general this
+ * is used to load a new profile.
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy or NULL if not found
+ */
+static struct aa_policy *__lookup_parent(struct aa_ns *ns,
+ const char *hname)
+{
+ struct aa_policy *policy;
+ struct aa_profile *profile = NULL;
+ char *split;
+
+ policy = &ns->base;
+
+ for (split = strstr(hname, "//"); split;) {
+ profile = __strn_find_child(&policy->profiles, hname,
+ split - hname);
+ if (!profile)
+ return NULL;
+ policy = &profile->base;
+ hname = split + 2;
+ split = strstr(hname, "//");
+ }
+ if (!profile)
+ return &ns->base;
+ return &profile->base;
+}
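+
+/*
+ * e.g. (hypothetical names) for hname "/bin/bash//bin/ls",
+ * __lookup_parent() walks each "//" separator and returns the policy
+ * of "/bin/bash"; for an hname without "//" it returns &ns->base.
+ */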
+
+/**
+ * __lookupn_profile - lookup the profile matching @hname
+ * @base: base list to start looking up profile name from (NOT NULL)
+ * @hname: hierarchical profile name (NOT NULL)
+ * @n: length of @hname
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile pointer or NULL if not found
+ *
+ * Do a relative name lookup, recursing through profile tree.
+ */
+static struct aa_profile *__lookupn_profile(struct aa_policy *base,
+ const char *hname, size_t n)
+{
+ struct aa_profile *profile = NULL;
+ const char *split;
+
+ for (split = strnstr(hname, "//", n); split;
+ split = strnstr(hname, "//", n)) {
+ profile = __strn_find_child(&base->profiles, hname,
+ split - hname);
+ if (!profile)
+ return NULL;
+
+ base = &profile->base;
+ n -= split + 2 - hname;
+ hname = split + 2;
+ }
+
+ if (n)
+ return __strn_find_child(&base->profiles, hname, n);
+ return NULL;
+}
+
+static struct aa_profile *__lookup_profile(struct aa_policy *base,
+ const char *hname)
+{
+ return __lookupn_profile(base, hname, strlen(hname));
+}
+
+/**
+ * aa_lookupn_profile - find a profile by its full or partial name
+ * @ns: the namespace to start from (NOT NULL)
+ * @hname: name to do lookup on. Does not contain namespace prefix (NOT NULL)
+ * @n: size of @hname
+ *
+ * Returns: refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname,
+ size_t n)
+{
+ struct aa_profile *profile;
+
+ rcu_read_lock();
+ do {
+ profile = __lookupn_profile(&ns->base, hname, n);
+ } while (profile && !aa_get_profile_not0(profile));
+ rcu_read_unlock();
+
+ /* the unconfined profile is not in the regular profile list */
+ if (!profile && strncmp(hname, "unconfined", n) == 0)
+ profile = aa_get_newest_profile(ns->unconfined);
+
+ /* refcount released by caller */
+ return profile;
+}
+
+struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *hname)
+{
+ return aa_lookupn_profile(ns, hname, strlen(hname));
+}
+
+struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
+ const char *fqname, size_t n)
+{
+ struct aa_profile *profile;
+ struct aa_ns *ns;
+ const char *name, *ns_name;
+ size_t ns_len;
+
+ name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len);
+ if (ns_name) {
+ ns = aa_lookupn_ns(labels_ns(base), ns_name, ns_len);
+ if (!ns)
+ return NULL;
+ } else
+ ns = aa_get_ns(labels_ns(base));
+
+ if (name)
+ profile = aa_lookupn_profile(ns, name, n - (name - fqname));
+ else if (ns)
+ /* default profile for ns, currently unconfined */
+ profile = aa_get_newest_profile(ns->unconfined);
+ else
+ profile = NULL;
+ aa_put_ns(ns);
+
+ return profile;
+}
+
+/**
+ * aa_new_null_profile - create or find a null-X learning profile
+ * @parent: profile that caused this profile to be created (NOT NULL)
+ * @hat: true if the null- learning profile is a hat
+ * @base: name to base the null profile off of
+ * @gfp: type of allocation
+ *
+ * Find/Create a null- complain mode profile used in learning mode. The
+ * name of the profile is unique and follows the format of parent//null-XXX,
+ * where XXX is based on @base or, if that fails or @base is not supplied,
+ * a unique number.
+ *
+ * null profiles are added to the profile list but the list does not
+ * hold a count on them so that they are automatically released when
+ * not in use.
+ *
+ * Returns: new refcounted profile else NULL on failure
+ */
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
+ const char *base, gfp_t gfp)
+{
+ struct aa_profile *p, *profile;
+ const char *bname;
+ char *name = NULL;
+
+ AA_BUG(!parent);
+
+ if (base) {
+ name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base),
+ gfp);
+ if (name) {
+ sprintf(name, "%s//null-%s", parent->base.hname, base);
+ goto name;
+ }
+ /* fall through to try shorter uniq */
+ }
+
+ name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp);
+ if (!name)
+ return NULL;
+ sprintf(name, "%s//null-%x", parent->base.hname,
+ atomic_inc_return(&parent->ns->uniq_null));
+
+name:
+ /* lookup to see if this is a dup creation */
+ bname = basename(name);
+ profile = aa_find_child(parent, bname);
+ if (profile)
+ goto out;
+
+ profile = aa_alloc_profile(name, NULL, gfp);
+ if (!profile)
+ goto fail;
+
+ profile->mode = APPARMOR_COMPLAIN;
+ profile->label.flags |= FLAG_NULL;
+ if (hat)
+ profile->label.flags |= FLAG_HAT;
+ profile->path_flags = parent->path_flags;
+
+ /* released on free_profile */
+ rcu_assign_pointer(profile->parent, aa_get_profile(parent));
+ profile->ns = aa_get_ns(parent->ns);
+ profile->file.dfa = aa_get_dfa(nulldfa);
+ profile->policy.dfa = aa_get_dfa(nulldfa);
+
+ mutex_lock_nested(&profile->ns->lock, profile->ns->level);
+ p = __find_child(&parent->base.profiles, bname);
+ if (p) {
+ aa_free_profile(profile);
+ profile = aa_get_profile(p);
+ } else {
+ __add_profile(&parent->base.profiles, profile);
+ }
+ mutex_unlock(&profile->ns->lock);
+
+ /* refcount released by caller */
+out:
+ kfree(name);
+
+ return profile;
+
+fail:
+ kfree(name);
+ aa_free_profile(profile);
+ return NULL;
+}
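+
+/*
+ * e.g. (hypothetical names) a learning profile created for parent
+ * "/bin/foo" with @base "bar" is named "/bin/foo//null-bar", falling
+ * back to "/bin/foo//null-<uniq>" if allocating the longer name fails.
+ */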
+
+/**
+ * replacement_allowed - test to see if replacement is allowed
+ * @profile: profile to test if it can be replaced (MAYBE NULL)
+ * @noreplace: true if replacement shouldn't be allowed but addition is okay
+ * @info: Returns - info about why replacement failed (NOT NULL)
+ *
+ * Returns: %0 if replacement allowed else error code
+ */
+static int replacement_allowed(struct aa_profile *profile, int noreplace,
+ const char **info)
+{
+ if (profile) {
+ if (profile->label.flags & FLAG_IMMUTIBLE) {
+ *info = "cannot replace immutable profile";
+ return -EPERM;
+ } else if (noreplace) {
+ *info = "profile already exists";
+ return -EEXIST;
+ }
+ }
+ return 0;
+}
+
+/* audit callback for policy specific fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->iface.ns) {
+ audit_log_format(ab, " ns=");
+ audit_log_untrustedstring(ab, aad(sa)->iface.ns);
+ }
+}
+
+/**
+ * audit_policy - Do auditing of policy changes
+ * @label: label to check if it can manage policy
+ * @op: policy operation being performed
+ * @ns_name: name of namespace being manipulated
+ * @name: name of profile being manipulated (NOT NULL)
+ * @info: any extra information to be audited (MAYBE NULL)
+ * @error: error code
+ *
+ * Returns: the error to be returned after audit is done
+ */
+static int audit_policy(struct aa_label *label, const char *op,
+ const char *ns_name, const char *name,
+ const char *info, int error)
+{
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, op);
+
+ aad(&sa)->iface.ns = ns_name;
+ aad(&sa)->name = name;
+ aad(&sa)->info = info;
+ aad(&sa)->error = error;
+ aad(&sa)->label = label;
+
+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
+
+ return error;
+}
+
+/* don't call out to other LSMs in the stack for apparmor policy admin
+ * permissions
+ */
+static int policy_ns_capable(struct aa_label *label,
+ struct user_namespace *userns, int cap)
+{
+ int err;
+
+ /* check for MAC_ADMIN cap in cred */
+ err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
+ if (!err)
+ err = aa_capable(label, cap, CAP_OPT_NONE);
+
+ return err;
+}
+
+/**
+ * aa_policy_view_capable - check if viewing policy at @ns is allowed
+ * @label: label that is trying to view policy in ns
+ * @ns: namespace being viewed by @label (may be NULL if @label's ns)
+ * Returns: true if viewing policy is allowed
+ *
+ * If @ns is NULL then the namespace being viewed is assumed to be the
+ * tasks current namespace.
+ */
+bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ struct aa_ns *view_ns = labels_view(label);
+ bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
+ in_egroup_p(make_kgid(user_ns, 0));
+ bool response = false;
+ if (!ns)
+ ns = view_ns;
+
+ if (root_in_user_ns && aa_ns_visible(view_ns, ns, true) &&
+ (user_ns == &init_user_ns ||
+ (unprivileged_userns_apparmor_policy != 0 &&
+ user_ns->level == view_ns->level)))
+ response = true;
+
+ return response;
+}
+
+bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
+
+ AA_DEBUG("cap_mac_admin? %d\n", capable);
+ AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
+
+ return aa_policy_view_capable(label, ns) && capable &&
+ !aa_g_lock_policy;
+}
+
+bool aa_current_policy_view_capable(struct aa_ns *ns)
+{
+ struct aa_label *label;
+ bool res;
+
+ label = __begin_current_label_crit_section();
+ res = aa_policy_view_capable(label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+}
+
+bool aa_current_policy_admin_capable(struct aa_ns *ns)
+{
+ struct aa_label *label;
+ bool res;
+
+ label = __begin_current_label_crit_section();
+ res = aa_policy_admin_capable(label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+}
+
+/**
+ * aa_may_manage_policy - can the current task manage policy
+ * @label: label to check if it can manage policy
+ * @ns: namespace being managed by @label (may be NULL if @label's ns)
+ * @mask: contains the policy manipulation operations being done
+ *
+ * Returns: 0 if the task is allowed to manipulate policy else error
+ */
+int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
+{
+ const char *op;
+
+ if (mask & AA_MAY_REMOVE_POLICY)
+ op = OP_PROF_RM;
+ else if (mask & AA_MAY_REPLACE_POLICY)
+ op = OP_PROF_REPL;
+ else
+ op = OP_PROF_LOAD;
+
+ /* check if loading policy is locked out */
+ if (aa_g_lock_policy)
+ return audit_policy(label, op, NULL, NULL, "policy_locked",
+ -EACCES);
+
+ if (!aa_policy_admin_capable(label, ns))
+ return audit_policy(label, op, NULL, NULL, "not policy admin",
+ -EACCES);
+
+ /* TODO: add fine grained mediation of policy loads */
+ return 0;
+}
+
+static struct aa_profile *__list_lookup_parent(struct list_head *lh,
+ struct aa_profile *profile)
+{
+ const char *base = basename(profile->base.hname);
+ long len = base - profile->base.hname;
+ struct aa_load_ent *ent;
+
+ /* parent won't have trailing // so remove from len */
+ if (len <= 2)
+ return NULL;
+ len -= 2;
+
+ list_for_each_entry(ent, lh, list) {
+ if (ent->new == profile)
+ continue;
+ if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
+ 0 && ent->new->base.hname[len] == 0)
+ return ent->new;
+ }
+
+ return NULL;
+}
+
+/**
+ * __replace_profile - replace @old with @new on a list
+ * @old: profile to be replaced (NOT NULL)
+ * @new: profile to replace @old with (NOT NULL)
+ *
+ * Will duplicate and refcount elements that @new inherits from @old
+ * and will inherit @old children.
+ *
+ * refcount @new for list, put @old list refcount
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
+{
+ struct aa_profile *child, *tmp;
+
+ if (!list_empty(&old->base.profiles)) {
+ LIST_HEAD(lh);
+ list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
+
+ list_for_each_entry_safe(child, tmp, &lh, base.list) {
+ struct aa_profile *p;
+
+ list_del_init(&child->base.list);
+ p = __find_child(&new->base.profiles, child->base.name);
+ if (p) {
+ /* @p replaces @child */
+ __replace_profile(child, p);
+ continue;
+ }
+
+ /* inherit @child and its children */
+ /* TODO: update hname of inherited children */
+ /* list refcount transferred to @new */
+ p = aa_deref_parent(child);
+ rcu_assign_pointer(child->parent, aa_get_profile(new));
+ list_add_rcu(&child->base.list, &new->base.profiles);
+ aa_put_profile(p);
+ }
+ }
+
+ if (!rcu_access_pointer(new->parent)) {
+ struct aa_profile *parent = aa_deref_parent(old);
+ rcu_assign_pointer(new->parent, aa_get_profile(parent));
+ }
+ aa_label_replace(&old->label, &new->label);
+ /* migrate dents must come after label replacement b/c update */
+ __aafs_profile_migrate_dents(old, new);
+
+ if (list_empty(&new->base.list)) {
+ /* new is not on a list already */
+ list_replace_rcu(&old->base.list, &new->base.list);
+ aa_get_profile(new);
+ aa_put_profile(old);
+ } else
+ __list_remove_profile(old);
+}
+
+/**
+ * __lookup_replace - lookup replacement information for a profile
+ * @ns: namespace the lookup occurs in
+ * @hname: name of profile to lookup
+ * @noreplace: true if not replacing an existing profile
+ * @p: Returns - profile to be replaced
+ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: %0 with *@p set to the profile to replace (no ref) else error code
+ */
+static int __lookup_replace(struct aa_ns *ns, const char *hname,
+ bool noreplace, struct aa_profile **p,
+ const char **info)
+{
+ *p = aa_get_profile(__lookup_profile(&ns->base, hname));
+ if (*p) {
+ int error = replacement_allowed(*p, noreplace, info);
+ if (error) {
+ *info = "profile can not be replaced";
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static void share_name(struct aa_profile *old, struct aa_profile *new)
+{
+ aa_put_str(new->base.hname);
+ aa_get_str(old->base.hname);
+ new->base.hname = old->base.hname;
+ new->base.name = old->base.name;
+ new->label.hname = old->label.hname;
+}
+
+/* Update to newest version of parent after previous replacements
+ * Returns: unrefcounted newest version of parent
+ */
+static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
+{
+ struct aa_profile *parent, *newest;
+
+ parent = rcu_dereference_protected(new->parent,
+ mutex_is_locked(&new->ns->lock));
+ newest = aa_get_newest_profile(parent);
+
+ /* parent replaced in this atomic set? */
+ if (newest != parent) {
+ aa_put_profile(parent);
+ rcu_assign_pointer(new->parent, newest);
+ } else
+ aa_put_profile(newest);
+
+ return newest;
+}
+
+/**
+ * aa_replace_profiles - replace profile(s) on the profile list
+ * @policy_ns: namespace load is occurring on
+ * @label: label that is attempting to load/replace policy
+ * @mask: permission mask
+ * @udata: serialized data stream (NOT NULL)
+ *
+ * Unpack and replace a profile on the profile list, and invalidate uses of
+ * the old version of the profile by any task creds; tasks will notice the
+ * invalidation and update their own cred. If the profile does not exist
+ * on the profile list it is added.
+ *
+ * Returns: size of data consumed else error code on failure.
+ */
+ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
+ u32 mask, struct aa_loaddata *udata)
+{
+ const char *ns_name = NULL, *info = NULL;
+ struct aa_ns *ns = NULL;
+ struct aa_load_ent *ent, *tmp;
+ struct aa_loaddata *rawdata_ent;
+ const char *op;
+ ssize_t count, error;
+ LIST_HEAD(lh);
+
+ op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD;
+ aa_get_loaddata(udata);
+ /* released below */
+ error = aa_unpack(udata, &lh, &ns_name);
+ if (error)
+ goto out;
+
+ /* ensure that profiles are all for the same ns
+	 * TODO: update locking to remove this constraint. All profiles in
+ * the load set must succeed as a set or the load will
+ * fail. Sort ent list and take ns locks in hierarchy order
+ */
+ count = 0;
+ list_for_each_entry(ent, &lh, list) {
+ if (ns_name) {
+ if (ent->ns_name &&
+ strcmp(ent->ns_name, ns_name) != 0) {
+ info = "policy load has mixed namespaces";
+ error = -EACCES;
+ goto fail;
+ }
+ } else if (ent->ns_name) {
+ if (count) {
+ info = "policy load has mixed namespaces";
+ error = -EACCES;
+ goto fail;
+ }
+ ns_name = ent->ns_name;
+ } else
+ count++;
+ }
+ if (ns_name) {
+ ns = aa_prepare_ns(policy_ns ? policy_ns : labels_ns(label),
+ ns_name);
+ if (IS_ERR(ns)) {
+ op = OP_PROF_LOAD;
+ info = "failed to prepare namespace";
+ error = PTR_ERR(ns);
+ ns = NULL;
+ ent = NULL;
+ goto fail;
+ }
+ } else
+ ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
+
+ mutex_lock_nested(&ns->lock, ns->level);
+ /* check for duplicate rawdata blobs: space and file dedup */
+ if (!list_empty(&ns->rawdata_list)) {
+ list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
+ if (aa_rawdata_eq(rawdata_ent, udata)) {
+ struct aa_loaddata *tmp;
+
+ tmp = __aa_get_loaddata(rawdata_ent);
+ /* check we didn't fail the race */
+ if (tmp) {
+ aa_put_loaddata(udata);
+ udata = tmp;
+ break;
+ }
+ }
+ }
+ }
+ /* setup parent and ns info */
+ list_for_each_entry(ent, &lh, list) {
+ struct aa_policy *policy;
+
+ if (aa_g_export_binary)
+ ent->new->rawdata = aa_get_loaddata(udata);
+ error = __lookup_replace(ns, ent->new->base.hname,
+ !(mask & AA_MAY_REPLACE_POLICY),
+ &ent->old, &info);
+ if (error)
+ goto fail_lock;
+
+ if (ent->new->rename) {
+ error = __lookup_replace(ns, ent->new->rename,
+ !(mask & AA_MAY_REPLACE_POLICY),
+ &ent->rename, &info);
+ if (error)
+ goto fail_lock;
+ }
+
+ /* released when @new is freed */
+ ent->new->ns = aa_get_ns(ns);
+
+ if (ent->old || ent->rename)
+ continue;
+
+ /* no ref on policy only use inside lock */
+ policy = __lookup_parent(ns, ent->new->base.hname);
+ if (!policy) {
+ struct aa_profile *p;
+ p = __list_lookup_parent(&lh, ent->new);
+ if (!p) {
+ error = -ENOENT;
+ info = "parent does not exist";
+ goto fail_lock;
+ }
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ } else if (policy != &ns->base) {
+ /* released on profile replacement or free_profile */
+ struct aa_profile *p = (struct aa_profile *) policy;
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ }
+ }
+
+ /* create new fs entries for introspection if needed */
+ if (!udata->dents[AAFS_LOADDATA_DIR] && aa_g_export_binary) {
+ error = __aa_fs_create_rawdata(ns, udata);
+ if (error) {
+ info = "failed to create raw_data dir and files";
+ ent = NULL;
+ goto fail_lock;
+ }
+ }
+ list_for_each_entry(ent, &lh, list) {
+ if (!ent->old) {
+ struct dentry *parent;
+ if (rcu_access_pointer(ent->new->parent)) {
+ struct aa_profile *p;
+ p = aa_deref_parent(ent->new);
+ parent = prof_child_dir(p);
+ } else
+ parent = ns_subprofs_dir(ent->new->ns);
+ error = __aafs_profile_mkdir(ent->new, parent);
+ }
+
+ if (error) {
+ info = "failed to create";
+ goto fail_lock;
+ }
+ }
+
+ /* Done with checks that may fail - do actual replacement */
+ __aa_bump_ns_revision(ns);
+ if (aa_g_export_binary)
+ __aa_loaddata_update(udata, ns->revision);
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+
+ if (ent->old && ent->old->rawdata == ent->new->rawdata &&
+ ent->new->rawdata) {
+ /* dedup actual profile replacement */
+ audit_policy(label, op, ns_name, ent->new->base.hname,
+ "same as current profile, skipping",
+ error);
+ /* break refcount cycle with proxy. */
+ aa_put_proxy(ent->new->label.proxy);
+ ent->new->label.proxy = NULL;
+ goto skip;
+ }
+
+ /*
+ * TODO: finer dedup based on profile range in data. Load set
+ * can differ but profile may remain unchanged
+ */
+ audit_policy(label, op, ns_name, ent->new->base.hname, NULL,
+ error);
+
+ if (ent->old) {
+ share_name(ent->old, ent->new);
+ __replace_profile(ent->old, ent->new);
+ } else {
+ struct list_head *lh;
+
+ if (rcu_access_pointer(ent->new->parent)) {
+ struct aa_profile *parent;
+
+ parent = update_to_newest_parent(ent->new);
+ lh = &parent->base.profiles;
+ } else
+ lh = &ns->base.profiles;
+ __add_profile(lh, ent->new);
+ }
+ skip:
+ aa_load_ent_free(ent);
+ }
+ __aa_labelset_update_subtree(ns);
+ mutex_unlock(&ns->lock);
+
+out:
+ aa_put_ns(ns);
+ aa_put_loaddata(udata);
+ kfree(ns_name);
+
+ if (error)
+ return error;
+ return udata->size;
+
+fail_lock:
+ mutex_unlock(&ns->lock);
+
+ /* audit cause of failure */
+ op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
+fail:
+ audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL,
+ info, error);
+ /* audit status that rest of profiles in the atomic set failed too */
+ info = "valid profile in failed atomic policy load";
+ list_for_each_entry(tmp, &lh, list) {
+ if (tmp == ent) {
+ info = "unchecked profile in failed atomic policy load";
+ /* skip entry that caused failure */
+ continue;
+ }
+ op = (!tmp->old) ? OP_PROF_LOAD : OP_PROF_REPL;
+ audit_policy(label, op, ns_name, tmp->new->base.hname, info,
+ error);
+ }
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
+ goto out;
+}
+
+/**
+ * aa_remove_profiles - remove profile(s) from the system
+ * @policy_ns: namespace the remove is being done from
+ * @subj: label attempting to remove policy
+ * @fqname: name of the profile or namespace to remove (NOT NULL)
+ * @size: size of the name
+ *
+ * Remove a profile or sub namespace from the current namespace, so that
+ * it can no longer be found, and mark it as replaced by unconfined
+ *
+ * NOTE: removing confinement does not restore rlimits to preconfinement values
+ *
+ * Returns: size of data consumed else error code on failure
+ */
+ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
+ char *fqname, size_t size)
+{
+ struct aa_ns *ns = NULL;
+ struct aa_profile *profile = NULL;
+ const char *name = fqname, *info = NULL;
+ const char *ns_name = NULL;
+ ssize_t error = 0;
+
+ if (*fqname == 0) {
+ info = "no profile specified";
+ error = -ENOENT;
+ goto fail;
+ }
+
+ if (fqname[0] == ':') {
+ size_t ns_len;
+
+ name = aa_splitn_fqname(fqname, size, &ns_name, &ns_len);
+ /* released below */
+ ns = aa_lookupn_ns(policy_ns ? policy_ns : labels_ns(subj),
+ ns_name, ns_len);
+ if (!ns) {
+ info = "namespace does not exist";
+ error = -ENOENT;
+ goto fail;
+ }
+ } else
+ /* released below */
+ ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(subj));
+
+ if (!name) {
+ /* remove namespace - can only happen if fqname[0] == ':' */
+ mutex_lock_nested(&ns->parent->lock, ns->parent->level);
+ __aa_bump_ns_revision(ns);
+ __aa_remove_ns(ns);
+ mutex_unlock(&ns->parent->lock);
+ } else {
+ /* remove profile */
+ mutex_lock_nested(&ns->lock, ns->level);
+ profile = aa_get_profile(__lookup_profile(&ns->base, name));
+ if (!profile) {
+ error = -ENOENT;
+ info = "profile does not exist";
+ goto fail_ns_lock;
+ }
+ name = profile->base.hname;
+ __aa_bump_ns_revision(ns);
+ __remove_profile(profile);
+ __aa_labelset_update_subtree(ns);
+ mutex_unlock(&ns->lock);
+ }
+
+ /* don't fail removal if audit fails */
+ (void) audit_policy(subj, OP_PROF_RM, ns_name, name, info,
+ error);
+ aa_put_ns(ns);
+ aa_put_profile(profile);
+ return size;
+
+fail_ns_lock:
+ mutex_unlock(&ns->lock);
+ aa_put_ns(ns);
+
+fail:
+ (void) audit_policy(subj, OP_PROF_RM, ns_name, name, info,
+ error);
+ return error;
+}
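+
+/*
+ * e.g. (hypothetical names) writing ":foo:" removes the sub namespace
+ * "foo" and everything in it, while ":foo:/bin/bar" or "/bin/bar"
+ * removes a single profile, relative to the given or current namespace.
+ */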
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
new file mode 100644
index 000000000..78700d94b
--- /dev/null
+++ b/security/apparmor/policy_ns.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy manipulation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ *
+ * AppArmor policy namespaces allow for different sets of policies
+ * to be loaded for tasks within the namespace.
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "include/apparmor.h"
+#include "include/cred.h"
+#include "include/policy_ns.h"
+#include "include/label.h"
+#include "include/policy.h"
+
+/* kernel label */
+struct aa_label *kernel_t;
+
+/* root profile namespace */
+struct aa_ns *root_ns;
+const char *aa_hidden_ns_name = "---";
+
+/**
+ * aa_ns_visible - test if @view is visible from @curr
+ * @curr: namespace to treat as the parent (NOT NULL)
+ * @view: namespace to test if visible from @curr (NOT NULL)
+ * @subns: whether view of a subns is allowed
+ *
+ * Returns: true if @view is visible from @curr else false
+ */
+bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns)
+{
+ if (curr == view)
+ return true;
+
+ if (!subns)
+ return false;
+
+ for ( ; view; view = view->parent) {
+ if (view->parent == curr)
+ return true;
+ }
+
+ return false;
+}
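+
+/*
+ * e.g. (hypothetical names) "root//ns1" is visible from "root" when
+ * @subns is true, but "root" is never visible from "root//ns1".
+ */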
+
+/**
+ * aa_ns_name - Find the ns name to display for @view from @curr
+ * @curr: current namespace (NOT NULL)
+ * @view: namespace attempting to view (NOT NULL)
+ * @subns: are subns visible
+ *
+ * Returns: name of @view visible from @curr
+ */
+const char *aa_ns_name(struct aa_ns *curr, struct aa_ns *view, bool subns)
+{
+ /* if view == curr then the namespace name isn't displayed */
+ if (curr == view)
+ return "";
+
+ if (aa_ns_visible(curr, view, subns)) {
+		/* at this point if a ns is visible it is below @curr,
+		 * thus the curr ns.hname is a prefix of its hname.
+		 * Only output the virtualized portion of the name.
+		 * Add + 2 to skip over the // separating the curr hname
+		 * prefix from the visible tail of the view's hname.
+		 */
+ return view->base.hname + strlen(curr->base.hname) + 2;
+ }
+
+ return aa_hidden_ns_name;
+}
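+
+/*
+ * e.g. (hypothetical names) with @curr "root//chroot_ns" and @view
+ * "root//chroot_ns//container", aa_ns_name() returns "container";
+ * a namespace that is not visible is reported as "---".
+ */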
+
+static struct aa_profile *alloc_unconfined(const char *name)
+{
+ struct aa_profile *profile;
+
+ profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ profile->label.flags |= FLAG_IX_ON_NAME_ERROR |
+ FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
+ profile->mode = APPARMOR_UNCONFINED;
+ profile->file.dfa = aa_get_dfa(nulldfa);
+ profile->policy.dfa = aa_get_dfa(nulldfa);
+
+ return profile;
+}
+
+/**
+ * alloc_ns - allocate, initialize and return a new namespace
+ * @prefix: parent namespace name (MAYBE NULL)
+ * @name: a preallocated name (NOT NULL)
+ *
+ * Returns: refcounted namespace or NULL on failure.
+ */
+static struct aa_ns *alloc_ns(const char *prefix, const char *name)
+{
+ struct aa_ns *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ AA_DEBUG("%s(%p)\n", __func__, ns);
+ if (!ns)
+ return NULL;
+ if (!aa_policy_init(&ns->base, prefix, name, GFP_KERNEL))
+ goto fail_ns;
+
+ INIT_LIST_HEAD(&ns->sub_ns);
+ INIT_LIST_HEAD(&ns->rawdata_list);
+ mutex_init(&ns->lock);
+ init_waitqueue_head(&ns->wait);
+
+ /* released by aa_free_ns() */
+ ns->unconfined = alloc_unconfined("unconfined");
+ if (!ns->unconfined)
+ goto fail_unconfined;
+ /* ns and ns->unconfined share ns->unconfined refcount */
+ ns->unconfined->ns = ns;
+
+ atomic_set(&ns->uniq_null, 0);
+
+ aa_labelset_init(&ns->labels);
+
+ return ns;
+
+fail_unconfined:
+ aa_policy_destroy(&ns->base);
+fail_ns:
+ kfree_sensitive(ns);
+ return NULL;
+}
+
+/**
+ * aa_free_ns - free a profile namespace
+ * @ns: the namespace to free (MAYBE NULL)
+ *
+ * Requires: All references to the namespace must have been put, including
+ * any that were held because the namespace was referenced by a profile
+ * confining a task.
+ */
+void aa_free_ns(struct aa_ns *ns)
+{
+ if (!ns)
+ return;
+
+ aa_policy_destroy(&ns->base);
+ aa_labelset_destroy(&ns->labels);
+ aa_put_ns(ns->parent);
+
+ ns->unconfined->ns = NULL;
+ aa_free_profile(ns->unconfined);
+ kfree_sensitive(ns);
+}
+
+/**
+ * aa_findn_ns - look up a profile namespace on the namespace list
+ * @root: namespace to search in (NOT NULL)
+ * @name: name of namespace to find (NOT NULL)
+ * @n: length of @name
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ * called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n)
+{
+ struct aa_ns *ns = NULL;
+
+ rcu_read_lock();
+ ns = aa_get_ns(__aa_findn_ns(&root->sub_ns, name, n));
+ rcu_read_unlock();
+
+ return ns;
+}
+
+/**
+ * aa_find_ns - look up a profile namespace on the namespace list
+ * @root: namespace to search in (NOT NULL)
+ * @name: name of namespace to find (NOT NULL)
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ * called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name)
+{
+ return aa_findn_ns(root, name, strlen(name));
+}
+
+/**
+ * __aa_lookupn_ns - lookup the namespace matching @hname
+ * @view: namespace to search in (NOT NULL)
+ * @hname: hierarchical ns name (NOT NULL)
+ * @n: length of @hname
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted ns pointer or NULL if not found
+ *
+ * Do a relative name lookup, recursing through profile tree.
+ */
+struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n)
+{
+ struct aa_ns *ns = view;
+ const char *split;
+
+ for (split = strnstr(hname, "//", n); split;
+ split = strnstr(hname, "//", n)) {
+ ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname);
+ if (!ns)
+ return NULL;
+
+ n -= split + 2 - hname;
+ hname = split + 2;
+ }
+
+ if (n)
+ return __aa_findn_ns(&ns->sub_ns, hname, n);
+ return NULL;
+}
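+
+/*
+ * e.g. (hypothetical names) looking up "childns//grandchild" from
+ * @view walks the "//" separated components through each sub_ns list
+ * and returns the "grandchild" ns, or NULL at the first miss.
+ */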
+
+/**
+ * aa_lookupn_ns - look up a policy namespace relative to @view
+ * @view: namespace to search in (NOT NULL)
+ * @name: name of namespace to find (NOT NULL)
+ * @n: length of @name
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ * called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n)
+{
+ struct aa_ns *ns = NULL;
+
+ rcu_read_lock();
+ ns = aa_get_ns(__aa_lookupn_ns(view, name, n));
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
+ struct dentry *dir)
+{
+ struct aa_ns *ns;
+ int error;
+
+ AA_BUG(!parent);
+ AA_BUG(!name);
+ AA_BUG(!mutex_is_locked(&parent->lock));
+
+ ns = alloc_ns(parent->base.hname, name);
+ if (!ns)
+ return ERR_PTR(-ENOMEM);
+ ns->level = parent->level + 1;
+ mutex_lock_nested(&ns->lock, ns->level);
+ error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
+ if (error) {
+ AA_ERROR("Failed to create interface for ns %s\n",
+ ns->base.name);
+ mutex_unlock(&ns->lock);
+ aa_free_ns(ns);
+ return ERR_PTR(error);
+ }
+ ns->parent = aa_get_ns(parent);
+ list_add_rcu(&ns->base.list, &parent->sub_ns);
+ /* add list ref */
+ aa_get_ns(ns);
+ mutex_unlock(&ns->lock);
+
+ return ns;
+}
+
+/**
+ * __aa_find_or_create_ns - create an ns, fail if it already exists
+ * @parent: the parent of the namespace being created
+ * @name: the name of the namespace
+ * @dir: if not null the dir to put the ns entries in
+ *
+ * Returns: a refcounted ns that has been added or an ERR_PTR
+ */
+struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
+ struct dentry *dir)
+{
+ struct aa_ns *ns;
+
+ AA_BUG(!mutex_is_locked(&parent->lock));
+
+ /* try and find the specified ns */
+ /* released by caller */
+ ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
+ if (!ns)
+ ns = __aa_create_ns(parent, name, dir);
+ else
+ ns = ERR_PTR(-EEXIST);
+
+ /* return ref */
+ return ns;
+}
+
+/**
+ * aa_prepare_ns - find an existing or create a new namespace of @name
+ * @parent: ns to treat as parent
+ * @name: the namespace to find or add (NOT NULL)
+ *
+ * Returns: refcounted namespace or PTR_ERR if failed to create one
+ */
+struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name)
+{
+ struct aa_ns *ns;
+
+ mutex_lock_nested(&parent->lock, parent->level);
+ /* try and find the specified ns and if it doesn't exist create it */
+ /* released by caller */
+ ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
+ if (!ns)
+ ns = __aa_create_ns(parent, name, NULL);
+ mutex_unlock(&parent->lock);
+
+ /* return ref */
+ return ns;
+}
+
+static void __ns_list_release(struct list_head *head);
+
+/**
+ * destroy_ns - remove everything contained by @ns
+ * @ns: namespace to have its contents removed (NOT NULL)
+ */
+static void destroy_ns(struct aa_ns *ns)
+{
+ if (!ns)
+ return;
+
+ mutex_lock_nested(&ns->lock, ns->level);
+ /* release all profiles in this namespace */
+ __aa_profile_list_release(&ns->base.profiles);
+
+ /* release all sub namespaces */
+ __ns_list_release(&ns->sub_ns);
+
+ if (ns->parent) {
+ unsigned long flags;
+
+ write_lock_irqsave(&ns->labels.lock, flags);
+ __aa_proxy_redirect(ns_unconfined(ns),
+ ns_unconfined(ns->parent));
+ write_unlock_irqrestore(&ns->labels.lock, flags);
+ }
+ __aafs_ns_rmdir(ns);
+ mutex_unlock(&ns->lock);
+}
+
+/**
+ * __aa_remove_ns - remove a namespace and all its children
+ * @ns: namespace to be removed (NOT NULL)
+ *
+ * Requires: ns->parent->lock be held and ns removed from parent.
+ */
+void __aa_remove_ns(struct aa_ns *ns)
+{
+ /* remove ns from namespace list */
+ list_del_rcu(&ns->base.list);
+ destroy_ns(ns);
+ aa_put_ns(ns);
+}
+
+/**
+ * __ns_list_release - remove all profile namespaces on the list put refs
+ * @head: list of profile namespaces (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+static void __ns_list_release(struct list_head *head)
+{
+ struct aa_ns *ns, *tmp;
+
+ list_for_each_entry_safe(ns, tmp, head, base.list)
+ __aa_remove_ns(ns);
+
+}
+
+/**
+ * aa_alloc_root_ns - allocate the root profile namespace
+ *
+ * Returns: %0 on success else error
+ *
+ */
+int __init aa_alloc_root_ns(void)
+{
+ struct aa_profile *kernel_p;
+
+	/* released by aa_free_root_ns - used as list ref */
+ root_ns = alloc_ns(NULL, "root");
+ if (!root_ns)
+ return -ENOMEM;
+
+ kernel_p = alloc_unconfined("kernel_t");
+ if (!kernel_p) {
+ destroy_ns(root_ns);
+ aa_free_ns(root_ns);
+ return -ENOMEM;
+ }
+ kernel_t = &kernel_p->label;
+ root_ns->unconfined->ns = aa_get_ns(root_ns);
+
+ return 0;
+}
+
+/**
+ * aa_free_root_ns - free the root profile namespace
+ */
+void __init aa_free_root_ns(void)
+{
+ struct aa_ns *ns = root_ns;
+
+ root_ns = NULL;
+
+ aa_label_free(kernel_t);
+ destroy_ns(ns);
+ aa_put_ns(ns);
+}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
new file mode 100644
index 000000000..633e778ec
--- /dev/null
+++ b/security/apparmor/policy_unpack.c
@@ -0,0 +1,1237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions for unpacking policy loaded from
+ * userspace.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
+ * All policy is validated before it is used.
+ */
+
+#include <asm/unaligned.h>
+#include <kunit/visibility.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/zlib.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/crypto.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+#define K_ABI_MASK 0x3ff
+#define FORCE_COMPLAIN_FLAG 0x800
+#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
+#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
+
+#define v5 5 /* base version */
+#define v6 6 /* per entry policydb mediation check */
+#define v7 7
+#define v8 8 /* full network masking */
+
+/* audit callback for unpack fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->iface.ns) {
+ audit_log_format(ab, " ns=");
+ audit_log_untrustedstring(ab, aad(sa)->iface.ns);
+ }
+ if (aad(sa)->name) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, aad(sa)->name);
+ }
+ if (aad(sa)->iface.pos)
+ audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
+}
+
+/**
+ * audit_iface - do audit message for policy unpacking/load/replace/remove
+ * @new: profile if it has been allocated (MAYBE NULL)
+ * @ns_name: name of the ns the profile is to be loaded to (MAYBE NULL)
+ * @name: name of the profile being manipulated (MAYBE NULL)
+ * @info: any extra info about the failure (MAYBE NULL)
+ * @e: buffer position info
+ * @error: error code
+ *
+ * Returns: %0 or error
+ */
+static int audit_iface(struct aa_profile *new, const char *ns_name,
+ const char *name, const char *info, struct aa_ext *e,
+ int error)
+{
+ struct aa_profile *profile = labels_profile(aa_current_raw_label());
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
+ if (e)
+ aad(&sa)->iface.pos = e->pos - e->start;
+ aad(&sa)->iface.ns = ns_name;
+ if (new)
+ aad(&sa)->name = new->base.hname;
+ else
+ aad(&sa)->name = name;
+ aad(&sa)->info = info;
+ aad(&sa)->error = error;
+
+ return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
+}
+
+void __aa_loaddata_update(struct aa_loaddata *data, long revision)
+{
+ AA_BUG(!data);
+ AA_BUG(!data->ns);
+ AA_BUG(!mutex_is_locked(&data->ns->lock));
+ AA_BUG(data->revision > revision);
+
+ data->revision = revision;
+ if ((data->dents[AAFS_LOADDATA_REVISION])) {
+ d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
+ current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
+ d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
+ current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
+ }
+}
+
+bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
+{
+ if (l->size != r->size)
+ return false;
+ if (l->compressed_size != r->compressed_size)
+ return false;
+ if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
+ return false;
+ return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
+}
+
+/*
+ * we need to take the ns mutex lock, which is NOT safe in most places
+ * that put_loaddata is called from, so we have to delay freeing it
+ */
+static void do_loaddata_free(struct work_struct *work)
+{
+ struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
+ struct aa_ns *ns = aa_get_ns(d->ns);
+
+ if (ns) {
+ mutex_lock_nested(&ns->lock, ns->level);
+ __aa_fs_remove_rawdata(d);
+ mutex_unlock(&ns->lock);
+ aa_put_ns(ns);
+ }
+
+ kfree_sensitive(d->hash);
+ kfree_sensitive(d->name);
+ kvfree(d->data);
+ kfree_sensitive(d);
+}
+
+void aa_loaddata_kref(struct kref *kref)
+{
+ struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
+
+ if (d) {
+ INIT_WORK(&d->work, do_loaddata_free);
+ schedule_work(&d->work);
+ }
+}
+
+struct aa_loaddata *aa_loaddata_alloc(size_t size)
+{
+ struct aa_loaddata *d;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (d == NULL)
+ return ERR_PTR(-ENOMEM);
+ d->data = kvzalloc(size, GFP_KERNEL);
+ if (!d->data) {
+ kfree(d);
+ return ERR_PTR(-ENOMEM);
+ }
+ kref_init(&d->count);
+ INIT_LIST_HEAD(&d->list);
+
+ return d;
+}
+
+/* test if read will be in packed data bounds */
+VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
+{
+ return (size <= e->end - e->pos);
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
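+
+/*
+ * Example (illustrative): with e->pos == e->start and a 10 byte buffer,
+ * aa_inbounds(e, 10) is true and aa_inbounds(e, 11) is false; every raw
+ * read from the packed stream is guarded by this check.
+ */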
+
+static void *kvmemdup(const void *src, size_t len)
+{
+ void *p = kvmalloc(len, GFP_KERNEL);
+
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+
+/**
+ * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
+ * @e: serialized data read head (NOT NULL)
+ * @chunk: start address for chunk of data (NOT NULL)
+ *
+ * Returns: the size of the chunk found, with the read head at the end of
+ * the chunk; 0 on failure, with the read head unmoved.
+ */
+VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
+{
+ size_t size = 0;
+ void *pos = e->pos;
+
+ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ e->pos += sizeof(__le16);
+ if (!aa_inbounds(e, size))
+ goto fail;
+ *chunk = e->pos;
+ e->pos += size;
+ return size;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
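+
+/*
+ * Example (illustrative): a u16 chunk is a little-endian length followed
+ * by that many bytes, so the sequence 0x02 0x00 'h' 'i' unpacks with
+ * size == 2 and *chunk pointing at "hi", leaving the read head just past
+ * the chunk.
+ */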
+
+/* unpack control byte */
+VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
+{
+ if (!aa_inbounds(e, 1))
+ return false;
+ if (*(u8 *) e->pos != code)
+ return false;
+ e->pos++;
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
+
+/**
+ * aa_unpack_nameX - check if the next element is of type X with a name of @name
+ * @e: serialized data extent information (NOT NULL)
+ * @code: type code
+ * @name: name to match to the serialized element. (MAYBE NULL)
+ *
+ * check that the next serialized data element is of type X and has a tag
+ * name @name. If @name is specified then there must be a matching
+ * name element in the stream. If @name is NULL any name element will be
+ * skipped and only the typecode will be tested.
+ *
+ * Returns: true on success (both the type code and name tests match) with
+ * the read head advanced past the headers; false if either match fails,
+ * in which case the read head does not move
+ */
+VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+{
+ /*
+ * May need to reset pos if name or type doesn't match
+ */
+ void *pos = e->pos;
+ /*
+ * Check for presence of a tagname, and if present name size
+ * AA_NAME tag value is a u16.
+ */
+ if (aa_unpack_X(e, AA_NAME)) {
+ char *tag = NULL;
+ size_t size = aa_unpack_u16_chunk(e, &tag);
+ /* if a name is specified it must match. otherwise skip tag */
+ if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
+ goto fail;
+ } else if (name) {
+ /* if a name is specified and there is no name tag fail */
+ goto fail;
+ }
+
+ /* now check if type code matches */
+ if (aa_unpack_X(e, code))
+ return true;
+
+fail:
+ e->pos = pos;
+ return false;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
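+
+/*
+ * Example (illustrative): a named element is laid out on the wire as
+ *   [AA_NAME][u16 len]["tag\0"][typecode][payload]
+ * so a named u32 carries 3 + strlen(tag) + 1 bytes of header before the
+ * AA_U32 code and its 4 byte little-endian value.
+ */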
+
+static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_U8, name)) {
+ if (!aa_inbounds(e, sizeof(u8)))
+ goto fail;
+ if (data)
+ *data = *((u8 *)e->pos);
+ e->pos += sizeof(u8);
+ return true;
+ }
+
+fail:
+ e->pos = pos;
+ return false;
+}
+
+VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_U32, name)) {
+ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ if (data)
+ *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+ e->pos += sizeof(u32);
+ return true;
+ }
+
+fail:
+ e->pos = pos;
+ return false;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
+
+VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_U64, name)) {
+ if (!aa_inbounds(e, sizeof(u64)))
+ goto fail;
+ if (data)
+ *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
+ e->pos += sizeof(u64);
+ return true;
+ }
+
+fail:
+ e->pos = pos;
+ return false;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
+
+VISIBLE_IF_KUNIT size_t aa_unpack_array(struct aa_ext *e, const char *name)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_ARRAY, name)) {
+ int size;
+ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ e->pos += sizeof(u16);
+ return size;
+ }
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
+
+VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_BLOB, name)) {
+ u32 size;
+ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+ e->pos += sizeof(u32);
+ if (aa_inbounds(e, (size_t) size)) {
+ *blob = e->pos;
+ e->pos += size;
+ return size;
+ }
+ }
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
+
+VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
+{
+ char *src_str;
+ size_t size = 0;
+ void *pos = e->pos;
+ *string = NULL;
+ if (aa_unpack_nameX(e, AA_STRING, name)) {
+ size = aa_unpack_u16_chunk(e, &src_str);
+ if (size) {
+ /* strings are null terminated, length is size - 1 */
+ if (src_str[size - 1] != 0)
+ goto fail;
+ *string = src_str;
+
+ return size;
+ }
+ }
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
+
+VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
+{
+ const char *tmp;
+ void *pos = e->pos;
+ int res = aa_unpack_str(e, &tmp, name);
+ *string = NULL;
+
+ if (!res)
+ return 0;
+
+ *string = kmemdup(tmp, res, GFP_KERNEL);
+ if (!*string) {
+ e->pos = pos;
+ return 0;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
+
+
+/**
+ * unpack_dfa - unpack a file rule dfa
+ * @e: serialized data extent information (NOT NULL)
+ *
+ * returns dfa or ERR_PTR or NULL if no dfa
+ */
+static struct aa_dfa *unpack_dfa(struct aa_ext *e)
+{
+ char *blob = NULL;
+ size_t size;
+ struct aa_dfa *dfa = NULL;
+
+ size = aa_unpack_blob(e, &blob, "aadfa");
+ if (size) {
+		/*
+		 * The dfa is aligned within the blob to 8 bytes
+		 * from the beginning of the stream; compute the
+		 * alignment adjustment the dfa unpack needs.
+		 */
+ size_t sz = blob - (char *) e->start -
+ ((e->pos - e->start) & 7);
+ size_t pad = ALIGN(sz, 8) - sz;
+ int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32);
+ if (aa_g_paranoid_load)
+ flags |= DFA_FLAG_VERIFY_STATES;
+ dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
+
+ if (IS_ERR(dfa))
+ return dfa;
+
+ }
+
+ return dfa;
+}
+
+/**
+ * unpack_trans_table - unpack a profile transition table
+ * @e: serialized data extent information (NOT NULL)
+ * @profile: profile to add the accept table to (NOT NULL)
+ *
+ * Returns: true if table successfully unpacked
+ */
+static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *saved_pos = e->pos;
+
+ /* exec table is optional */
+ if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
+ int i, size;
+
+ size = aa_unpack_array(e, NULL);
+		/* currently 4 exec bits; entries 0-3 are reserved for the
+		 * iupcx exec modifiers
+		 */
+ if (size > 16 - 4)
+ goto fail;
+ profile->file.trans.table = kcalloc(size, sizeof(char *),
+ GFP_KERNEL);
+ if (!profile->file.trans.table)
+ goto fail;
+
+ profile->file.trans.size = size;
+ for (i = 0; i < size; i++) {
+ char *str;
+ int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
+			/* aa_unpack_strdup verifies that the last character
+			 * is a null termination byte.
+			 */
+ if (!size2)
+ goto fail;
+ profile->file.trans.table[i] = str;
+ /* verify that name doesn't start with space */
+ if (isspace(*str))
+ goto fail;
+
+			/* count the number of internal \0 bytes */
+ for (c = j = 0; j < size2 - 1; j++) {
+ if (!str[j]) {
+ pos = j;
+ c++;
+ }
+ }
+ if (*str == ':') {
+ /* first character after : must be valid */
+ if (!str[1])
+ goto fail;
+ /* beginning with : requires an embedded \0,
+ * verify that exactly 1 internal \0 exists
+ * trailing \0 already verified by aa_unpack_strdup
+ *
+ * convert \0 back to : for label_parse
+ */
+ if (c == 1)
+ str[pos] = ':';
+ else if (c > 1)
+ goto fail;
+ } else if (c)
+ /* fail - all other cases with embedded \0 */
+ goto fail;
+ }
+ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return true;
+
+fail:
+ aa_free_domain_entries(&profile->file.trans);
+ e->pos = saved_pos;
+ return false;
+}
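+
+/*
+ * Example (illustrative): a namespace-qualified transition target such as
+ * ":ns:profile" arrives serialized with an embedded \0 in place of the
+ * second ':', i.e. ":ns\0profile"; unpack_trans_table() verifies exactly
+ * one internal \0 exists and converts it back to ':' for label_parse.
+ */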
+
+static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+
+ if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
+ int i, size;
+
+ size = aa_unpack_array(e, NULL);
+ profile->xattr_count = size;
+ profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
+ if (!profile->xattrs)
+ goto fail;
+ for (i = 0; i < size; i++) {
+ if (!aa_unpack_strdup(e, &profile->xattrs[i], NULL))
+ goto fail;
+ }
+ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ e->pos = pos;
+ return false;
+}
+
+static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+ int i, size;
+
+ if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
+ size = aa_unpack_array(e, NULL);
+
+ profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
+ GFP_KERNEL);
+ if (!profile->secmark)
+ goto fail;
+
+ profile->secmark_count = size;
+
+ for (i = 0; i < size; i++) {
+ if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
+ goto fail;
+ if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
+ goto fail;
+ if (!aa_unpack_strdup(e, &profile->secmark[i].label, NULL))
+ goto fail;
+ }
+ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ if (profile->secmark) {
+ for (i = 0; i < size; i++)
+ kfree(profile->secmark[i].label);
+ kfree(profile->secmark);
+ profile->secmark_count = 0;
+ profile->secmark = NULL;
+ }
+
+ e->pos = pos;
+ return false;
+}
+
+static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+
+ /* rlimits are optional */
+ if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
+ int i, size;
+ u32 tmp = 0;
+ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ profile->rlimits.mask = tmp;
+
+ size = aa_unpack_array(e, NULL);
+ if (size > RLIM_NLIMITS)
+ goto fail;
+ for (i = 0; i < size; i++) {
+ u64 tmp2 = 0;
+ int a = aa_map_resource(i);
+ if (!aa_unpack_u64(e, &tmp2, NULL))
+ goto fail;
+ profile->rlimits.limits[a].rlim_max = tmp2;
+ }
+ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return true;
+
+fail:
+ e->pos = pos;
+ return false;
+}
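+
+/*
+ * Wire sketch (illustrative): the optional rlimits struct is a u32 mask of
+ * mediated limits followed by an array of u64 rlim_max values, each array
+ * slot mapped through aa_map_resource() to the architecture's resource
+ * numbering.
+ */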
+
+static u32 strhash(const void *data, u32 len, u32 seed)
+{
+ const char * const *key = data;
+
+ return jhash(*key, strlen(*key), seed);
+}
+
+static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
+{
+ const struct aa_data *data = obj;
+ const char * const *key = arg->key;
+
+ return strcmp(data->key, *key);
+}
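+
+/*
+ * Note (illustrative): the profile's key/value data rhashtable is keyed by
+ * the C string at aa_data->key; key_len is sizeof(void *) because the
+ * table stores the pointer, while strhash()/datacmp() hash and compare the
+ * string it points to.
+ */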
+
+/**
+ * unpack_profile - unpack a serialized profile
+ * @e: serialized data extent information (NOT NULL)
+ * @ns_name: Returns - newly allocated copy of the profile's ns name, or
+ *           %NULL if no ns was specified or on error
+ *
+ * NOTE: unpack profile sets audit struct if there is a failure
+ */
+static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+{
+ struct aa_profile *profile = NULL;
+ const char *tmpname, *tmpns = NULL, *name = NULL;
+ const char *info = "failed to unpack profile";
+ size_t ns_len;
+ struct rhashtable_params params = { 0 };
+ char *key = NULL, *disconnected = NULL;
+ struct aa_data *data;
+ int i, error = -EPROTO;
+ kernel_cap_t tmpcap;
+ u32 tmp;
+
+ *ns_name = NULL;
+
+ /* check that we have the right struct being passed */
+ if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
+ goto fail;
+ if (!aa_unpack_str(e, &name, NULL))
+ goto fail;
+ if (*name == '\0')
+ goto fail;
+
+ tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
+ if (tmpns) {
+ if (!tmpname) {
+ info = "empty profile name";
+ goto fail;
+ }
+ *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
+ if (!*ns_name) {
+ info = "out of memory";
+ goto fail;
+ }
+ name = tmpname;
+ }
+
+ profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ /* profile renaming is optional */
+ (void) aa_unpack_str(e, &profile->rename, "rename");
+
+ /* attachment string is optional */
+ (void) aa_unpack_str(e, &profile->attach, "attach");
+
+ /* xmatch is optional and may be NULL */
+ profile->xmatch = unpack_dfa(e);
+ if (IS_ERR(profile->xmatch)) {
+ error = PTR_ERR(profile->xmatch);
+ profile->xmatch = NULL;
+ info = "bad xmatch";
+ goto fail;
+ }
+ /* xmatch_len is not optional if xmatch is set */
+ if (profile->xmatch) {
+ if (!aa_unpack_u32(e, &tmp, NULL)) {
+ info = "missing xmatch len";
+ goto fail;
+ }
+ profile->xmatch_len = tmp;
+ }
+
+ /* disconnected attachment string is optional */
+ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
+ profile->disconnected = disconnected;
+
+ /* per profile debug flags (complain, audit) */
+ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+ info = "profile missing flags";
+ goto fail;
+ }
+ info = "failed to unpack profile flags";
+ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp & PACKED_FLAG_HAT)
+ profile->label.flags |= FLAG_HAT;
+ if (tmp & PACKED_FLAG_DEBUG1)
+ profile->label.flags |= FLAG_DEBUG1;
+ if (tmp & PACKED_FLAG_DEBUG2)
+ profile->label.flags |= FLAG_DEBUG2;
+ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
+ profile->mode = APPARMOR_COMPLAIN;
+ } else if (tmp == PACKED_MODE_ENFORCE) {
+ profile->mode = APPARMOR_ENFORCE;
+ } else if (tmp == PACKED_MODE_KILL) {
+ profile->mode = APPARMOR_KILL;
+ } else if (tmp == PACKED_MODE_UNCONFINED) {
+ profile->mode = APPARMOR_UNCONFINED;
+ profile->label.flags |= FLAG_UNCONFINED;
+ } else {
+ goto fail;
+ }
+ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp)
+ profile->audit = AUDIT_ALL;
+
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ /* path_flags is optional */
+ if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
+ profile->path_flags |= profile->label.flags &
+ PATH_MEDIATE_DELETED;
+ else
+ /* set a default value if path_flags field is not present */
+ profile->path_flags = PATH_MEDIATE_DELETED;
+
+ info = "failed to unpack profile capabilities";
+ if (!aa_unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
+ goto fail;
+
+ info = "failed to unpack upper profile capabilities";
+ if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
+ /* optional upper half of 64 bit caps */
+ if (!aa_unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ info = "failed to unpack extended profile capabilities";
+ if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
+ /* optional extended caps mediation mask */
+ if (!aa_unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
+ goto fail;
+ if (!aa_unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ if (!unpack_xattrs(e, profile)) {
+ info = "failed to unpack profile xattrs";
+ goto fail;
+ }
+
+ if (!unpack_rlimits(e, profile)) {
+ info = "failed to unpack profile rlimits";
+ goto fail;
+ }
+
+ if (!unpack_secmark(e, profile)) {
+ info = "failed to unpack profile secmark rules";
+ goto fail;
+ }
+
+ if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
+ /* generic policy dfa - optional and may be NULL */
+ info = "failed to unpack policydb";
+ profile->policy.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->policy.dfa)) {
+ error = PTR_ERR(profile->policy.dfa);
+ profile->policy.dfa = NULL;
+ goto fail;
+ } else if (!profile->policy.dfa) {
+ error = -EPROTO;
+ goto fail;
+ }
+ if (!aa_unpack_u32(e, &profile->policy.start[0], "start"))
+ /* default start state */
+ profile->policy.start[0] = DFA_START;
+ /* setup class index */
+ for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
+ profile->policy.start[i] =
+ aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[0],
+ i);
+ }
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ } else
+ profile->policy.dfa = aa_get_dfa(nulldfa);
+
+ /* get file rules */
+ profile->file.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->file.dfa)) {
+ error = PTR_ERR(profile->file.dfa);
+ profile->file.dfa = NULL;
+ info = "failed to unpack profile file rules";
+ goto fail;
+ } else if (profile->file.dfa) {
+ if (!aa_unpack_u32(e, &profile->file.start, "dfa_start"))
+ /* default start state */
+ profile->file.start = DFA_START;
+ } else if (profile->policy.dfa &&
+ profile->policy.start[AA_CLASS_FILE]) {
+ profile->file.dfa = aa_get_dfa(profile->policy.dfa);
+ profile->file.start = profile->policy.start[AA_CLASS_FILE];
+ } else
+ profile->file.dfa = aa_get_dfa(nulldfa);
+
+ if (!unpack_trans_table(e, profile)) {
+ info = "failed to unpack profile transition table";
+ goto fail;
+ }
+
+ if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
+ info = "out of memory";
+ profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
+ if (!profile->data)
+ goto fail;
+
+ params.nelem_hint = 3;
+ params.key_len = sizeof(void *);
+ params.key_offset = offsetof(struct aa_data, key);
+ params.head_offset = offsetof(struct aa_data, head);
+ params.hashfn = strhash;
+ params.obj_cmpfn = datacmp;
+
+ if (rhashtable_init(profile->data, &params)) {
+ info = "failed to init key, value hash table";
+ goto fail;
+ }
+
+ while (aa_unpack_strdup(e, &key, NULL)) {
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ kfree_sensitive(key);
+ goto fail;
+ }
+
+ data->key = key;
+ data->size = aa_unpack_blob(e, &data->data, NULL);
+ data->data = kvmemdup(data->data, data->size);
+ if (data->size && !data->data) {
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ goto fail;
+ }
+
+ if (rhashtable_insert_fast(profile->data, &data->head,
+ profile->data->p)) {
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ info = "failed to insert data to table";
+ goto fail;
+ }
+ }
+
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ info = "failed to unpack end of key, value data table";
+ goto fail;
+ }
+ }
+
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ info = "failed to unpack end of profile";
+ goto fail;
+ }
+
+ return profile;
+
+fail:
+ if (profile)
+ name = NULL;
+ else if (!name)
+ name = "unknown";
+ audit_iface(profile, NULL, name, info, e, error);
+ aa_free_profile(profile);
+
+ return ERR_PTR(error);
+}
+
+/**
+ * verify_header - unpack serialized stream header
+ * @e: serialized data read head (NOT NULL)
+ * @required: whether the header is required or optional
+ * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
+ *
+ * Returns: error or 0 if header is good
+ */
+static int verify_header(struct aa_ext *e, int required, const char **ns)
+{
+ int error = -EPROTONOSUPPORT;
+ const char *name = NULL;
+ *ns = NULL;
+
+ /* get the interface version */
+ if (!aa_unpack_u32(e, &e->version, "version")) {
+ if (required) {
+ audit_iface(NULL, NULL, NULL, "invalid profile format",
+ e, error);
+ return error;
+ }
+ }
+
+	/* Check that the interface version is currently supported;
+	 * if not specified, the previous version is used.
+	 * Mask off everything that is not the kernel abi version.
+	 */
+ if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) {
+ audit_iface(NULL, NULL, NULL, "unsupported interface version",
+ e, error);
+ return error;
+ }
+
+ /* read the namespace if present */
+ if (aa_unpack_str(e, &name, "namespace")) {
+ if (*name == '\0') {
+ audit_iface(NULL, NULL, NULL, "invalid namespace name",
+ e, error);
+ return error;
+ }
+ if (*ns && strcmp(*ns, name)) {
+ audit_iface(NULL, NULL, NULL, "invalid ns change", e,
+ error);
+ } else if (!*ns) {
+ *ns = kstrdup(name, GFP_KERNEL);
+ if (!*ns)
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
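+
+/*
+ * Example (illustrative): a load stream therefore begins with a named u32
+ * "version" (required for the first profile, optional afterwards),
+ * optionally followed by a "namespace" string; every header in a single
+ * stream must name the same namespace.
+ */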
+
+static bool verify_xindex(int xindex, int table_size)
+{
+ int index, xtype;
+ xtype = xindex & AA_X_TYPE_MASK;
+ index = xindex & AA_X_INDEX_MASK;
+ if (xtype == AA_X_TABLE && index >= table_size)
+ return false;
+ return true;
+}
+
+/* verify dfa xindexes are in range of transition tables */
+static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
+{
+ int i;
+ for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
+ if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
+ return false;
+ if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
+ return false;
+ }
+ return true;
+}
+
+/**
+ * verify_profile - Do post unpack analysis to verify profile consistency
+ * @profile: profile to verify (NOT NULL)
+ *
+ * Returns: 0 if passes verification else error
+ */
+static int verify_profile(struct aa_profile *profile)
+{
+ if (profile->file.dfa &&
+ !verify_dfa_xindex(profile->file.dfa,
+ profile->file.trans.size)) {
+ audit_iface(profile, NULL, NULL, "Invalid named transition",
+ NULL, -EPROTO);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+void aa_load_ent_free(struct aa_load_ent *ent)
+{
+ if (ent) {
+ aa_put_profile(ent->rename);
+ aa_put_profile(ent->old);
+ aa_put_profile(ent->new);
+ kfree(ent->ns_name);
+ kfree_sensitive(ent);
+ }
+}
+
+struct aa_load_ent *aa_load_ent_alloc(void)
+{
+ struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (ent)
+ INIT_LIST_HEAD(&ent->list);
+ return ent;
+}
+
+static int deflate_compress(const char *src, size_t slen, char **dst,
+ size_t *dlen)
+{
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+ int error;
+ struct z_stream_s strm;
+ void *stgbuf, *dstbuf;
+ size_t stglen = deflateBound(slen);
+
+ memset(&strm, 0, sizeof(strm));
+
+ if (stglen < slen)
+ return -EFBIG;
+
+ strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ MAX_MEM_LEVEL),
+ GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_deflate_init;
+ }
+
+ stgbuf = kvzalloc(stglen, GFP_KERNEL);
+ if (!stgbuf) {
+ error = -ENOMEM;
+ goto fail_stg_alloc;
+ }
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+ strm.next_out = stgbuf;
+ strm.avail_out = stglen;
+
+ error = zlib_deflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END) {
+ error = -EINVAL;
+ goto fail_deflate;
+ }
+ error = 0;
+
+ if (is_vmalloc_addr(stgbuf)) {
+ dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
+ if (dstbuf) {
+ memcpy(dstbuf, stgbuf, strm.total_out);
+ kvfree(stgbuf);
+ }
+ } else
+ /*
+ * If the staging buffer was kmalloc'd, then using krealloc is
+ * probably going to be faster. The destination buffer will
+ * always be smaller, so it's just shrunk, avoiding a memcpy
+ */
+ dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
+
+ if (!dstbuf) {
+ error = -ENOMEM;
+ goto fail_deflate;
+ }
+
+ *dst = dstbuf;
+ *dlen = strm.total_out;
+
+fail_stg_alloc:
+ zlib_deflateEnd(&strm);
+fail_deflate_init:
+ kvfree(strm.workspace);
+ return error;
+
+fail_deflate:
+ kvfree(stgbuf);
+ goto fail_stg_alloc;
+#else
+ *dlen = slen;
+ return 0;
+#endif
+}
+
+static int compress_loaddata(struct aa_loaddata *data)
+{
+
+ AA_BUG(data->compressed_size > 0);
+
+ /*
+ * Shortcut the no compression case, else we increase the amount of
+ * storage required by a small amount
+ */
+ if (aa_g_rawdata_compression_level != 0) {
+ void *udata = data->data;
+ int error = deflate_compress(udata, data->size, &data->data,
+ &data->compressed_size);
+ if (error)
+ return error;
+
+ if (udata != data->data)
+ kvfree(udata);
+ } else
+ data->compressed_size = data->size;
+
+ return 0;
+}
+
+/**
+ * aa_unpack - unpack packed binary profile(s) data loaded from user space
+ * @udata: user data copied to kmem (NOT NULL)
+ * @lh: list to place unpacked profiles in an aa_repl_ws
+ * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
+ *
+ * Unpack user data and place the refcounted, allocated profile(s) on @lh
+ * in order of discovery, with the list chain stored in base.list.
+ *
+ * Returns: %0 with the profile(s) on @lh, else an error code if unpacking
+ * fails
+ */
+int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
+ const char **ns)
+{
+ struct aa_load_ent *tmp, *ent;
+ struct aa_profile *profile = NULL;
+ int error;
+ struct aa_ext e = {
+ .start = udata->data,
+ .end = udata->data + udata->size,
+ .pos = udata->data,
+ };
+
+ *ns = NULL;
+ while (e.pos < e.end) {
+ char *ns_name = NULL;
+ void *start;
+ error = verify_header(&e, e.pos == e.start, ns);
+ if (error)
+ goto fail;
+
+ start = e.pos;
+ profile = unpack_profile(&e, &ns_name);
+ if (IS_ERR(profile)) {
+ error = PTR_ERR(profile);
+ goto fail;
+ }
+
+ error = verify_profile(profile);
+ if (error)
+ goto fail_profile;
+
+ if (aa_g_hash_policy)
+ error = aa_calc_profile_hash(profile, e.version, start,
+ e.pos - start);
+ if (error)
+ goto fail_profile;
+
+ ent = aa_load_ent_alloc();
+ if (!ent) {
+ error = -ENOMEM;
+ goto fail_profile;
+ }
+
+ ent->new = profile;
+ ent->ns_name = ns_name;
+ list_add_tail(&ent->list, lh);
+ }
+ udata->abi = e.version & K_ABI_MASK;
+ if (aa_g_hash_policy) {
+ udata->hash = aa_calc_hash(udata->data, udata->size);
+ if (IS_ERR(udata->hash)) {
+ error = PTR_ERR(udata->hash);
+ udata->hash = NULL;
+ goto fail;
+ }
+ }
+
+ if (aa_g_export_binary) {
+ error = compress_loaddata(udata);
+ if (error)
+ goto fail;
+ }
+ return 0;
+
+fail_profile:
+ aa_put_profile(profile);
+
+fail:
+ list_for_each_entry_safe(ent, tmp, lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
+ return error;
+}
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
new file mode 100644
index 000000000..f25cf2a02
--- /dev/null
+++ b/security/apparmor/policy_unpack_test.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for AppArmor's policy unpack.
+ */
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+#define TEST_STRING_NAME "TEST_STRING"
+#define TEST_STRING_DATA "testing"
+#define TEST_STRING_BUF_OFFSET \
+ (3 + strlen(TEST_STRING_NAME) + 1)
+
+#define TEST_U32_NAME "U32_TEST"
+#define TEST_U32_DATA ((u32)0x01020304)
+#define TEST_NAMED_U32_BUF_OFFSET \
+ (TEST_STRING_BUF_OFFSET + 3 + strlen(TEST_STRING_DATA) + 1)
+#define TEST_U32_BUF_OFFSET \
+ (TEST_NAMED_U32_BUF_OFFSET + 3 + strlen(TEST_U32_NAME) + 1)
+
+#define TEST_U16_OFFSET (TEST_U32_BUF_OFFSET + 3)
+#define TEST_U16_DATA ((u16)(TEST_U32_DATA >> 16))
+
+#define TEST_U64_NAME "U64_TEST"
+#define TEST_U64_DATA ((u64)0x0102030405060708)
+#define TEST_NAMED_U64_BUF_OFFSET (TEST_U32_BUF_OFFSET + sizeof(u32) + 1)
+#define TEST_U64_BUF_OFFSET \
+ (TEST_NAMED_U64_BUF_OFFSET + 3 + strlen(TEST_U64_NAME) + 1)
+
+#define TEST_BLOB_NAME "BLOB_TEST"
+#define TEST_BLOB_DATA "\xde\xad\x00\xbe\xef"
+#define TEST_BLOB_DATA_SIZE (ARRAY_SIZE(TEST_BLOB_DATA))
+#define TEST_NAMED_BLOB_BUF_OFFSET (TEST_U64_BUF_OFFSET + sizeof(u64) + 1)
+#define TEST_BLOB_BUF_OFFSET \
+ (TEST_NAMED_BLOB_BUF_OFFSET + 3 + strlen(TEST_BLOB_NAME) + 1)
+
+#define TEST_ARRAY_NAME "ARRAY_TEST"
+#define TEST_ARRAY_SIZE 16
+#define TEST_NAMED_ARRAY_BUF_OFFSET \
+ (TEST_BLOB_BUF_OFFSET + 5 + TEST_BLOB_DATA_SIZE)
+#define TEST_ARRAY_BUF_OFFSET \
+ (TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
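+
+/*
+ * Layout sketch (illustrative): the TEST_*_OFFSET macros above index one
+ * shared test buffer built by build_aa_ext_struct() below as
+ *   [AA_NAME][u16 len]["TEST_STRING\0"][AA_STRING][u16 len]["testing\0"]
+ * followed by the named u32, u64, blob and array elements in that order.
+ */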
+
+struct policy_unpack_fixture {
+ struct aa_ext *e;
+ size_t e_size;
+};
+
+static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ struct kunit *test, size_t buf_size)
+{
+ char *buf;
+ struct aa_ext *e;
+
+ buf = kunit_kzalloc(test, buf_size, GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, buf);
+
+ e = kunit_kmalloc(test, sizeof(*e), GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, e);
+
+ e->start = buf;
+ e->end = e->start + buf_size;
+ e->pos = e->start;
+
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_STRING_NAME) + 1;
+ strcpy(buf + 3, TEST_STRING_NAME);
+
+ buf = e->start + TEST_STRING_BUF_OFFSET;
+ *buf = AA_STRING;
+ *(buf + 1) = strlen(TEST_STRING_DATA) + 1;
+ strcpy(buf + 3, TEST_STRING_DATA);
+
+ buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_U32_NAME) + 1;
+ strcpy(buf + 3, TEST_U32_NAME);
+ *(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+ *((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
+
+ buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_U64_NAME) + 1;
+ strcpy(buf + 3, TEST_U64_NAME);
+ *(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
+ *((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
+
+ buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
+ strcpy(buf + 3, TEST_BLOB_NAME);
+ *(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
+ *(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
+ memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
+ TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE);
+
+ buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+ strcpy(buf + 3, TEST_ARRAY_NAME);
+ *(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+ *((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
+
+ return e;
+}
+
+static int policy_unpack_test_init(struct kunit *test)
+{
+ size_t e_size = TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1;
+ struct policy_unpack_fixture *puf;
+
+ puf = kunit_kmalloc(test, sizeof(*puf), GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, puf);
+
+ puf->e_size = e_size;
+ puf->e = build_aa_ext_struct(puf, test, e_size);
+
+ test->priv = puf;
+ return 0;
+}
+
+static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+
+ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, 0));
+ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size / 2));
+ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size));
+}
+
+static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+
+ KUNIT_EXPECT_FALSE(test, aa_inbounds(puf->e, puf->e_size + 1));
+}
+
+static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ u16 array_size;
+
+ puf->e->pos += TEST_ARRAY_BUF_OFFSET;
+
+ array_size = aa_unpack_array(puf->e, NULL);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_ARRAY_NAME;
+ u16 array_size;
+
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+
+ array_size = aa_unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_ARRAY_NAME;
+ u16 array_size;
+
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
+
+ array_size = aa_unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_ARRAY_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_BLOB_BUF_OFFSET;
+ size = aa_unpack_blob(puf->e, &blob, NULL);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+ memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+ size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+ memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ void *start;
+ int size;
+
+ puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+ start = puf->e->pos;
+ puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+ + TEST_BLOB_DATA_SIZE - 1;
+
+ size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+ size = aa_unpack_str(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ size_t size;
+
+ size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ void *start = puf->e->pos;
+ int size;
+
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+ size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *string = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+ size = aa_unpack_strdup(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+ ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *string = NULL;
+ size_t size;
+
+ size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+ ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ void *start = puf->e->pos;
+ char *string = NULL;
+ int size;
+
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+ size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, string);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = aa_unpack_nameX(puf->e, AA_U32, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = aa_unpack_nameX(puf->e, AA_BLOB, NULL);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = aa_unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ static const char name[] = "12345678";
+ bool success;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = aa_unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_U16_OFFSET;
+ /*
+ * WARNING: For unit testing purposes, we're pushing puf->e->end past
+ * the end of the allocated memory. Doing anything other than comparing
+ * memory addresses is dangerous.
+ */
+ puf->e->end += TEST_U16_DATA;
+
+ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_PTR_EQ(test, chunk,
+ puf->e->start + TEST_U16_OFFSET + 2);
+ KUNIT_EXPECT_EQ(test, size, TEST_U16_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, (chunk + TEST_U16_DATA));
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
+ struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos = puf->e->end - 1;
+
+ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, chunk);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->end - 1);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
+ struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_U16_OFFSET;
+ /*
+ * WARNING: For unit testing purposes, we're pushing puf->e->end past
+ * the end of the allocated memory. Doing anything other than comparing
+ * memory addresses is dangerous.
+ */
+ puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
+
+ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, chunk);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->start + TEST_U16_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+ u32 data = 0;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = aa_unpack_u32(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+ u32 data = 0;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = aa_unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+ u32 data = 0;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
+
+ success = aa_unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+ u64 data = 0;
+
+ puf->e->pos += TEST_U64_BUF_OFFSET;
+
+ success = aa_unpack_u64(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U64_NAME;
+ bool success;
+ u64 data = 0;
+
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+
+ success = aa_unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U64_NAME;
+ bool success;
+ u64 data = 0;
+
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
+
+ success = aa_unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U64_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success = aa_unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
+}
+
+static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success = aa_unpack_X(puf->e, AA_STRING);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
+}
+
+static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos = puf->e->end;
+ success = aa_unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_FALSE(test, success);
+}
+
+static struct kunit_case apparmor_policy_unpack_test_cases[] = {
+ KUNIT_CASE(policy_unpack_test_inbounds_when_inbounds),
+ KUNIT_CASE(policy_unpack_test_inbounds_when_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_array_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_array_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_array_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_code),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_basic),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_1),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_2),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_X_code_match),
+ KUNIT_CASE(policy_unpack_test_unpack_X_code_mismatch),
+ KUNIT_CASE(policy_unpack_test_unpack_X_out_of_bounds),
+ {},
+};
+
+static struct kunit_suite apparmor_policy_unpack_test_module = {
+ .name = "apparmor_policy_unpack",
+ .init = policy_unpack_test_init,
+ .test_cases = apparmor_policy_unpack_test_cases,
+};
+
+kunit_test_suite(apparmor_policy_unpack_test_module);
+
+MODULE_LICENSE("GPL");
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
new file mode 100644
index 000000000..86ad26ef7
--- /dev/null
+++ b/security/apparmor/procattr.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include "include/apparmor.h"
+#include "include/cred.h"
+#include "include/policy.h"
+#include "include/policy_ns.h"
+#include "include/domain.h"
+#include "include/procattr.h"
+
+
+/**
+ * aa_getprocattr - Return the label information for @label
+ * @label: the label to print label info about (NOT NULL)
+ * @string: Returns - string containing the label info (NOT NULL)
+ *
+ * Requires: label != NULL
+ *
+ * Creates a string containing the namespace_name://profile_name for
+ * @label.
+ *
+ * Returns: size of string placed in @string else error code on failure
+ */
+int aa_getprocattr(struct aa_label *label, char **string)
+{
+ struct aa_ns *ns = labels_ns(label);
+ struct aa_ns *current_ns = aa_get_current_ns();
+ int len;
+
+ if (!aa_ns_visible(current_ns, ns, true)) {
+ aa_put_ns(current_ns);
+ return -EACCES;
+ }
+
+ len = aa_label_snxprint(NULL, 0, current_ns, label,
+ FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+ FLAG_HIDDEN_UNCONFINED);
+ AA_BUG(len < 0);
+
+ *string = kmalloc(len + 2, GFP_KERNEL);
+ if (!*string) {
+ aa_put_ns(current_ns);
+ return -ENOMEM;
+ }
+
+ len = aa_label_snxprint(*string, len + 2, current_ns, label,
+ FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+ FLAG_HIDDEN_UNCONFINED);
+ if (len < 0) {
+ aa_put_ns(current_ns);
+ return len;
+ }
+
+ (*string)[len] = '\n';
+ (*string)[len + 1] = 0;
+
+ aa_put_ns(current_ns);
+ return len + 1;
+}
+
+/**
+ * split_token_from_name - separate a string of form <token>^<name>
+ * @op: operation being checked
+ * @args: string to parse (NOT NULL)
+ * @token: stores returned parsed token value (NOT NULL)
+ *
+ * Returns: start position of name after token, NULL if no name follows,
+ * or an ERR_PTR on parse failure
+ */
+static char *split_token_from_name(const char *op, char *args, u64 *token)
+{
+ char *name;
+
+ *token = simple_strtoull(args, &name, 16);
+ if ((name == args) || *name != '^') {
+ AA_ERROR("%s: Invalid input '%s'", op, args);
+ return ERR_PTR(-EINVAL);
+ }
+
+ name++; /* skip ^ */
+ if (!*name)
+ name = NULL;
+ return name;
+}
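+
+/*
+ * Example (illustrative): args of "c0ffee^somehat" yields *token ==
+ * 0xc0ffee with "somehat" returned; "c0ffee^" returns NULL (token only),
+ * and input without a '^' separator returns ERR_PTR(-EINVAL).
+ */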
+
+/**
+ * aa_setprocattr_changehat - handle procattr interface to change_hat
+ * @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
+ * @size: size of the args
+ * @flags: set of flags governing behavior
+ *
+ * Returns: %0 or error code if change_hat fails
+ */
+int aa_setprocattr_changehat(char *args, size_t size, int flags)
+{
+ char *hat;
+ u64 token;
+ const char *hats[16]; /* current hard limit on # of names */
+ int count = 0;
+
+ hat = split_token_from_name(OP_CHANGE_HAT, args, &token);
+ if (IS_ERR(hat))
+ return PTR_ERR(hat);
+
+ if (!hat && !token) {
+ AA_ERROR("change_hat: Invalid input, NULL hat and NULL magic");
+ return -EINVAL;
+ }
+
+ if (hat) {
+ /* set up hat name vector, args guaranteed null terminated
+ * at args[size] by setprocattr.
+ *
+		 * If there are multiple hat names in the buffer, each is
+		 * separated by a \0, i.e. userspace writes them pre-tokenized.
+ */
+ char *end = args + size;
+ for (count = 0; (hat < end) && count < 16; ++count) {
+ char *next = hat + strlen(hat) + 1;
+ hats[count] = hat;
+ AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d hat '%s'\n"
+ , __func__, current->pid, token, count, hat);
+ hat = next;
+ }
+ } else
+ AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n",
+ __func__, current->pid, token, count, "<NULL>");
+
+ return aa_change_hat(hats, count, token, flags);
+}
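+
+/*
+ * Example (illustrative): an args buffer of "1234^hat1\0hat2" (with @size
+ * covering both names) parses to token 0x1234 and hats[] = { "hat1",
+ * "hat2" }, count == 2, before being handed to aa_change_hat().
+ */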
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
new file mode 100644
index 000000000..1ae487425
--- /dev/null
+++ b/security/apparmor/resource.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource mediation and attachment
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ */
+
+#include <linux/audit.h>
+#include <linux/security.h>
+
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/resource.h"
+#include "include/policy.h"
+
+/*
+ * Table of rlimit names: we generate it from resource.h.
+ */
+#include "rlim_names.h"
+
+struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
+ AA_SFS_FILE_STRING("mask", AA_SFS_RLIMIT_MASK),
+ { }
+};
+
+/* audit callback for resource specific fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ audit_log_format(ab, " rlimit=%s value=%lu",
+ rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
+ if (aad(sa)->peer) {
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+}
+
+/**
+ * audit_resource - audit setting resource limit
+ * @profile: profile being enforced (NOT NULL)
+ * @resource: rlimit being auditing
+ * @value: value being set
+ * @error: error value
+ *
+ * Returns: 0 or sa->error else other error code on failure
+ */
+static int audit_resource(struct aa_profile *profile, unsigned int resource,
+ unsigned long value, struct aa_label *peer,
+ const char *info, int error)
+{
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SETRLIMIT);
+
+ aad(&sa)->rlim.rlim = resource;
+ aad(&sa)->rlim.max = value;
+ aad(&sa)->peer = peer;
+ aad(&sa)->info = info;
+ aad(&sa)->error = error;
+
+ return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
+}
+
+/**
+ * aa_map_resource - map compiled policy resource to internal #
+ * @resource: flattened policy resource number
+ *
+ * Returns: resource # for the current architecture.
+ *
+ * rlimit resource can vary based on architecture, map the compiled policy
+ * resource # to the internal representation for the architecture.
+ */
+int aa_map_resource(int resource)
+{
+ return rlim_map[resource];
+}
+
+static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
+ struct rlimit *new_rlim)
+{
+ int e = 0;
+
+ if (profile->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
+ profile->rlimits.limits[resource].rlim_max)
+ e = -EACCES;
+ return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
+ e);
+}
+
+/**
+ * aa_task_setrlimit - test permission to set an rlimit
+ * @label: label confining the task (NOT NULL)
+ * @task: task the resource is being set on
+ * @resource: the resource being set
+ * @new_rlim: the new resource limit (NOT NULL)
+ *
+ * Controls raising the process's hard limit.
+ *
+ * Returns: 0 or error code if setting resource failed
+ */
+int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+{
+ struct aa_profile *profile;
+ struct aa_label *peer;
+ int error = 0;
+
+ rcu_read_lock();
+ peer = aa_get_newest_cred_label(__task_cred(task));
+ rcu_read_unlock();
+
+ /* TODO: extend resource control to handle other (non current)
+ * profiles. AppArmor rules currently have the implicit assumption
+ * that the task is setting the resource of a task confined with
+ * the same profile or that the task setting the resource of another
+ * task has CAP_SYS_RESOURCE.
+ */
+
+ if (label != peer &&
+ aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
+ error = fn_for_each(label, profile,
+ audit_resource(profile, resource,
+ new_rlim->rlim_max, peer,
+ "cap_sys_resource", -EACCES));
+ else
+ error = fn_for_each_confined(label, profile,
+ profile_setrlimit(profile, resource, new_rlim));
+ aa_put_label(peer);
+
+ return error;
+}
+
+/**
+ * __aa_transition_rlimits - apply new profile rlimits
+ * @old_l: old label on task (NOT NULL)
+ * @new_l: new label with rlimits to apply (NOT NULL)
+ */
+void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l)
+{
+ unsigned int mask = 0;
+ struct rlimit *rlim, *initrlim;
+ struct aa_profile *old, *new;
+ struct label_it i;
+
+ old = labels_profile(old_l);
+ new = labels_profile(new_l);
+
+	/* for any rlimits the old profile controlled, reset the soft limit
+	 * to the lesser of the task's hard limit and the init task's soft
+	 * limit
+	 */
+ label_for_each_confined(i, old_l, old) {
+ if (old->rlimits.mask) {
+ int j;
+
+ for (j = 0, mask = 1; j < RLIM_NLIMITS; j++,
+ mask <<= 1) {
+ if (old->rlimits.mask & mask) {
+ rlim = current->signal->rlim + j;
+ initrlim = init_task.signal->rlim + j;
+ rlim->rlim_cur = min(rlim->rlim_max,
+ initrlim->rlim_cur);
+ }
+ }
+ }
+ }
+
+ /* set any new hard limits as dictated by the new profile */
+ label_for_each_confined(i, new_l, new) {
+ int j;
+
+ if (!new->rlimits.mask)
+ continue;
+ for (j = 0, mask = 1; j < RLIM_NLIMITS; j++, mask <<= 1) {
+ if (!(new->rlimits.mask & mask))
+ continue;
+
+ rlim = current->signal->rlim + j;
+ rlim->rlim_max = min(rlim->rlim_max,
+ new->rlimits.limits[j].rlim_max);
+ /* soft limit should not exceed hard limit */
+ rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
+ }
+ }
+}
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
new file mode 100644
index 000000000..24a0e23f1
--- /dev/null
+++ b/security/apparmor/secid.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (secid) manipulation fns
+ *
+ * Copyright 2009-2017 Canonical Ltd.
+ *
+ * AppArmor allocates a unique secid for every label used. If a label
+ * is replaced it receives the secid of the label it is replacing.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+#include "include/cred.h"
+#include "include/lib.h"
+#include "include/secid.h"
+#include "include/label.h"
+#include "include/policy_ns.h"
+
+/*
+ * secids - do not pin labels with a refcount. They rely on the label code
+ * to properly update/free them.
+ */
+#define AA_FIRST_SECID 2
+
+static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);
+
+int apparmor_display_secid_mode;
+
+/*
+ * TODO: allow policy to reserve a secid range?
+ * TODO: add secid pinning
+ * TODO: use secid_update in label replace
+ */
+
+/**
+ * aa_secid_update - update a secid mapping to a new label
+ * @secid: secid to update
+ * @label: label the secid will now map to
+ */
+void aa_secid_update(u32 secid, struct aa_label *label)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&aa_secids, flags);
+ __xa_store(&aa_secids, secid, label, 0);
+ xa_unlock_irqrestore(&aa_secids, flags);
+}
+
+/**
+ * aa_secid_to_label - look up the label currently mapped to @secid
+ * @secid: secid of the label to look up
+ *
+ * Returns: the label @secid maps to, or NULL if it is unmapped;
+ * see aa_label_to_secid for the inverse mapping
+ */
+struct aa_label *aa_secid_to_label(u32 secid)
+{
+ return xa_load(&aa_secids, secid);
+}
+
+int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ /* TODO: cache secctx and ref count so we don't have to recreate */
+ struct aa_label *label = aa_secid_to_label(secid);
+ int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
+ int len;
+
+ AA_BUG(!seclen);
+
+ if (!label)
+ return -EINVAL;
+
+ if (apparmor_display_secid_mode)
+ flags |= FLAG_SHOW_MODE;
+
+ if (secdata)
+ len = aa_label_asxprint(secdata, root_ns, label,
+ flags, GFP_ATOMIC);
+ else
+ len = aa_label_snxprint(NULL, 0, root_ns, label, flags);
+
+ if (len < 0)
+ return -ENOMEM;
+
+ *seclen = len;
+
+ return 0;
+}
+
+int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+ struct aa_label *label;
+
+ label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
+ seclen, GFP_KERNEL, false, false);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+ *secid = label->secid;
+
+ return 0;
+}
+
+void apparmor_release_secctx(char *secdata, u32 seclen)
+{
+ kfree(secdata);
+}
+
+/**
+ * aa_alloc_secid - allocate a new secid for a profile
+ * @label: the label to allocate a secid for
+ * @gfp: memory allocation flags
+ *
+ * Returns: 0 with @label->secid initialized
+ * <0 returns error with @label->secid set to AA_SECID_INVALID
+ */
+int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
+{
+ unsigned long flags;
+ int ret;
+
+ xa_lock_irqsave(&aa_secids, flags);
+ ret = __xa_alloc(&aa_secids, &label->secid, label,
+ XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
+ xa_unlock_irqrestore(&aa_secids, flags);
+
+ if (ret < 0) {
+ label->secid = AA_SECID_INVALID;
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * aa_free_secid - free a secid
+ * @secid: secid to free
+ */
+void aa_free_secid(u32 secid)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&aa_secids, flags);
+ __xa_erase(&aa_secids, secid);
+ xa_unlock_irqrestore(&aa_secids, flags);
+}
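
As a rough userspace analog of the lifecycle these functions implement (a flat table standing in for the kernel's XArray; the label names and table size are invented): a secid is allocated once, rebound in place when its label is replaced, and erased on free.

#include <stdio.h>

#define FIRST_SECID 2           /* ids 0 and 1 stay reserved, as with AA_FIRST_SECID */
#define TABLE_SZ    16

static const char *table[TABLE_SZ];     /* index = secid, value = label */

static int alloc_secid(const char *label)
{
        for (int id = FIRST_SECID; id < TABLE_SZ; id++)
                if (!table[id]) {
                        table[id] = label;
                        return id;
                }
        return -1;              /* space exhausted */
}

int main(void)
{
        int id = alloc_secid("firefox//&lsa");

        printf("alloc:  secid %d -> %s\n", id, table[id]);
        table[id] = "firefox//&lsa v2";  /* aa_secid_update(): replacement keeps the id */
        printf("update: secid %d -> %s\n", id, table[id]);
        table[id] = NULL;                /* aa_free_secid() */
        return 0;
}
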
diff --git a/security/apparmor/stacksplitdfa.in b/security/apparmor/stacksplitdfa.in
new file mode 100644
index 000000000..4bddd10b6
--- /dev/null
+++ b/security/apparmor/stacksplitdfa.in
@@ -0,0 +1,114 @@
+/* 0x1 [^\000]*[^/\000]//& */ 0x1B, 0x5E, 0x78, 0x3D, 0x00, 0x00,
+0x00, 0x18, 0x00, 0x00, 0x04, 0xD8, 0x00, 0x00, 0x6E, 0x6F, 0x74,
+0x66, 0x6C, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02,
+0x00, 0x02, 0x00, 0x02, 0x00, 0x02, 0x00, 0x02, 0x00, 0x08, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x00,
+0x04, 0x00, 0x01, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x02, 0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04,
+0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
new file mode 100644
index 000000000..503dc0877
--- /dev/null
+++ b/security/apparmor/task.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor task related definitions and mediation
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * TODO
+ * If a task uses change_hat it currently does not return to the old
+ * cred or task context but instead creates a new one. Ideally the task
+ * should return to the previous cred if it has not been modified.
+ */
+
+#include <linux/gfp.h>
+#include <linux/ptrace.h>
+
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/policy.h"
+#include "include/task.h"
+
+/**
+ * aa_get_task_label - Get another task's label
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: counted reference to @task's label
+ */
+struct aa_label *aa_get_task_label(struct task_struct *task)
+{
+ struct aa_label *p;
+
+ rcu_read_lock();
+ p = aa_get_newest_label(__aa_task_raw_label(task));
+ rcu_read_unlock();
+
+ return p;
+}
+
+/**
+ * aa_replace_current_label - replace the current task's label
+ * @label: new label (NOT NULL)
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_replace_current_label(struct aa_label *label)
+{
+ struct aa_label *old = aa_current_raw_label();
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct cred *new;
+
+ AA_BUG(!label);
+
+ if (old == label)
+ return 0;
+
+ if (current_cred() != current_real_cred())
+ return -EBUSY;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ if (ctx->nnp && label_is_stale(ctx->nnp)) {
+ struct aa_label *tmp = ctx->nnp;
+
+ ctx->nnp = aa_get_newest_label(tmp);
+ aa_put_label(tmp);
+ }
+ if (unconfined(label) || (labels_ns(old) != labels_ns(label)))
+ /*
+ * if switching to unconfined or a different label namespace
+ * clear out context state
+ */
+ aa_clear_task_ctx_trans(task_ctx(current));
+
+ /*
+ * be careful switching cred label, when racing replacement it
+ * is possible that the cred labels's->proxy->label is the reference
+ * keeping @label valid, so make sure to get its reference before
+ * dropping the reference on the cred's label
+ */
+ aa_get_label(label);
+ aa_put_label(cred_label(new));
+ set_cred_label(new, label);
+
+ commit_creds(new);
+ return 0;
+}
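
The ordering constraint spelled out in the comment above generalizes to any refcounted pointer swap. A minimal sketch, with a toy refcount standing in for the kernel's label refcounting:

#include <stdio.h>

struct obj {
        int refs;
        const char *name;
};

static struct obj *get(struct obj *o) { o->refs++; return o; }

static void put(struct obj *o)
{
        if (--o->refs == 0)
                printf("freed %s\n", o->name);
}

/*
 * Mirrors the order used above: take the reference on @new before
 * dropping the old one, because the slot being replaced may hold the
 * only reference keeping @new alive (via the proxy in the real code).
 */
static void swap_slot(struct obj **slot, struct obj *new)
{
        struct obj *old = *slot;

        get(new);
        *slot = new;
        put(old);
}

int main(void)
{
        struct obj a = { 1, "old label" }, b = { 1, "new label" };
        struct obj *cred = &a;

        swap_slot(&cred, &b);
        printf("slot now holds \"%s\" (refs=%d)\n", cred->name, cred->refs);
        return 0;
}
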
+
+
+/**
+ * aa_set_current_onexec - set the task's change_profile to happen onexec
+ * @label: system label to set at exec (MAYBE NULL to clear value)
+ * @stack: whether stacking should be done
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_onexec(struct aa_label *label, bool stack)
+{
+ struct aa_task_ctx *ctx = task_ctx(current);
+
+ aa_get_label(label);
+ aa_put_label(ctx->onexec);
+ ctx->onexec = label;
+ ctx->token = stack;
+
+ return 0;
+}
+
+/**
+ * aa_set_current_hat - set the current task's hat
+ * @label: label to set as the current hat (NOT NULL)
+ * @token: token value that must be specified to change from the hat
+ *
+ * Switch the task's hat. If the task is currently in a hat,
+ * validate that the token matches.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_hat(struct aa_label *label, u64 token)
+{
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct cred *new;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ AA_BUG(!label);
+
+ if (!ctx->previous) {
+ /* transfer refcount */
+ ctx->previous = cred_label(new);
+ ctx->token = token;
+ } else if (ctx->token == token) {
+ aa_put_label(cred_label(new));
+ } else {
+ /* previous_profile && ctx->token != token */
+ abort_creds(new);
+ return -EACCES;
+ }
+
+ set_cred_label(new, aa_get_newest_label(label));
+ /* clear exec on switching context */
+ aa_put_label(ctx->onexec);
+ ctx->onexec = NULL;
+
+ commit_creds(new);
+ return 0;
+}
+
+/**
+ * aa_restore_previous_label - exit from hat context restoring previous label
+ * @token: the token that must be matched to exit hat context
+ *
+ * Attempt to return out of a hat to the previous label. The token
+ * must match the stored token value.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_restore_previous_label(u64 token)
+{
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct cred *new;
+
+ if (ctx->token != token)
+ return -EACCES;
+ /* ignore restores when there is no saved label */
+ if (!ctx->previous)
+ return 0;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ aa_put_label(cred_label(new));
+ set_cred_label(new, aa_get_newest_label(ctx->previous));
+ AA_BUG(!cred_label(new));
+ /* clear exec && prev information when restoring to previous context */
+ aa_clear_task_ctx_trans(ctx);
+
+ commit_creds(new);
+
+ return 0;
+}
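
Together, aa_set_current_hat() and aa_restore_previous_label() implement a token-guarded save/restore protocol. A compressed userspace sketch of just the token logic (the label strings and the 0xdeadbeef token are invented):

#include <stdio.h>
#include <stdint.h>

struct hat_ctx {
        const char *previous;
        uint64_t token;
};

static int set_hat(struct hat_ctx *ctx, const char **cur,
                   const char *hat, uint64_t token)
{
        if (!ctx->previous) {           /* first hat: save the return point */
                ctx->previous = *cur;
                ctx->token = token;
        } else if (ctx->token != token) {
                return -13;             /* -EACCES in the kernel */
        }
        *cur = hat;
        return 0;
}

static int restore_hat(struct hat_ctx *ctx, const char **cur, uint64_t token)
{
        if (ctx->token != token)
                return -13;
        if (!ctx->previous)             /* nothing saved: ignore the restore */
                return 0;
        *cur = ctx->previous;
        ctx->previous = NULL;
        ctx->token = 0;
        return 0;
}

int main(void)
{
        struct hat_ctx ctx = { 0 };
        const char *label = "/usr/sbin/httpd";

        set_hat(&ctx, &label, "/usr/sbin/httpd//untrusted-input", 0xdeadbeef);
        printf("in hat:            %s\n", label);
        printf("bad-token restore: %d\n", restore_hat(&ctx, &label, 42));
        restore_hat(&ctx, &label, 0xdeadbeef);
        printf("restored to:       %s\n", label);
        return 0;
}
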
+
+/**
+ * audit_ptrace_mask - convert mask to permission string
+ * @mask: permission mask to convert
+ *
+ * Returns: pointer to static string
+ */
+static const char *audit_ptrace_mask(u32 mask)
+{
+ switch (mask) {
+ case MAY_READ:
+ return "read";
+ case MAY_WRITE:
+ return "trace";
+ case AA_MAY_BE_READ:
+ return "readby";
+ case AA_MAY_BE_TRACED:
+ return "tracedby";
+ }
+ return "";
+}
+
+/* call back to audit ptrace fields */
+static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+ audit_ptrace_mask(aad(sa)->request));
+
+ if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+ audit_ptrace_mask(aad(sa)->denied));
+ }
+ }
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+}
+
+/* assumes check for PROFILE_MEDIATES is already done */
+/* TODO: conditionals */
+static int profile_ptrace_perm(struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+ struct common_audit_data *sa)
+{
+ struct aa_perms perms = { };
+
+ aad(sa)->peer = peer;
+ aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
+}
+
+static int profile_tracee_perm(struct aa_profile *tracee,
+ struct aa_label *tracer, u32 request,
+ struct common_audit_data *sa)
+{
+ if (profile_unconfined(tracee) || unconfined(tracer) ||
+ !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
+ return 0;
+
+ return profile_ptrace_perm(tracee, tracer, request, sa);
+}
+
+static int profile_tracer_perm(struct aa_profile *tracer,
+ struct aa_label *tracee, u32 request,
+ struct common_audit_data *sa)
+{
+ if (profile_unconfined(tracer))
+ return 0;
+
+ if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
+ return profile_ptrace_perm(tracer, tracee, request, sa);
+
+ /* profile uses the old style capability check for ptrace */
+ if (&tracer->label == tracee)
+ return 0;
+
+ aad(sa)->label = &tracer->label;
+ aad(sa)->peer = tracee;
+ aad(sa)->request = 0;
+ aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
+ CAP_OPT_NONE);
+
+ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
+}
+
+/**
+ * aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer: label of the task doing the tracing (NOT NULL)
+ * @tracee: task label to be traced
+ * @request: permission request
+ *
+ * Returns: %0 on success, else an error code if permission is denied or an
+ * error occurs
+ */
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ u32 request)
+{
+ struct aa_profile *profile;
+ u32 xrequest = request << PTRACE_PERM_SHIFT;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
+
+ return xcheck_labels(tracer, tracee, profile,
+ profile_tracer_perm(profile, tracee, request, &sa),
+ profile_tracee_perm(profile, tracer, xrequest, &sa));
+}
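
The cross check works because the "be read/traced" permission bits are the tracer-side bits shifted left by PTRACE_PERM_SHIFT. A sketch with assumed bit values (the real constants live in the apparmor headers):

#include <stdio.h>

/* assumed layout, for illustration only */
#define MAY_WRITE       0x2                             /* "trace"    */
#define MAY_READ        0x4                             /* "read"     */
#define PERM_SHIFT      4
#define MAY_BE_READ     (MAY_READ  << PERM_SHIFT)       /* "readby"   */
#define MAY_BE_TRACED   (MAY_WRITE << PERM_SHIFT)       /* "tracedby" */

int main(void)
{
        unsigned int request  = MAY_WRITE;              /* checked on the tracer */
        unsigned int xrequest = request << PERM_SHIFT;  /* checked on the tracee */

        printf("tracer asked for 0x%x, tracee checked for 0x%x (%s)\n",
               request, xrequest,
               xrequest == MAY_BE_TRACED ? "tracedby" : "?");
        return 0;
}
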
diff --git a/security/bpf/Makefile b/security/bpf/Makefile
new file mode 100644
index 000000000..c7a89a962
--- /dev/null
+++ b/security/bpf/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2020 Google LLC.
+
+obj-$(CONFIG_BPF_LSM) := hooks.o
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
new file mode 100644
index 000000000..e5971fa74
--- /dev/null
+++ b/security/bpf/hooks.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+#include <linux/lsm_hooks.h>
+#include <linux/bpf_lsm.h>
+
+static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ LSM_HOOK_INIT(NAME, bpf_lsm_##NAME),
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+ LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
+ LSM_HOOK_INIT(task_free, bpf_task_storage_free),
+};
+
+static int __init bpf_lsm_init(void)
+{
+ security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
+ pr_info("LSM support for eBPF active\n");
+ return 0;
+}
+
+struct lsm_blob_sizes bpf_lsm_blob_sizes __lsm_ro_after_init = {
+ .lbs_inode = sizeof(struct bpf_storage_blob),
+ .lbs_task = sizeof(struct bpf_storage_blob),
+};
+
+DEFINE_LSM(bpf) = {
+ .name = "bpf",
+ .init = bpf_lsm_init,
+ .blobs = &bpf_lsm_blob_sizes
+};
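
For context, a program dispatched through these hooks looks roughly like the libbpf-style sketch below. The hook, the program name, and the blanket -EPERM verdict are purely illustrative; it assumes a kernel with CONFIG_BPF_LSM, "bpf" in the lsm= list, and a vmlinux.h generated by bpftool.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define EPERM 1         /* avoid pulling in the userspace <errno.h> */

char LICENSE[] SEC("license") = "GPL";

/* The trailing "ret" argument carries the verdict of any BPF program that
 * ran on this hook before us; keep an earlier denial, else add our own. */
SEC("lsm/task_fix_setuid")
int BPF_PROG(deny_all_setuid, struct cred *new, const struct cred *old,
             int flags, int ret)
{
        if (ret)
                return ret;
        return -EPERM;  /* blanket deny, only for illustration */
}
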
diff --git a/security/commoncap.c b/security/commoncap.c
new file mode 100644
index 000000000..bc751fa5a
--- /dev/null
+++ b/security/commoncap.c
@@ -0,0 +1,1485 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Common capabilities, needed by capability.o.
+ */
+
+#include <linux/capability.h>
+#include <linux/audit.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/lsm_hooks.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/ptrace.h>
+#include <linux/xattr.h>
+#include <linux/hugetlb.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/prctl.h>
+#include <linux/securebits.h>
+#include <linux/user_namespace.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/mnt_idmapping.h>
+
+/*
+ * If a non-root user executes a setuid-root binary in
+ * !secure(SECURE_NOROOT) mode, then we raise capabilities.
+ * However if fE is also set, then the intent is for only
+ * the file capabilities to be applied, and the setuid-root
+ * bit is left on either to change the uid (plausible) or
+ * to get full privilege on a kernel without file capabilities
+ * support. So in that case we do not raise capabilities.
+ *
+ * Warn if that happens, once per boot.
+ */
+static void warn_setuid_and_fcaps_mixed(const char *fname)
+{
+ static int warned;
+ if (!warned) {
+ printk(KERN_INFO "warning: `%s' has both setuid-root and"
+ " effective capabilities. Therefore not raising all"
+ " capabilities.\n", fname);
+ warned = 1;
+ }
+}
+
+/**
+ * cap_capable - Determine whether a task has a particular effective capability
+ * @cred: The credentials to use
+ * @targ_ns: The user namespace in which we need the capability
+ * @cap: The capability to check for
+ * @opts: Bitmask of options defined in include/linux/security.h
+ *
+ * Determine whether the nominated task has the specified capability amongst
+ * its effective set, returning 0 if it does, -ve if it does not.
+ *
+ * NOTE WELL: cap_capable() cannot be used like the kernel's capable()
+ * and has_capability() functions. That is, it has the reverse semantics:
+ * cap_capable() returns 0 when a task has a capability, but the
+ * kernel's capable() and has_capability() return 1 for this case.
+ */
+int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+ int cap, unsigned int opts)
+{
+ struct user_namespace *ns = targ_ns;
+
+ /* See if cred has the capability in the target user namespace
+ * by examining the target user namespace and all of the target
+ * user namespace's parents.
+ */
+ for (;;) {
+ /* Do we have the necessary capabilities? */
+ if (ns == cred->user_ns)
+ return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+
+ /*
+ * If we're already at a lower level than we're looking for,
+ * we're done searching.
+ */
+ if (ns->level <= cred->user_ns->level)
+ return -EPERM;
+
+ /*
+ * The owner of the user namespace in the parent of the
+ * user namespace has all caps.
+ */
+ if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ return 0;
+
+ /*
+ * If you have a capability in a parent user ns, then you have
+ * it over all children user namespaces as well.
+ */
+ ns = ns->parent;
+ }
+
+ /* We never get here */
+}
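
A toy model of the same walk (two hand-built namespaces with invented uids) makes the owner-of-child rule visible:

#include <stdio.h>

struct ns {
        int level;
        int owner_uid;
        struct ns *parent;
};

static int capable_in(struct ns *cred_ns, int cred_euid, int has_cap,
                      struct ns *target)
{
        struct ns *ns;

        for (ns = target; ns; ns = ns->parent) {
                if (ns == cred_ns)
                        return has_cap;         /* ordinary effective-set test */
                if (ns->level <= cred_ns->level)
                        return 0;               /* target is not below us */
                if (ns->parent == cred_ns && ns->owner_uid == cred_euid)
                        return 1;               /* we own the child namespace */
        }
        return 0;
}

int main(void)
{
        struct ns init  = { 0, 0, NULL };
        struct ns child = { 1, 1000, &init };   /* created by uid 1000 */

        printf("uid 1000, no cap, over its child ns: %d\n",
               capable_in(&init, 1000, 0, &child));
        printf("uid 1001, no cap, over the same ns:  %d\n",
               capable_in(&init, 1001, 0, &child));
        return 0;
}
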
+
+/**
+ * cap_settime - Determine whether the current process may set the system clock
+ * @ts: The time to set
+ * @tz: The timezone to set
+ *
+ * Determine whether the current process may set the system clock and timezone
+ * information, returning 0 if permission granted, -ve if denied.
+ */
+int cap_settime(const struct timespec64 *ts, const struct timezone *tz)
+{
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+ return 0;
+}
+
+/**
+ * cap_ptrace_access_check - Determine whether the current process may access
+ * another
+ * @child: The process to be accessed
+ * @mode: The mode of attachment.
+ *
+ * If we are in the same or an ancestor user_ns and have all the target
+ * task's capabilities, then ptrace access is allowed.
+ * If we have the ptrace capability to the target user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
+ * Determine whether a process may access another, returning 0 if permission
+ * granted, -ve if denied.
+ */
+int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
+{
+ int ret = 0;
+ const struct cred *cred, *child_cred;
+ const kernel_cap_t *caller_caps;
+
+ rcu_read_lock();
+ cred = current_cred();
+ child_cred = __task_cred(child);
+ if (mode & PTRACE_MODE_FSCREDS)
+ caller_caps = &cred->cap_effective;
+ else
+ caller_caps = &cred->cap_permitted;
+ if (cred->user_ns == child_cred->user_ns &&
+ cap_issubset(child_cred->cap_permitted, *caller_caps))
+ goto out;
+ if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
+ goto out;
+ ret = -EPERM;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * cap_ptrace_traceme - Determine whether another process may trace the current
+ * @parent: The task proposed to be the tracer
+ *
+ * If parent is in the same or an ancestor user_ns and has all current's
+ * capabilities, then ptrace access is allowed.
+ * If parent has the ptrace capability to current's user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
+ * Determine whether the nominated task is permitted to trace the current
+ * process, returning 0 if permission is granted, -ve if denied.
+ */
+int cap_ptrace_traceme(struct task_struct *parent)
+{
+ int ret = 0;
+ const struct cred *cred, *child_cred;
+
+ rcu_read_lock();
+ cred = __task_cred(parent);
+ child_cred = current_cred();
+ if (cred->user_ns == child_cred->user_ns &&
+ cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+ goto out;
+ if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
+ goto out;
+ ret = -EPERM;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * cap_capget - Retrieve a task's capability sets
+ * @target: The task from which to retrieve the capability sets
+ * @effective: The place to record the effective set
+ * @inheritable: The place to record the inheritable set
+ * @permitted: The place to record the permitted set
+ *
+ * This function retrieves the capabilities of the nominated task and returns
+ * them to the caller.
+ */
+int cap_capget(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+ const struct cred *cred;
+
+ /* Derived from kernel/capability.c:sys_capget. */
+ rcu_read_lock();
+ cred = __task_cred(target);
+ *effective = cred->cap_effective;
+ *inheritable = cred->cap_inheritable;
+ *permitted = cred->cap_permitted;
+ rcu_read_unlock();
+ return 0;
+}
+
+/*
+ * Determine whether the inheritable capabilities are limited to the old
+ * permitted set. Returns 1 if they are limited, 0 if they are not.
+ */
+static inline int cap_inh_is_capped(void)
+{
+ /* they are so limited unless the current task has the CAP_SETPCAP
+ * capability
+ */
+ if (cap_capable(current_cred(), current_cred()->user_ns,
+ CAP_SETPCAP, CAP_OPT_NONE) == 0)
+ return 0;
+ return 1;
+}
+
+/**
+ * cap_capset - Validate and apply proposed changes to current's capabilities
+ * @new: The proposed new credentials; alterations should be made here
+ * @old: The current task's current credentials
+ * @effective: A pointer to the proposed new effective capabilities set
+ * @inheritable: A pointer to the proposed new inheritable capabilities set
+ * @permitted: A pointer to the proposed new permitted capabilities set
+ *
+ * This function validates and applies a proposed mass change to the current
+ * process's capability sets. The changes are made to the proposed new
+ * credentials, and assuming no error, will be committed by the caller of LSM.
+ */
+int cap_capset(struct cred *new,
+ const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+{
+ if (cap_inh_is_capped() &&
+ !cap_issubset(*inheritable,
+ cap_combine(old->cap_inheritable,
+ old->cap_permitted)))
+ /* incapable of using this inheritable set */
+ return -EPERM;
+
+ if (!cap_issubset(*inheritable,
+ cap_combine(old->cap_inheritable,
+ old->cap_bset)))
+ /* no new pI capabilities outside bounding set */
+ return -EPERM;
+
+ /* verify restrictions on target's new Permitted set */
+ if (!cap_issubset(*permitted, old->cap_permitted))
+ return -EPERM;
+
+ /* verify the _new_Effective_ is a subset of the _new_Permitted_ */
+ if (!cap_issubset(*effective, *permitted))
+ return -EPERM;
+
+ new->cap_effective = *effective;
+ new->cap_inheritable = *inheritable;
+ new->cap_permitted = *permitted;
+
+ /*
+ * Mask off ambient bits that are no longer both permitted and
+ * inheritable.
+ */
+ new->cap_ambient = cap_intersect(new->cap_ambient,
+ cap_intersect(*permitted,
+ *inheritable));
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * cap_inode_need_killpriv - Determine if inode change affects privileges
+ * @dentry: The inode/dentry being changed, where the change is marked
+ *          ATTR_KILL_PRIV
+ *
+ * Determine whether a change marked ATTR_KILL_PRIV that is being applied to
+ * an inode affects its security markings and, if so, whether
+ * inode_killpriv() should be invoked or the change rejected.
+ *
+ * Return: 1 if security.capability has a value, meaning inode_killpriv()
+ * is required, 0 otherwise, meaning inode_killpriv() is not required.
+ */
+int cap_inode_need_killpriv(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ int error;
+
+ error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
+ return error > 0;
+}
+
+/**
+ * cap_inode_killpriv - Erase the security markings on an inode
+ *
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: The inode/dentry to alter
+ *
+ * Erase the privilege-enhancing security markings on an inode.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ *
+ * Return: 0 if successful, -ve on error.
+ */
+int cap_inode_killpriv(struct user_namespace *mnt_userns, struct dentry *dentry)
+{
+ int error;
+
+ error = __vfs_removexattr(mnt_userns, dentry, XATTR_NAME_CAPS);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+ return error;
+}
+
+static bool rootid_owns_currentns(kuid_t kroot)
+{
+ struct user_namespace *ns;
+
+ if (!uid_valid(kroot))
+ return false;
+
+ for (ns = current_user_ns(); ; ns = ns->parent) {
+ if (from_kuid(ns, kroot) == 0)
+ return true;
+ if (ns == &init_user_ns)
+ break;
+ }
+
+ return false;
+}
+
+static __u32 sansflags(__u32 m)
+{
+ return m & ~VFS_CAP_FLAGS_EFFECTIVE;
+}
+
+static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
+{
+ if (size != XATTR_CAPS_SZ_2)
+ return false;
+ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
+}
+
+static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
+{
+ if (size != XATTR_CAPS_SZ_3)
+ return false;
+ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
+}
+
+/*
+ * getsecurity: We are called for security.* before any attempt to read the
+ * xattr from the inode itself.
+ *
+ * This gives us a chance to read the on-disk value and convert it. If we
+ * return -EOPNOTSUPP, then vfs_getxattr() will call the i_op handler.
+ *
+ * Note we are not called by vfs_getxattr_alloc(), but that is only called
+ * by the integrity subsystem, which really wants the unconverted values -
+ * so that's good.
+ */
+int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name, void **buffer,
+ bool alloc)
+{
+ int size, ret;
+ kuid_t kroot;
+ u32 nsmagic, magic;
+ uid_t root, mappedroot;
+ char *tmpbuf = NULL;
+ struct vfs_cap_data *cap;
+ struct vfs_ns_cap_data *nscap = NULL;
+ struct dentry *dentry;
+ struct user_namespace *fs_ns;
+
+ if (strcmp(name, "capability") != 0)
+ return -EOPNOTSUPP;
+
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return -EINVAL;
+
+ size = sizeof(struct vfs_ns_cap_data);
+ ret = (int)vfs_getxattr_alloc(mnt_userns, dentry, XATTR_NAME_CAPS,
+ &tmpbuf, size, GFP_NOFS);
+ dput(dentry);
+
+ if (ret < 0 || !tmpbuf) {
+ size = ret;
+ goto out_free;
+ }
+
+ fs_ns = inode->i_sb->s_user_ns;
+ cap = (struct vfs_cap_data *) tmpbuf;
+ if (is_v2header((size_t) ret, cap)) {
+ root = 0;
+ } else if (is_v3header((size_t) ret, cap)) {
+ nscap = (struct vfs_ns_cap_data *) tmpbuf;
+ root = le32_to_cpu(nscap->rootid);
+ } else {
+ size = -EINVAL;
+ goto out_free;
+ }
+
+ kroot = make_kuid(fs_ns, root);
+
+ /* If this is an idmapped mount shift the kuid. */
+ kroot = mapped_kuid_fs(mnt_userns, fs_ns, kroot);
+
+ /* If the root kuid maps to a valid uid in current ns, then return
+ * this as a nscap. */
+ mappedroot = from_kuid(current_user_ns(), kroot);
+ if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+ size = sizeof(struct vfs_ns_cap_data);
+ if (alloc) {
+ if (!nscap) {
+ /* v2 -> v3 conversion */
+ nscap = kzalloc(size, GFP_ATOMIC);
+ if (!nscap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ } else {
+ /* use allocated v3 buffer */
+ tmpbuf = NULL;
+ }
+ nscap->rootid = cpu_to_le32(mappedroot);
+ *buffer = nscap;
+ }
+ goto out_free;
+ }
+
+ if (!rootid_owns_currentns(kroot)) {
+ size = -EOVERFLOW;
+ goto out_free;
+ }
+
+ /* This comes from a parent namespace. Return as a v2 capability */
+ size = sizeof(struct vfs_cap_data);
+ if (alloc) {
+ if (nscap) {
+ /* v3 -> v2 conversion */
+ cap = kzalloc(size, GFP_ATOMIC);
+ if (!cap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ magic = VFS_CAP_REVISION_2;
+ nsmagic = le32_to_cpu(nscap->magic_etc);
+ if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
+ magic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ cap->magic_etc = cpu_to_le32(magic);
+ } else {
+ /* use unconverted v2 */
+ tmpbuf = NULL;
+ }
+ *buffer = cap;
+ }
+out_free:
+ kfree(tmpbuf);
+ return size;
+}
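
The three-way outcome above reduces to a small decision table (the uid values are invented; -1 stands for "does not map into the caller's namespace"):

#include <stdio.h>
#include <stdbool.h>

static const char *caps_representation(int mapped_uid, bool owns_rootid)
{
        if (mapped_uid != -1 && mapped_uid != 0)
                return "v3 (rootid rewritten for the caller)";
        if (owns_rootid)
                return "v2 (rootid is root here, drop it)";
        return "-EOVERFLOW (rootid not representable)";
}

int main(void)
{
        printf("maps to 100000: %s\n", caps_representation(100000, false));
        printf("maps to 0:      %s\n", caps_representation(0, true));
        printf("no mapping:     %s\n", caps_representation(-1, false));
        return 0;
}
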
+
+/**
+ * rootid_from_xattr - translate root uid of vfs caps
+ *
+ * @value: vfs caps value which may be modified by this function
+ * @size: size of @value
+ * @task_ns: user namespace of the caller
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @fs_userns: user namespace of the filesystem
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ */
+static kuid_t rootid_from_xattr(const void *value, size_t size,
+ struct user_namespace *task_ns,
+ struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns)
+{
+ const struct vfs_ns_cap_data *nscap = value;
+ kuid_t rootkid;
+ uid_t rootid = 0;
+
+ if (size == XATTR_CAPS_SZ_3)
+ rootid = le32_to_cpu(nscap->rootid);
+
+ rootkid = make_kuid(task_ns, rootid);
+ return mapped_kuid_user(mnt_userns, fs_userns, rootkid);
+}
+
+static bool validheader(size_t size, const struct vfs_cap_data *cap)
+{
+ return is_v2header(size, cap) || is_v3header(size, cap);
+}
+
+/**
+ * cap_convert_nscap - check vfs caps
+ *
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: used to retrieve inode to check permissions on
+ * @ivalue: vfs caps value which may be modified by this function
+ * @size: size of @ivalue
+ *
+ * User requested a write of security.capability. If needed, update the
+ * xattr to change from v2 to v3, or to fixup the v3 rootid.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ *
+ * Return: On success, return the new size; on error, return < 0.
+ */
+int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const void **ivalue, size_t size)
+{
+ struct vfs_ns_cap_data *nscap;
+ uid_t nsrootid;
+ const struct vfs_cap_data *cap = *ivalue;
+ __u32 magic, nsmagic;
+ struct inode *inode = d_backing_inode(dentry);
+ struct user_namespace *task_ns = current_user_ns(),
+ *fs_ns = inode->i_sb->s_user_ns;
+ kuid_t rootid;
+ size_t newsize;
+
+ if (!*ivalue)
+ return -EINVAL;
+ if (!validheader(size, cap))
+ return -EINVAL;
+ if (!capable_wrt_inode_uidgid(mnt_userns, inode, CAP_SETFCAP))
+ return -EPERM;
+ if (size == XATTR_CAPS_SZ_2 && (mnt_userns == fs_ns))
+ if (ns_capable(inode->i_sb->s_user_ns, CAP_SETFCAP))
+ /* user is privileged, just write the v2 */
+ return size;
+
+ rootid = rootid_from_xattr(*ivalue, size, task_ns, mnt_userns, fs_ns);
+ if (!uid_valid(rootid))
+ return -EINVAL;
+
+ nsrootid = from_kuid(fs_ns, rootid);
+ if (nsrootid == -1)
+ return -EINVAL;
+
+ newsize = sizeof(struct vfs_ns_cap_data);
+ nscap = kmalloc(newsize, GFP_ATOMIC);
+ if (!nscap)
+ return -ENOMEM;
+ nscap->rootid = cpu_to_le32(nsrootid);
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+
+ *ivalue = nscap;
+ return newsize;
+}
+
+/*
+ * Calculate the new process capability sets from the capability sets attached
+ * to a file.
+ */
+static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
+ struct linux_binprm *bprm,
+ bool *effective,
+ bool *has_fcap)
+{
+ struct cred *new = bprm->cred;
+ unsigned i;
+ int ret = 0;
+
+ if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
+ *effective = true;
+
+ if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+ *has_fcap = true;
+
+ CAP_FOR_EACH_U32(i) {
+ __u32 permitted = caps->permitted.cap[i];
+ __u32 inheritable = caps->inheritable.cap[i];
+
+ /*
+ * pP' = (X & fP) | (pI & fI)
+ * The addition of pA' is handled later.
+ */
+ new->cap_permitted.cap[i] =
+ (new->cap_bset.cap[i] & permitted) |
+ (new->cap_inheritable.cap[i] & inheritable);
+
+ if (permitted & ~new->cap_permitted.cap[i])
+ /* insufficient to execute correctly */
+ ret = -EPERM;
+ }
+
+ /*
+ * For legacy apps, with no internal support for recognizing they
+ * do not have enough capabilities, we return an error if they are
+ * missing some "forced" (aka file-permitted) capabilities.
+ */
+ return *effective ? ret : 0;
+}
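
A worked instance of pP' = (X & fP) | (pI & fI) on a single 32-bit word, with invented set values:

#include <stdio.h>

int main(void)
{
        unsigned int X  = 0xffffffff;   /* bounding set (cap_bset) */
        unsigned int fP = 0x00000009;   /* file permitted          */
        unsigned int fI = 0x00000002;   /* file inheritable        */
        unsigned int pI = 0x00000006;   /* process inheritable     */

        unsigned int pP = (X & fP) | (pI & fI);

        printf("pP' = 0x%08x\n", pP);   /* 0x0000000b */
        if (fP & ~pP)
                printf("-EPERM: file needs caps outside the bounding set\n");
        return 0;
}
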
+
+/**
+ * get_vfs_caps_from_disk - retrieve vfs caps from disk
+ *
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: dentry from which @inode is retrieved
+ * @cpu_caps: vfs capabilities
+ *
+ * Extract the on-exec-apply capability sets for an executable file.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ */
+int get_vfs_caps_from_disk(struct user_namespace *mnt_userns,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ __u32 magic_etc;
+ unsigned tocopy, i;
+ int size;
+ struct vfs_ns_cap_data data, *nscaps = &data;
+ struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
+ kuid_t rootkuid;
+ struct user_namespace *fs_ns;
+
+ memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
+
+ if (!inode)
+ return -ENODATA;
+
+ fs_ns = inode->i_sb->s_user_ns;
+ size = __vfs_getxattr((struct dentry *)dentry, inode,
+ XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
+ if (size == -ENODATA || size == -EOPNOTSUPP)
+ /* no data, that's ok */
+ return -ENODATA;
+
+ if (size < 0)
+ return size;
+
+ if (size < sizeof(magic_etc))
+ return -EINVAL;
+
+ cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps->magic_etc);
+
+ rootkuid = make_kuid(fs_ns, 0);
+ switch (magic_etc & VFS_CAP_REVISION_MASK) {
+ case VFS_CAP_REVISION_1:
+ if (size != XATTR_CAPS_SZ_1)
+ return -EINVAL;
+ tocopy = VFS_CAP_U32_1;
+ break;
+ case VFS_CAP_REVISION_2:
+ if (size != XATTR_CAPS_SZ_2)
+ return -EINVAL;
+ tocopy = VFS_CAP_U32_2;
+ break;
+ case VFS_CAP_REVISION_3:
+ if (size != XATTR_CAPS_SZ_3)
+ return -EINVAL;
+ tocopy = VFS_CAP_U32_3;
+ rootkuid = make_kuid(fs_ns, le32_to_cpu(nscaps->rootid));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ /* Limit the caps to the mounter of the filesystem
+ * or the more limited uid specified in the xattr.
+ */
+ rootkuid = mapped_kuid_fs(mnt_userns, fs_ns, rootkuid);
+ if (!rootid_owns_currentns(rootkuid))
+ return -ENODATA;
+
+ CAP_FOR_EACH_U32(i) {
+ if (i >= tocopy)
+ break;
+ cpu_caps->permitted.cap[i] = le32_to_cpu(caps->data[i].permitted);
+ cpu_caps->inheritable.cap[i] = le32_to_cpu(caps->data[i].inheritable);
+ }
+
+ cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+ cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
+ cpu_caps->rootid = rootkuid;
+
+ return 0;
+}
+
+/*
+ * Attempt to get the on-exec apply capability sets for an executable file from
+ * its xattrs and, if present, apply them to the proposed credentials being
+ * constructed by execve().
+ */
+static int get_file_caps(struct linux_binprm *bprm, struct file *file,
+ bool *effective, bool *has_fcap)
+{
+ int rc = 0;
+ struct cpu_vfs_cap_data vcaps;
+
+ cap_clear(bprm->cred->cap_permitted);
+
+ if (!file_caps_enabled)
+ return 0;
+
+ if (!mnt_may_suid(file->f_path.mnt))
+ return 0;
+
+ /*
+ * This check is redundant with mnt_may_suid() but is kept to make
+ * explicit that capability bits are limited to s_user_ns and its
+ * descendants.
+ */
+ if (!current_in_userns(file->f_path.mnt->mnt_sb->s_user_ns))
+ return 0;
+
+ rc = get_vfs_caps_from_disk(file_mnt_user_ns(file),
+ file->f_path.dentry, &vcaps);
+ if (rc < 0) {
+ if (rc == -EINVAL)
+ printk(KERN_NOTICE "Invalid argument reading file caps for %s\n",
+ bprm->filename);
+ else if (rc == -ENODATA)
+ rc = 0;
+ goto out;
+ }
+
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
+
+out:
+ if (rc)
+ cap_clear(bprm->cred->cap_permitted);
+
+ return rc;
+}
+
+static inline bool root_privileged(void) { return !issecure(SECURE_NOROOT); }
+
+static inline bool __is_real(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->uid, uid); }
+
+static inline bool __is_eff(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->euid, uid); }
+
+static inline bool __is_suid(kuid_t uid, struct cred *cred)
+{ return !__is_real(uid, cred) && __is_eff(uid, cred); }
+
+/*
+ * handle_privileged_root - Handle case of privileged root
+ * @bprm: The execution parameters, including the proposed creds
+ * @has_fcap: Are any file capabilities set?
+ * @effective: Do we have effective root privilege?
+ * @root_uid: This namespace's root UID with respect to the initial user namespace
+ *
+ * Handle the case where root is privileged and hasn't been neutered by
+ * SECURE_NOROOT. If file capabilities are set, they won't be combined with
+ * set UID root and nothing is changed. If we are root, cap_permitted is
+ * updated. If we have become set UID root, the effective bit is set.
+ */
+static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
+ bool *effective, kuid_t root_uid)
+{
+ const struct cred *old = current_cred();
+ struct cred *new = bprm->cred;
+
+ if (!root_privileged())
+ return;
+ /*
+ * If the legacy file capability is set, then don't set privs
+ * for a setuid root binary run by a non-root user. Do set it
+ * for a root user just to cause least surprise to an admin.
+ */
+ if (has_fcap && __is_suid(root_uid, new)) {
+ warn_setuid_and_fcaps_mixed(bprm->filename);
+ return;
+ }
+ /*
+ * To support inheritance of root-permissions and suid-root
+ * executables under compatibility mode, we override the
+ * capability sets for the file.
+ */
+ if (__is_eff(root_uid, new) || __is_real(root_uid, new)) {
+ /* pP' = (cap_bset & ~0) | (pI & ~0) */
+ new->cap_permitted = cap_combine(old->cap_bset,
+ old->cap_inheritable);
+ }
+ /*
+ * If only the real uid is 0, we do not set the effective bit.
+ */
+ if (__is_eff(root_uid, new))
+ *effective = true;
+}
+
+#define __cap_gained(field, target, source) \
+ !cap_issubset(target->cap_##field, source->cap_##field)
+#define __cap_grew(target, source, cred) \
+ !cap_issubset(cred->cap_##target, cred->cap_##source)
+#define __cap_full(field, cred) \
+ cap_issubset(CAP_FULL_SET, cred->cap_##field)
+
+static inline bool __is_setuid(struct cred *new, const struct cred *old)
+{ return !uid_eq(new->euid, old->uid); }
+
+static inline bool __is_setgid(struct cred *new, const struct cred *old)
+{ return !gid_eq(new->egid, old->gid); }
+
+/*
+ * 1) Audit candidate if current->cap_effective is set
+ *
+ * We do not bother to audit if 3 things are true:
+ * 1) cap_effective has all caps
+ * 2) we became root *OR* were already root
+ * 3) root is supposed to have all caps (SECURE_NOROOT)
+ * Since this is just a normal root execing a process.
+ *
+ * Number 1 above might fail if you don't have a full bset, but I think
+ * that is interesting information to audit.
+ *
+ * A number of other conditions require logging:
+ * 2) something prevented setuid root getting all caps
+ * 3) non-setuid root gets fcaps
+ * 4) non-setuid root gets ambient
+ */
+static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
+ kuid_t root, bool has_fcap)
+{
+ bool ret = false;
+
+ if ((__cap_grew(effective, ambient, new) &&
+ !(__cap_full(effective, new) &&
+ (__is_eff(root, new) || __is_real(root, new)) &&
+ root_privileged())) ||
+ (root_privileged() &&
+ __is_suid(root, new) &&
+ !__cap_full(effective, new)) ||
+ (!__is_setuid(new, old) &&
+ ((has_fcap &&
+ __cap_gained(permitted, new, old)) ||
+ __cap_gained(ambient, new, old))))
+
+ ret = true;
+
+ return ret;
+}
+
+/**
+ * cap_bprm_creds_from_file - Set up the proposed credentials for execve().
+ * @bprm: The execution parameters, including the proposed creds
+ * @file: The file to pull the credentials from
+ *
+ * Set up the proposed credentials for a new execution context being
+ * constructed by execve(). The proposed creds in @bprm->cred is altered,
+ * which won't take effect immediately.
+ *
+ * Return: 0 if successful, -ve on error.
+ */
+int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
+{
+ /* Process setpcap binaries and capabilities for uid 0 */
+ const struct cred *old = current_cred();
+ struct cred *new = bprm->cred;
+ bool effective = false, has_fcap = false, is_setid;
+ int ret;
+ kuid_t root_uid;
+
+ if (WARN_ON(!cap_ambient_invariant_ok(old)))
+ return -EPERM;
+
+ ret = get_file_caps(bprm, file, &effective, &has_fcap);
+ if (ret < 0)
+ return ret;
+
+ root_uid = make_kuid(new->user_ns, 0);
+
+ handle_privileged_root(bprm, has_fcap, &effective, root_uid);
+
+ /* if we have fs caps, clear dangerous personality flags */
+ if (__cap_gained(permitted, new, old))
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+ /* Don't let someone trace a set[ug]id/setpcap binary with the revised
+ * credentials unless they have the appropriate permit.
+ *
+ * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
+ */
+ is_setid = __is_setuid(new, old) || __is_setgid(new, old);
+
+ if ((is_setid || __cap_gained(permitted, new, old)) &&
+ ((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
+ !ptracer_capable(current, new->user_ns))) {
+ /* downgrade; they get no more than they had, and maybe less */
+ if (!ns_capable(new->user_ns, CAP_SETUID) ||
+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
+ new->euid = new->uid;
+ new->egid = new->gid;
+ }
+ new->cap_permitted = cap_intersect(new->cap_permitted,
+ old->cap_permitted);
+ }
+
+ new->suid = new->fsuid = new->euid;
+ new->sgid = new->fsgid = new->egid;
+
+ /* File caps or setid cancels ambient. */
+ if (has_fcap || is_setid)
+ cap_clear(new->cap_ambient);
+
+ /*
+ * Now that we've computed pA', update pP' to give:
+ * pP' = (X & fP) | (pI & fI) | pA'
+ */
+ new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
+
+ /*
+ * Set pE' = (fE ? pP' : pA'). Because pA' is zero if fE is set,
+ * this is the same as pE' = (fE ? pP' : 0) | pA'.
+ */
+ if (effective)
+ new->cap_effective = new->cap_permitted;
+ else
+ new->cap_effective = new->cap_ambient;
+
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EPERM;
+
+ if (nonroot_raised_pE(new, old, root_uid, has_fcap)) {
+ ret = audit_log_bprm_fcaps(bprm, new, old);
+ if (ret < 0)
+ return ret;
+ }
+
+ new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EPERM;
+
+ /* Check for privilege-elevated exec. */
+ if (is_setid ||
+ (!__is_real(root_uid, new) &&
+ (effective ||
+ __cap_grew(permitted, ambient, new))))
+ bprm->secureexec = 1;
+
+ return 0;
+}
+
+/**
+ * cap_inode_setxattr - Determine whether an xattr may be altered
+ * @dentry: The inode/dentry being altered
+ * @name: The name of the xattr to be changed
+ * @value: The value that the xattr will be changed to
+ * @size: The size of value
+ * @flags: The replacement flag
+ *
+ * Determine whether an xattr may be altered or set on an inode, returning 0 if
+ * permission is granted, -ve if denied.
+ *
+ * This is used to make sure security xattrs don't get updated or set by those
+ * who aren't privileged to do so.
+ */
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
+
+ /* Ignore non-security xattrs */
+ if (strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0)
+ return 0;
+
+ /*
+ * For XATTR_NAME_CAPS the check will be done in
+ * cap_convert_nscap(), called by setxattr()
+ */
+ if (strcmp(name, XATTR_NAME_CAPS) == 0)
+ return 0;
+
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
+/**
+ * cap_inode_removexattr - Determine whether an xattr may be removed
+ *
+ * @mnt_userns: User namespace of the mount the inode was found from
+ * @dentry: The inode/dentry being altered
+ * @name: The name of the xattr to be changed
+ *
+ * Determine whether an xattr may be removed from an inode, returning 0 if
+ * permission is granted, -ve if denied.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ *
+ * This is used to make sure security xattrs don't get removed by those who
+ * aren't privileged to remove them.
+ */
+int cap_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
+{
+ struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
+
+ /* Ignore non-security xattrs */
+ if (strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0)
+ return 0;
+
+ if (strcmp(name, XATTR_NAME_CAPS) == 0) {
+ /* security.capability gets namespaced */
+ struct inode *inode = d_backing_inode(dentry);
+ if (!inode)
+ return -EINVAL;
+ if (!capable_wrt_inode_uidgid(mnt_userns, inode, CAP_SETFCAP))
+ return -EPERM;
+ return 0;
+ }
+
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
+/*
+ * cap_emulate_setxuid() fixes the effective / permitted capabilities of
+ * a process after a call to setuid, setreuid, or setresuid.
+ *
+ * 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
+ * {r,e,s}uid != 0, the permitted and effective capabilities are
+ * cleared.
+ *
+ * 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
+ * capabilities of the process are cleared.
+ *
+ * 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
+ * capabilities are set to the permitted capabilities.
+ *
+ * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid != 0 should
+ * never happen.
+ *
+ * -astor
+ *
+ * cevans - New behaviour, Oct '99
+ * A process may, via prctl(), elect to keep its capabilities when it
+ * calls setuid() and switches away from uid==0. Both permitted and
+ * effective sets will be retained.
+ * Without this change, it was impossible for a daemon to drop only some
+ * of its privilege. The call to setuid(!=0) would drop all privileges!
+ * Keeping uid 0 is not an option because uid 0 owns too many vital
+ * files..
+ * Thanks to Olaf Kirch and Peter Benie for spotting this.
+ */
+static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
+{
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+
+ if ((uid_eq(old->uid, root_uid) ||
+ uid_eq(old->euid, root_uid) ||
+ uid_eq(old->suid, root_uid)) &&
+ (!uid_eq(new->uid, root_uid) &&
+ !uid_eq(new->euid, root_uid) &&
+ !uid_eq(new->suid, root_uid))) {
+ if (!issecure(SECURE_KEEP_CAPS)) {
+ cap_clear(new->cap_permitted);
+ cap_clear(new->cap_effective);
+ }
+
+ /*
+ * Pre-ambient programs expect setresuid to nonroot followed
+ * by exec to drop capabilities. We should make sure that
+ * this remains the case.
+ */
+ cap_clear(new->cap_ambient);
+ }
+ if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
+ cap_clear(new->cap_effective);
+ if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
+ new->cap_effective = new->cap_permitted;
+}
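
The root-to-nonroot case condenses to the sketch below (single-word sets, invented values). Note that even with SECURE_KEEP_CAPS the effective set is cleared once euid leaves 0; only the permitted set survives, so a keepcaps daemon must capset() to re-raise pE.

#include <stdio.h>
#include <stdbool.h>

struct creds { unsigned int pP, pE, pA; };

/* models cap_emulate_setxuid() for "all of {r,e,s}uid leave 0" */
static void setuid_away_from_root(struct creds *c, bool keep_caps)
{
        if (!keep_caps) {       /* SECURE_KEEP_CAPS not set */
                c->pP = 0;
                c->pE = 0;
        }
        c->pA = 0;              /* ambient is cleared either way        */
        c->pE = 0;              /* euid 0 -> !0 always clears effective */
}

int main(void)
{
        struct creds c = { 0xff, 0xff, 0x0f };

        setuid_away_from_root(&c, true);
        printf("keepcaps: pP=0x%02x pE=0x%02x pA=0x%02x\n", c.pP, c.pE, c.pA);

        c = (struct creds){ 0xff, 0xff, 0x0f };
        setuid_away_from_root(&c, false);
        printf("default:  pP=0x%02x pE=0x%02x pA=0x%02x\n", c.pP, c.pE, c.pA);
        return 0;
}
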
+
+/**
+ * cap_task_fix_setuid - Fix up the results of setuid() call
+ * @new: The proposed credentials
+ * @old: The current task's current credentials
+ * @flags: Indications of what has changed
+ *
+ * Fix up the results of setuid() call before the credential changes are
+ * actually applied.
+ *
+ * Return: 0 to grant the changes, -ve to deny them.
+ */
+int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
+{
+ switch (flags) {
+ case LSM_SETID_RE:
+ case LSM_SETID_ID:
+ case LSM_SETID_RES:
+ /* juggle the capabilities to follow [RES]UID changes unless
+ * otherwise suppressed */
+ if (!issecure(SECURE_NO_SETUID_FIXUP))
+ cap_emulate_setxuid(new, old);
+ break;
+
+ case LSM_SETID_FS:
+		/* juggle the capabilities to follow FSUID changes, unless
+ * otherwise suppressed
+ *
+ * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
+ * if not, we might be a bit too harsh here.
+ */
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+ if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
+ new->cap_effective =
+ cap_drop_fs_set(new->cap_effective);
+
+ if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
+ new->cap_effective =
+ cap_raise_fs_set(new->cap_effective,
+ new->cap_permitted);
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Rationale: code calling task_setscheduler, task_setioprio, and
+ * task_setnice, assumes that
+ * . if capable(cap_sys_nice), then those actions should be allowed
+ * . if not capable(cap_sys_nice), but acting on your own processes,
+ * then those actions should be allowed
+ * This is insufficient now since you can call code without suid, but
+ * yet with increased caps.
+ * So we check for increased caps on the target process.
+ */
+static int cap_safe_nice(struct task_struct *p)
+{
+ int is_subset, ret = 0;
+
+ rcu_read_lock();
+ is_subset = cap_issubset(__task_cred(p)->cap_permitted,
+ current_cred()->cap_permitted);
+ if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
+ ret = -EPERM;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * cap_task_setscheduler - Determine if scheduler policy change is permitted
+ * @p: The task to affect
+ *
+ * Determine if the requested scheduler policy change is permitted for the
+ * specified task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setscheduler(struct task_struct *p)
+{
+ return cap_safe_nice(p);
+}
+
+/**
+ * cap_task_setioprio - Determine if I/O priority change is permitted
+ * @p: The task to affect
+ * @ioprio: The I/O priority to set
+ *
+ * Determine if the requested I/O priority change is permitted for the specified
+ * task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return cap_safe_nice(p);
+}
+
+/**
+ * cap_task_setnice - Determine if task priority change is permitted
+ * @p: The task to affect
+ * @nice: The nice value to set
+ *
+ * Determine if the requested task priority change is permitted for the
+ * specified task.
+ *
+ * Return: 0 if permission is granted, -ve if denied.
+ */
+int cap_task_setnice(struct task_struct *p, int nice)
+{
+ return cap_safe_nice(p);
+}
+
+/*
+ * Implement PR_CAPBSET_DROP. Attempt to remove the specified capability from
+ * the current task's bounding set. Returns 0 on success, -ve on error.
+ */
+static int cap_prctl_drop(unsigned long cap)
+{
+ struct cred *new;
+
+ if (!ns_capable(current_user_ns(), CAP_SETPCAP))
+ return -EPERM;
+ if (!cap_valid(cap))
+ return -EINVAL;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ cap_lower(new->cap_bset, cap);
+ return commit_creds(new);
+}
+
+/**
+ * cap_task_prctl - Implement process control functions for this security module
+ * @option: The process control function requested
+ * @arg2: The argument data for this function
+ * @arg3: The argument data for this function
+ * @arg4: The argument data for this function
+ * @arg5: The argument data for this function
+ *
+ * Allow process control functions (sys_prctl()) to alter capabilities; may
+ * also deny access to other functions not otherwise implemented here.
+ *
+ * Return: 0 or +ve on success, -ENOSYS if this function is not implemented
+ * here, other -ve on error. If -ENOSYS is returned, sys_prctl() and other LSM
+ * modules will consider performing the function.
+ */
+int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ const struct cred *old = current_cred();
+ struct cred *new;
+
+ switch (option) {
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ return -EINVAL;
+ return !!cap_raised(old->cap_bset, arg2);
+
+ case PR_CAPBSET_DROP:
+ return cap_prctl_drop(arg2);
+
+ /*
+ * The next four prctl's remain to assist with transitioning a
+ * system from legacy UID=0 based privilege (when filesystem
+ * capabilities are not in use) to a system using filesystem
+ * capabilities only - as the POSIX.1e draft intended.
+ *
+ * Note:
+ *
+ * PR_SET_SECUREBITS =
+ * issecure_mask(SECURE_KEEP_CAPS_LOCKED)
+ * | issecure_mask(SECURE_NOROOT)
+ * | issecure_mask(SECURE_NOROOT_LOCKED)
+ * | issecure_mask(SECURE_NO_SETUID_FIXUP)
+ * | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
+ *
+ * will ensure that the current process and all of its
+ * children will be locked into a pure
+ * capability-based-privilege environment.
+ */
+ case PR_SET_SECUREBITS:
+ if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
+ & (old->securebits ^ arg2)) /*[1]*/
+ || ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
+ || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
+ || (cap_capable(current_cred(),
+ current_cred()->user_ns,
+ CAP_SETPCAP,
+ CAP_OPT_NONE) != 0) /*[4]*/
+ /*
+ * [1] no changing of bits that are locked
+ * [2] no unlocking of locks
+ * [3] no setting of unsupported bits
+ * [4] doing anything requires privilege (go read about
+ * the "sendmail capabilities bug")
+ */
+ )
+ /* cannot change a locked bit */
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ new->securebits = arg2;
+ return commit_creds(new);
+
+ case PR_GET_SECUREBITS:
+ return old->securebits;
+
+ case PR_GET_KEEPCAPS:
+ return !!issecure(SECURE_KEEP_CAPS);
+
+ case PR_SET_KEEPCAPS:
+ if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
+ return -EINVAL;
+ if (issecure(SECURE_KEEP_CAPS_LOCKED))
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ if (arg2)
+ new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
+ else
+ new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+ return commit_creds(new);
+
+ case PR_CAP_AMBIENT:
+ if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
+ if (arg3 | arg4 | arg5)
+ return -EINVAL;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ cap_clear(new->cap_ambient);
+ return commit_creds(new);
+ }
+
+ if (((!cap_valid(arg3)) | arg4 | arg5))
+ return -EINVAL;
+
+ if (arg2 == PR_CAP_AMBIENT_IS_SET) {
+ return !!cap_raised(current_cred()->cap_ambient, arg3);
+ } else if (arg2 != PR_CAP_AMBIENT_RAISE &&
+ arg2 != PR_CAP_AMBIENT_LOWER) {
+ return -EINVAL;
+ } else {
+ if (arg2 == PR_CAP_AMBIENT_RAISE &&
+ (!cap_raised(current_cred()->cap_permitted, arg3) ||
+ !cap_raised(current_cred()->cap_inheritable,
+ arg3) ||
+ issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ if (arg2 == PR_CAP_AMBIENT_RAISE)
+ cap_raise(new->cap_ambient, arg3);
+ else
+ cap_lower(new->cap_ambient, arg3);
+ return commit_creds(new);
+ }
+
+ default:
+ /* No functionality available - continue with default */
+ return -ENOSYS;
+ }
+}
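+
+/*
+ * A hedged userspace sketch (illustrative only) of the PR_CAP_AMBIENT
+ * handling above: raising succeeds only if the capability is already in
+ * both the permitted and inheritable sets and
+ * SECURE_NO_CAP_AMBIENT_RAISE is not set.
+ *
+ *	#include <sys/prctl.h>
+ *	#include <linux/capability.h>
+ *
+ *	prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
+ *	      CAP_NET_BIND_SERVICE, 0, 0);
+ */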
+
+/**
+ * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
+ * @mm: The VM space in which the new mapping is to be made
+ * @pages: The size of the mapping
+ *
+ * Determine whether the allocation of a new virtual mapping by the current
+ * task is permitted.
+ *
+ * Return: 1 if permission is granted, 0 if not.
+ */
+int cap_vm_enough_memory(struct mm_struct *mm, long pages)
+{
+ int cap_sys_admin = 0;
+
+ if (cap_capable(current_cred(), &init_user_ns,
+ CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) == 0)
+ cap_sys_admin = 1;
+
+ return cap_sys_admin;
+}
+
+/**
+ * cap_mmap_addr - check if able to map given addr
+ * @addr: address attempting to be mapped
+ *
+ * If the process is attempting to map memory below dac_mmap_min_addr, it
+ * needs CAP_SYS_RAWIO. The other parameters to this function are unused by
+ * the capability security module.
+ *
+ * Return: 0 if this mapping should be allowed or -EPERM if not.
+ */
+int cap_mmap_addr(unsigned long addr)
+{
+ int ret = 0;
+
+ if (addr < dac_mmap_min_addr) {
+ ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
+ CAP_OPT_NONE);
+ /* set PF_SUPERPRIV if it turns out we allow the low mmap */
+ if (ret == 0)
+ current->flags |= PF_SUPERPRIV;
+ }
+ return ret;
+}
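+
+/*
+ * Informational note: dac_mmap_min_addr is driven by the
+ * vm.mmap_min_addr sysctl. With a typical value of 65536, for example,
+ * a MAP_FIXED request below that address succeeds only for callers
+ * granted CAP_SYS_RAWIO by the check above.
+ */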
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SECURITY
+
+static struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(capable, cap_capable),
+ LSM_HOOK_INIT(settime, cap_settime),
+ LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, cap_ptrace_traceme),
+ LSM_HOOK_INIT(capget, cap_capget),
+ LSM_HOOK_INIT(capset, cap_capset),
+ LSM_HOOK_INIT(bprm_creds_from_file, cap_bprm_creds_from_file),
+ LSM_HOOK_INIT(inode_need_killpriv, cap_inode_need_killpriv),
+ LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
+ LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
+ LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
+ LSM_HOOK_INIT(mmap_file, cap_mmap_file),
+ LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
+ LSM_HOOK_INIT(task_prctl, cap_task_prctl),
+ LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
+ LSM_HOOK_INIT(task_setioprio, cap_task_setioprio),
+ LSM_HOOK_INIT(task_setnice, cap_task_setnice),
+ LSM_HOOK_INIT(vm_enough_memory, cap_vm_enough_memory),
+};
+
+static int __init capability_init(void)
+{
+ security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks),
+ "capability");
+ return 0;
+}
+
+DEFINE_LSM(capability) = {
+ .name = "capability",
+ .order = LSM_ORDER_FIRST,
+ .init = capability_init,
+};
+
+#endif /* CONFIG_SECURITY */
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
new file mode 100644
index 000000000..bef2b9285
--- /dev/null
+++ b/security/device_cgroup.c
@@ -0,0 +1,877 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * device_cgroup.c - device cgroup subsystem
+ *
+ * Copyright 2007 IBM Corp
+ */
+
+#include <linux/bpf-cgroup.h>
+#include <linux/device_cgroup.h>
+#include <linux/cgroup.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_CGROUP_DEVICE
+
+static DEFINE_MUTEX(devcgroup_mutex);
+
+enum devcg_behavior {
+ DEVCG_DEFAULT_NONE,
+ DEVCG_DEFAULT_ALLOW,
+ DEVCG_DEFAULT_DENY,
+};
+
+/*
+ * exception list locking rules:
+ * hold devcgroup_mutex for update/read.
+ * hold rcu_read_lock() for read.
+ */
+
+struct dev_exception_item {
+ u32 major, minor;
+ short type;
+ short access;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct dev_cgroup {
+ struct cgroup_subsys_state css;
+ struct list_head exceptions;
+ enum devcg_behavior behavior;
+};
+
+static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
+{
+ return s ? container_of(s, struct dev_cgroup, css) : NULL;
+}
+
+static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
+{
+ return css_to_devcgroup(task_css(task, devices_cgrp_id));
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
+{
+ struct dev_exception_item *ex, *tmp, *new;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry(ex, orig, list) {
+ new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+ if (!new)
+ goto free_and_exit;
+ list_add_tail(&new->list, dest);
+ }
+
+ return 0;
+
+free_and_exit:
+ list_for_each_entry_safe(ex, tmp, dest, list) {
+ list_del(&ex->list);
+ kfree(ex);
+ }
+ return -ENOMEM;
+}
+
+static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
+{
+ struct dev_exception_item *ex, *tmp;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry_safe(ex, tmp, orig, list) {
+ list_move_tail(&ex->list, dest);
+ }
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static int dev_exception_add(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
+{
+ struct dev_exception_item *excopy, *walk;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+ if (!excopy)
+ return -ENOMEM;
+
+ list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
+ continue;
+ if (walk->major != ex->major)
+ continue;
+ if (walk->minor != ex->minor)
+ continue;
+
+ walk->access |= ex->access;
+ kfree(excopy);
+ excopy = NULL;
+ }
+
+ if (excopy != NULL)
+ list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
+ return 0;
+}
+
+/*
+ * called under devcgroup_mutex
+ */
+static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
+{
+ struct dev_exception_item *walk, *tmp;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
+ continue;
+ if (walk->major != ex->major)
+ continue;
+ if (walk->minor != ex->minor)
+ continue;
+
+ walk->access &= ~ex->access;
+ if (!walk->access) {
+ list_del_rcu(&walk->list);
+ kfree_rcu(walk, rcu);
+ }
+ }
+}
+
+static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ struct dev_exception_item *ex, *tmp;
+
+ list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
+ list_del_rcu(&ex->list);
+ kfree_rcu(ex, rcu);
+ }
+}
+
+/**
+ * dev_exception_clean - frees all entries of the exception list
+ * @dev_cgroup: dev_cgroup with the exception list to be cleaned
+ *
+ * called under devcgroup_mutex
+ */
+static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ lockdep_assert_held(&devcgroup_mutex);
+
+ __dev_exception_clean(dev_cgroup);
+}
+
+static inline bool is_devcg_online(const struct dev_cgroup *devcg)
+{
+ return (devcg->behavior != DEVCG_DEFAULT_NONE);
+}
+
+/**
+ * devcgroup_online - initializes devcgroup's behavior and exceptions based on
+ * parent's
+ * @css: css getting online
+ * returns 0 in case of success, error code otherwise
+ */
+static int devcgroup_online(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+ struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
+ int ret = 0;
+
+ mutex_lock(&devcgroup_mutex);
+
+ if (parent_dev_cgroup == NULL)
+ dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ else {
+ ret = dev_exceptions_copy(&dev_cgroup->exceptions,
+ &parent_dev_cgroup->exceptions);
+ if (!ret)
+ dev_cgroup->behavior = parent_dev_cgroup->behavior;
+ }
+ mutex_unlock(&devcgroup_mutex);
+
+ return ret;
+}
+
+static void devcgroup_offline(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+ mutex_lock(&devcgroup_mutex);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+ mutex_unlock(&devcgroup_mutex);
+}
+
+/*
+ * called from kernel/cgroup.c with cgroup_lock() held.
+ */
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct dev_cgroup *dev_cgroup;
+
+ dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
+ if (!dev_cgroup)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&dev_cgroup->exceptions);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+
+ return &dev_cgroup->css;
+}
+
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+ __dev_exception_clean(dev_cgroup);
+ kfree(dev_cgroup);
+}
+
+#define DEVCG_ALLOW 1
+#define DEVCG_DENY 2
+#define DEVCG_LIST 3
+
+#define MAJMINLEN 13
+#define ACCLEN 4
+
+static void set_access(char *acc, short access)
+{
+ int idx = 0;
+ memset(acc, 0, ACCLEN);
+ if (access & DEVCG_ACC_READ)
+ acc[idx++] = 'r';
+ if (access & DEVCG_ACC_WRITE)
+ acc[idx++] = 'w';
+ if (access & DEVCG_ACC_MKNOD)
+ acc[idx++] = 'm';
+}
+
+static char type_to_char(short type)
+{
+ if (type == DEVCG_DEV_ALL)
+ return 'a';
+ if (type == DEVCG_DEV_CHAR)
+ return 'c';
+ if (type == DEVCG_DEV_BLOCK)
+ return 'b';
+ return 'X';
+}
+
+static void set_majmin(char *str, unsigned m)
+{
+ if (m == ~0)
+ strcpy(str, "*");
+ else
+ sprintf(str, "%u", m);
+}
+
+static int devcgroup_seq_show(struct seq_file *m, void *v)
+{
+ struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
+ struct dev_exception_item *ex;
+ char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
+
+ rcu_read_lock();
+ /*
+	 * To preserve compatibility:
+	 * - Only show "all devices" when the default policy is to allow
+	 * - List the exceptions when the default policy is to deny
+	 * This way, the file remains a "whitelist of devices"
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ set_access(acc, DEVCG_ACC_MASK);
+ set_majmin(maj, ~0);
+ set_majmin(min, ~0);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL),
+ maj, min, acc);
+ } else {
+ list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
+ set_access(acc, ex->access);
+ set_majmin(maj, ex->major);
+ set_majmin(min, ex->minor);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
+ maj, min, acc);
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/**
+ * match_exception - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
+ */
+static bool match_exception(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
+{
+ struct dev_exception_item *ex;
+
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
+ continue;
+ if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
+ continue;
+ if (ex->major != ~0 && ex->major != major)
+ continue;
+ if (ex->minor != ~0 && ex->minor != minor)
+ continue;
+ /* provided access cannot have more than the exception rule */
+ if (access & (~ex->access))
+ continue;
+ return true;
+ }
+ return false;
+}
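+
+/*
+ * Worked example (illustrative): with "b 8:* rwm" on the exception
+ * list, a request for block device 8:1 with "r" access is a complete
+ * match, since the wildcard minor covers 1 and "r" is a subset of
+ * "rwm"; a request for "rw" against an exception allowing only "r"
+ * would not match.
+ */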
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden
+ * by any of the exceptions in the list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
+{
+ struct dev_exception_item *ex;
+
+ list_for_each_entry_rcu(ex, exceptions, list,
+ lockdep_is_held(&devcgroup_mutex)) {
+ if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
+ continue;
+ if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
+ continue;
+ /*
+ * We must be sure that both the exception and the provided
+ * range aren't masking all devices
+ */
+ if (ex->major != ~0 && major != ~0 && ex->major != major)
+ continue;
+ if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+ continue;
+ /*
+ * In order to make sure the provided range isn't matching
+ * an exception, all its access bits shouldn't match the
+ * exception's access bits
+ */
+ if (!(access & ex->access))
+ continue;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *refex,
+ enum devcg_behavior behavior)
+{
+ bool match = false;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&devcgroup_mutex),
+ "device_cgroup:verify_new_ex called without proper synchronization");
+
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ if (behavior == DEVCG_DEFAULT_ALLOW) {
+ /*
+ * new exception in the child doesn't matter, only
+ * adding extra restrictions
+ */
+ return true;
+ } else {
+ /*
+ * new exception in the child will add more devices
+			 * that can be accessed, so it can't match any of
+ * parent's exceptions, even slightly
+ */
+ match = match_exception_partial(&dev_cgroup->exceptions,
+ refex->type,
+ refex->major,
+ refex->minor,
+ refex->access);
+
+ if (match)
+ return false;
+ return true;
+ }
+ } else {
+ /*
+ * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+ * the new exception will add access to more devices and must
+		 * be contained completely in a parent's exception to be
+ * allowed
+ */
+ match = match_exception(&dev_cgroup->exceptions, refex->type,
+ refex->major, refex->minor,
+ refex->access);
+
+ if (match)
+ /* parent has an exception that matches the proposed */
+ return true;
+ else
+ return false;
+ }
+ return false;
+}
+
+/*
+ * parent_has_perm:
+ * when adding a new allow rule to a device exception list, the rule
+ *	must be allowed in the parent device cgroup
+ */
+static int parent_has_perm(struct dev_cgroup *childcg,
+ struct dev_exception_item *ex)
+{
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+ if (!parent)
+ return 1;
+ return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+ struct dev_exception_item *ex)
+{
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+ if (!parent)
+ return true;
+
+ /* It's always allowed to remove access to devices */
+ if (childcg->behavior == DEVCG_DEFAULT_DENY)
+ return true;
+
+ /*
+	 * Make sure you're not removing part of, or the whole of, an
+	 * exception that exists in the parent cgroup
+ */
+ return !match_exception_partial(&parent->exceptions, ex->type,
+ ex->major, ex->minor, ex->access);
+}
+
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ * allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+ if (!parent)
+ return 1;
+ return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
+/**
+ * revalidate_active_exceptions - walks through the active exception list and
+ * revalidates the exceptions based on parent's
+ * behavior and exceptions. The exceptions that
+ * are no longer valid will be removed.
+ * Called with devcgroup_mutex held.
+ * @devcg: cgroup whose exceptions will be checked
+ *
+ * This is one of the three key functions for hierarchy implementation.
+ * This function is responsible for re-evaluating all the cgroup's active
+ * exceptions due to a parent's exception change.
+ * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
+ */
+static void revalidate_active_exceptions(struct dev_cgroup *devcg)
+{
+ struct dev_exception_item *ex;
+ struct list_head *this, *tmp;
+
+ list_for_each_safe(this, tmp, &devcg->exceptions) {
+ ex = container_of(this, struct dev_exception_item, list);
+ if (!parent_has_perm(devcg, ex))
+ dev_exception_rm(devcg, ex);
+ }
+}
+
+/**
+ * propagate_exception - propagates a new exception to the children
+ * @devcg_root: device cgroup that added a new exception
+ * @ex: new exception to be propagated
+ *
+ * returns: 0 in case of success, != 0 in case of error
+ */
+static int propagate_exception(struct dev_cgroup *devcg_root,
+ struct dev_exception_item *ex)
+{
+ struct cgroup_subsys_state *pos;
+ int rc = 0;
+
+ rcu_read_lock();
+
+ css_for_each_descendant_pre(pos, &devcg_root->css) {
+ struct dev_cgroup *devcg = css_to_devcgroup(pos);
+
+ /*
+ * Because devcgroup_mutex is held, no devcg will become
+ * online or offline during the tree walk (see on/offline
+ * methods), and online ones are safe to access outside RCU
+ * read lock without bumping refcnt.
+ */
+ if (pos == &devcg_root->css || !is_devcg_online(devcg))
+ continue;
+
+ rcu_read_unlock();
+
+ /*
+		 * if both the root's and the devcg's behavior is allow, a
+		 * new restriction means adding to the exception list
+ */
+ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
+ devcg->behavior == DEVCG_DEFAULT_ALLOW) {
+ rc = dev_exception_add(devcg, ex);
+ if (rc)
+ return rc;
+ } else {
+ /*
+ * in the other possible cases:
+ * root's behavior: allow, devcg's: deny
+ * root's behavior: deny, devcg's: deny
+ * the exception will be removed
+ */
+ dev_exception_rm(devcg, ex);
+ }
+ revalidate_active_exceptions(devcg);
+
+ rcu_read_lock();
+ }
+
+ rcu_read_unlock();
+ return rc;
+}
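+
+/*
+ * Worked example (illustrative): if the root devcg and a child both use
+ * DEVCG_DEFAULT_ALLOW and the root gains a deny rule for "c 1:3 rwm",
+ * the walk above adds that rule to the child's exception list; if the
+ * child is DEVCG_DEFAULT_DENY instead, the same rule is removed from
+ * its allow-exceptions. Either way the child's access only shrinks.
+ */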
+
+/*
+ * Modify the exception list using allow/deny rules.
+ * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
+ * so we can give a container CAP_MKNOD to let it create devices but not
+ * modify the exception list.
+ * It seems likely we'll want to add a CAP_CONTAINER capability to allow
+ * us to also grant CAP_SYS_ADMIN to containers without giving away the
+ * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
+ *
+ * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
+ * new access is only allowed if you're in the top-level cgroup, or your
+ * parent cgroup has the access you're asking for.
+ */
+static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+ int filetype, char *buffer)
+{
+ const char *b;
+ char temp[12]; /* 11 + 1 characters needed for a u32 */
+ int count, rc = 0;
+ struct dev_exception_item ex;
+ struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
+ struct dev_cgroup tmp_devcgrp;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ memset(&ex, 0, sizeof(ex));
+ memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
+ b = buffer;
+
+ switch (*b) {
+ case 'a':
+ switch (filetype) {
+ case DEVCG_ALLOW:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ if (!may_allow_all(parent))
+ return -EPERM;
+ if (!parent) {
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ dev_exception_clean(devcgroup);
+ break;
+ }
+
+ INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
+ rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
+ &devcgroup->exceptions);
+ if (rc)
+ return rc;
+ dev_exception_clean(devcgroup);
+ rc = dev_exceptions_copy(&devcgroup->exceptions,
+ &parent->exceptions);
+ if (rc) {
+ dev_exceptions_move(&devcgroup->exceptions,
+ &tmp_devcgrp.exceptions);
+ return rc;
+ }
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ dev_exception_clean(&tmp_devcgrp);
+ break;
+ case DEVCG_DENY:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ dev_exception_clean(devcgroup);
+ devcgroup->behavior = DEVCG_DEFAULT_DENY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ case 'b':
+ ex.type = DEVCG_DEV_BLOCK;
+ break;
+ case 'c':
+ ex.type = DEVCG_DEV_CHAR;
+ break;
+ default:
+ return -EINVAL;
+ }
+ b++;
+ if (!isspace(*b))
+ return -EINVAL;
+ b++;
+ if (*b == '*') {
+ ex.major = ~0;
+ b++;
+ } else if (isdigit(*b)) {
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.major);
+ if (rc)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ if (*b != ':')
+ return -EINVAL;
+ b++;
+
+ /* read minor */
+ if (*b == '*') {
+ ex.minor = ~0;
+ b++;
+ } else if (isdigit(*b)) {
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.minor);
+ if (rc)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ if (!isspace(*b))
+ return -EINVAL;
+ for (b++, count = 0; count < 3; count++, b++) {
+ switch (*b) {
+ case 'r':
+ ex.access |= DEVCG_ACC_READ;
+ break;
+ case 'w':
+ ex.access |= DEVCG_ACC_WRITE;
+ break;
+ case 'm':
+ ex.access |= DEVCG_ACC_MKNOD;
+ break;
+ case '\n':
+ case '\0':
+ count = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (filetype) {
+ case DEVCG_ALLOW:
+ /*
+	 * If the default policy is to allow, try to remove a matching
+	 * exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ /* Check if the parent allows removing it first */
+ if (!parent_allows_removal(devcgroup, &ex))
+ return -EPERM;
+ dev_exception_rm(devcgroup, &ex);
+ break;
+ }
+
+ if (!parent_has_perm(devcgroup, &ex))
+ return -EPERM;
+ rc = dev_exception_add(devcgroup, &ex);
+ break;
+ case DEVCG_DENY:
+ /*
+	 * If the default policy is to deny, try to remove a matching
+	 * exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
+ dev_exception_rm(devcgroup, &ex);
+ else
+ rc = dev_exception_add(devcgroup, &ex);
+
+ if (rc)
+ break;
+ /* we only propagate new restrictions */
+ rc = propagate_exception(devcgroup, &ex);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ return rc;
+}
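+
+/*
+ * Illustrative shell usage (not part of this file) of the rule syntax
+ * parsed above, "<type> <major>:<minor> <access>". The device numbers
+ * are conventional examples (char 1:3 is /dev/null):
+ *
+ *	echo 'a' > devices.deny			# deny-by-default behavior
+ *	echo 'c 1:3 rwm' > devices.allow	# allow /dev/null
+ *	echo 'b *:* m' > devices.allow		# allow mknod of block devices
+ */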
+
+static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ int retval;
+
+ mutex_lock(&devcgroup_mutex);
+ retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
+ of_cft(of)->private, strstrip(buf));
+ mutex_unlock(&devcgroup_mutex);
+ return retval ?: nbytes;
+}
+
+static struct cftype dev_cgroup_files[] = {
+ {
+ .name = "allow",
+ .write = devcgroup_access_write,
+ .private = DEVCG_ALLOW,
+ },
+ {
+ .name = "deny",
+ .write = devcgroup_access_write,
+ .private = DEVCG_DENY,
+ },
+ {
+ .name = "list",
+ .seq_show = devcgroup_seq_show,
+ .private = DEVCG_LIST,
+ },
+ { } /* terminate */
+};
+
+struct cgroup_subsys devices_cgrp_subsys = {
+ .css_alloc = devcgroup_css_alloc,
+ .css_free = devcgroup_css_free,
+ .css_online = devcgroup_online,
+ .css_offline = devcgroup_offline,
+ .legacy_cftypes = dev_cgroup_files,
+};
+
+/**
+ * devcgroup_legacy_check_permission - checks if an inode operation is permitted
+ * @type: device type
+ * @major: device major number
+ * @minor: device minor number
+ * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
+ *
+ * Returns: 0 on success, -EPERM in case the operation is not permitted
+ */
+static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
+ short access)
+{
+ struct dev_cgroup *dev_cgroup;
+ bool rc;
+
+ rcu_read_lock();
+ dev_cgroup = task_devcgroup(current);
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+ /* Can't match any of the exceptions, even partially */
+ rc = !match_exception_partial(&dev_cgroup->exceptions,
+ type, major, minor, access);
+ else
+ /* Need to match completely one exception to be allowed */
+ rc = match_exception(&dev_cgroup->exceptions, type, major,
+ minor, access);
+ rcu_read_unlock();
+
+ if (!rc)
+ return -EPERM;
+
+ return 0;
+}
+
+#endif /* CONFIG_CGROUP_DEVICE */
+
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+
+int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
+{
+ int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
+
+ if (rc)
+ return rc;
+
+ #ifdef CONFIG_CGROUP_DEVICE
+ return devcgroup_legacy_check_permission(type, major, minor, access);
+
+ #else /* CONFIG_CGROUP_DEVICE */
+ return 0;
+
+ #endif /* CONFIG_CGROUP_DEVICE */
+}
+EXPORT_SYMBOL(devcgroup_check_permission);
+#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
diff --git a/security/inode.c b/security/inode.c
new file mode 100644
index 000000000..6c3269397
--- /dev/null
+++ b/security/inode.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * inode.c - securityfs
+ *
+ * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ * Based on fs/debugfs/inode.c which had the following copyright notice:
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Inc.
+ */
+
+/* #define DEBUG */
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/lsm_hooks.h>
+#include <linux/magic.h>
+
+static struct vfsmount *mount;
+static int mount_count;
+
+static void securityfs_free_inode(struct inode *inode)
+{
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static const struct super_operations securityfs_super_operations = {
+ .statfs = simple_statfs,
+ .free_inode = securityfs_free_inode,
+};
+
+static int securityfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ static const struct tree_descr files[] = {{""}};
+ int error;
+
+ error = simple_fill_super(sb, SECURITYFS_MAGIC, files);
+ if (error)
+ return error;
+
+ sb->s_op = &securityfs_super_operations;
+
+ return 0;
+}
+
+static int securityfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, securityfs_fill_super);
+}
+
+static const struct fs_context_operations securityfs_context_ops = {
+ .get_tree = securityfs_get_tree,
+};
+
+static int securityfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &securityfs_context_ops;
+ return 0;
+}
+
+static struct file_system_type fs_type = {
+ .owner = THIS_MODULE,
+ .name = "securityfs",
+ .init_fs_context = securityfs_init_fs_context,
+ .kill_sb = kill_litter_super,
+};
+
+/**
+ * securityfs_create_dentry - create a dentry in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the securityfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ * this file.
+ * @iops: a pointer to a struct inode_operations that should be used for
+ * this file/dir
+ *
+ * This is the basic "create a file/dir/symlink" function for
+ * securityfs. It allows for a wide range of flexibility in creating
+ * a file, or a directory (if you want to create a directory, the
+ * securityfs_create_dir() function is recommended to be used
+ * instead).
+ *
+ * This function returns a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the securityfs_remove() function when the
+ * file is to be removed (no automatic cleanup happens if your module
+ * is unloaded, you are responsible here). If an error occurs, the
+ * function will return the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ const struct inode_operations *iops)
+{
+ struct dentry *dentry;
+ struct inode *dir, *inode;
+ int error;
+
+ if (!(mode & S_IFMT))
+ mode = (mode & S_IALLUGO) | S_IFREG;
+
+	pr_debug("securityfs: creating file '%s'\n", name);
+
+ error = simple_pin_fs(&fs_type, &mount, &mount_count);
+ if (error)
+ return ERR_PTR(error);
+
+ if (!parent)
+ parent = mount->mnt_root;
+
+ dir = d_inode(parent);
+
+ inode_lock(dir);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ goto out;
+
+ if (d_really_is_positive(dentry)) {
+ error = -EEXIST;
+ goto out1;
+ }
+
+ inode = new_inode(dir->i_sb);
+ if (!inode) {
+ error = -ENOMEM;
+ goto out1;
+ }
+
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_private = data;
+ if (S_ISDIR(mode)) {
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ } else if (S_ISLNK(mode)) {
+ inode->i_op = iops ? iops : &simple_symlink_inode_operations;
+ inode->i_link = data;
+ } else {
+ inode->i_fop = fops;
+ }
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ inode_unlock(dir);
+ return dentry;
+
+out1:
+ dput(dentry);
+ dentry = ERR_PTR(error);
+out:
+ inode_unlock(dir);
+ simple_release_fs(&mount, &mount_count);
+ return dentry;
+}
+
+/**
+ * securityfs_create_file - create a file in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the securityfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ * this file.
+ *
+ * This function creates a file in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here). If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
+{
+ return securityfs_create_dentry(name, mode, parent, data, fops, NULL);
+}
+EXPORT_SYMBOL_GPL(securityfs_create_file);
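+
+/*
+ * A minimal, hypothetical calling sketch (names below are invented for
+ * illustration) showing the intended pattern for an LSM:
+ *
+ *	static struct dentry *example_dentry;
+ *
+ *	static int __init example_fs_init(void)
+ *	{
+ *		example_dentry = securityfs_create_file("example", 0444,
+ *							NULL, NULL,
+ *							&example_fops);
+ *		return PTR_ERR_OR_ZERO(example_dentry);
+ *	}
+ *
+ * The returned dentry must eventually be passed to securityfs_remove().
+ */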
+
+/**
+ * securityfs_create_dir - create a directory in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the directory to
+ * create.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * directory will be created in the root of the securityfs filesystem.
+ *
+ * This function creates a directory in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here). If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
+{
+ return securityfs_create_file(name, S_IFDIR | 0755, parent, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(securityfs_create_dir);
+
+/**
+ * securityfs_create_symlink - create a symlink in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the symlink to
+ * create.
+ * @parent: a pointer to the parent dentry for the symlink. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * directory will be created in the root of the securityfs filesystem.
+ * @target: a pointer to a string containing the name of the symlink's target.
+ * If this parameter is %NULL, then the @iops parameter needs to be
+ * setup to handle .readlink and .get_link inode_operations.
+ * @iops: a pointer to the struct inode_operations to use for the symlink. If
+ * this parameter is %NULL, then the default simple_symlink_inode
+ * operations will be used.
+ *
+ * This function creates a symlink in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here). If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_symlink(const char *name,
+ struct dentry *parent,
+ const char *target,
+ const struct inode_operations *iops)
+{
+ struct dentry *dent;
+ char *link = NULL;
+
+ if (target) {
+ link = kstrdup(target, GFP_KERNEL);
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ }
+ dent = securityfs_create_dentry(name, S_IFLNK | 0444, parent,
+ link, NULL, iops);
+ if (IS_ERR(dent))
+ kfree(link);
+
+ return dent;
+}
+EXPORT_SYMBOL_GPL(securityfs_create_symlink);
+
+/**
+ * securityfs_remove - removes a file or directory from the securityfs filesystem
+ *
+ * @dentry: a pointer to the dentry of the file or directory to be removed.
+ *
+ * This function removes a file or directory in securityfs that was previously
+ * created with a call to another securityfs function (like
+ * securityfs_create_file() or variants thereof.)
+ *
+ * This function is required to be called in order for the file to be
+ * removed. No automatic cleanup of files will happen when a module is
+ * removed; you are responsible here.
+ */
+void securityfs_remove(struct dentry *dentry)
+{
+ struct inode *dir;
+
+ if (!dentry || IS_ERR(dentry))
+ return;
+
+ dir = d_inode(dentry->d_parent);
+ inode_lock(dir);
+ if (simple_positive(dentry)) {
+ if (d_is_dir(dentry))
+ simple_rmdir(dir, dentry);
+ else
+ simple_unlink(dir, dentry);
+ dput(dentry);
+ }
+ inode_unlock(dir);
+ simple_release_fs(&mount, &mount_count);
+}
+EXPORT_SYMBOL_GPL(securityfs_remove);
+
+#ifdef CONFIG_SECURITY
+static struct dentry *lsm_dentry;
+static ssize_t lsm_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, lsm_names,
+ strlen(lsm_names));
+}
+
+static const struct file_operations lsm_ops = {
+ .read = lsm_read,
+ .llseek = generic_file_llseek,
+};
+#endif
+
+static int __init securityfs_init(void)
+{
+ int retval;
+
+ retval = sysfs_create_mount_point(kernel_kobj, "security");
+ if (retval)
+ return retval;
+
+ retval = register_filesystem(&fs_type);
+ if (retval) {
+ sysfs_remove_mount_point(kernel_kobj, "security");
+ return retval;
+ }
+#ifdef CONFIG_SECURITY
+ lsm_dentry = securityfs_create_file("lsm", 0444, NULL, NULL,
+ &lsm_ops);
+#endif
+ return 0;
+}
+core_initcall(securityfs_init);
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 000000000..599429f99
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+config INTEGRITY
+ bool "Integrity subsystem"
+ depends on SECURITY
+ default y
+ help
+ This option enables the integrity subsystem, which is comprised
+ of a number of different components including the Integrity
+ Measurement Architecture (IMA), Extended Verification Module
+ (EVM), IMA-appraisal extension, digital signature verification
+ extension and audit measurement log support.
+
+ Each of these components can be enabled/disabled separately.
+ Refer to the individual components for additional details.
+
+if INTEGRITY
+
+config INTEGRITY_SIGNATURE
+ bool "Digital signature verification using multiple keyrings"
+ default n
+ select KEYS
+ select SIGNATURE
+ help
+ This option enables digital signature verification support
+ using multiple keyrings. It defines separate keyrings for each
+ of the different use cases - evm, ima, and modules.
+	  Separate keyrings improve search performance and also allow
+	  certain keyrings to be "locked" to prevent new keys from being
+	  added. This is useful for the evm and module keyrings, whose
+	  keys are usually only added from the initramfs.
+
+config INTEGRITY_ASYMMETRIC_KEYS
+ bool "Enable asymmetric keys support"
+ depends on INTEGRITY_SIGNATURE
+ default n
+ select ASYMMETRIC_KEY_TYPE
+ select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select CRYPTO_RSA
+ select X509_CERTIFICATE_PARSER
+ help
+ This option enables digital signature verification using
+ asymmetric keys.
+
+config INTEGRITY_TRUSTED_KEYRING
+ bool "Require all keys on the integrity keyrings be signed"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ default y
+ help
+ This option requires that all keys added to the .ima and
+ .evm keyrings be signed by a key on the system trusted
+ keyring.
+
+config INTEGRITY_PLATFORM_KEYRING
+ bool "Provide keyring for platform/firmware trusted keys"
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ depends on SYSTEM_BLACKLIST_KEYRING
+ help
+ Provide a separate, distinct keyring for platform trusted keys, which
+ the kernel automatically populates during initialization from values
+	  provided by the platform for verifying the kexec'ed kernel image
+ and, possibly, the initramfs signature.
+
+config INTEGRITY_MACHINE_KEYRING
+ bool "Provide a keyring to which Machine Owner Keys may be added"
+ depends on SECONDARY_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ depends on SYSTEM_BLACKLIST_KEYRING
+ depends on LOAD_UEFI_KEYS
+ depends on !IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ help
+ If set, provide a keyring to which Machine Owner Keys (MOK) may
+ be added. This keyring shall contain just MOK keys. Unlike keys
+ in the platform keyring, keys contained in the .machine keyring will
+ be trusted within the kernel.
+
+config LOAD_UEFI_KEYS
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on EFI
+ def_bool y
+
+config LOAD_IPL_KEYS
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on S390
+ def_bool y
+
+config LOAD_PPC_KEYS
+ bool "Enable loading of platform and blacklisted keys for POWER"
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on PPC_SECURE_BOOT
+ default y
+ help
+ Enable loading of keys to the .platform keyring and blacklisted
+ hashes to the .blacklist keyring for powerpc based platforms.
+
+config INTEGRITY_AUDIT
+	bool "Enable integrity auditing support"
+ depends on AUDIT
+ default y
+ help
+ In addition to enabling integrity auditing support, this
+ option adds a kernel parameter 'integrity_audit', which
+ controls the level of integrity auditing messages.
+ 0 - basic integrity auditing messages (default)
+ 1 - additional integrity auditing messages
+
+ Additional informational integrity auditing messages would
+ be enabled by specifying 'integrity_audit=1' on the kernel
+ command line.
+
+source "security/integrity/ima/Kconfig"
+source "security/integrity/evm/Kconfig"
+
+endif # if INTEGRITY
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 000000000..d0ffe37dc
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+integrity-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
+integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
+integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
+integrity-$(CONFIG_INTEGRITY_MACHINE_KEYRING) += platform_certs/machine_keyring.o
+integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
+ platform_certs/load_uefi.o \
+ platform_certs/keyring_handler.o
+integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
+integrity-$(CONFIG_LOAD_PPC_KEYS) += platform_certs/efi_parser.o \
+ platform_certs/load_powerpc.o \
+ platform_certs/keyring_handler.o
+obj-$(CONFIG_IMA) += ima/
+obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
new file mode 100644
index 000000000..f2193c531
--- /dev/null
+++ b/security/integrity/digsig.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/kernel_read_file.h>
+#include <linux/key-type.h>
+#include <linux/digsig.h>
+#include <linux/vmalloc.h>
+#include <crypto/public_key.h>
+#include <keys/system_keyring.h>
+
+#include "integrity.h"
+
+static struct key *keyring[INTEGRITY_KEYRING_MAX];
+
+static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
+#ifndef CONFIG_INTEGRITY_TRUSTED_KEYRING
+ "_evm",
+ "_ima",
+#else
+ ".evm",
+ ".ima",
+#endif
+ ".platform",
+ ".machine",
+};
+
+#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
+#else
+#define restrict_link_to_ima restrict_link_by_builtin_trusted
+#endif
+
+static struct key *integrity_keyring_from_id(const unsigned int id)
+{
+ if (id >= INTEGRITY_KEYRING_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (!keyring[id]) {
+ keyring[id] =
+ request_key(&key_type_keyring, keyring_name[id], NULL);
+ if (IS_ERR(keyring[id])) {
+ int err = PTR_ERR(keyring[id]);
+ pr_err("no %s keyring: %d\n", keyring_name[id], err);
+ keyring[id] = NULL;
+ return ERR_PTR(err);
+ }
+ }
+
+ return keyring[id];
+}
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ struct key *keyring;
+
+ if (siglen < 2)
+ return -EINVAL;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ switch (sig[1]) {
+ case 1:
+		/* the v1 API expects a signature without the xattr type */
+ return digsig_verify(keyring, sig + 1, siglen - 1, digest,
+ digestlen);
+ case 2: /* regular file data hash based signature */
+ case 3: /* struct ima_file_id data based signature */
+ return asymmetric_verify(keyring, sig, siglen, digest,
+ digestlen);
+ }
+
+ return -EOPNOTSUPP;
+}
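+
+/*
+ * Layout note (derived from the checks above): a signature blob begins
+ * with a one-byte xattr type followed by a one-byte version, which is
+ * why sig[1] selects the verification path and the v1 case strips the
+ * leading type byte (sig + 1, siglen - 1).
+ */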
+
+int integrity_modsig_verify(const unsigned int id, const struct modsig *modsig)
+{
+ struct key *keyring;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ return ima_modsig_verify(keyring, modsig);
+}
+
+static int __init __integrity_init_keyring(const unsigned int id,
+ key_perm_t perm,
+ struct key_restriction *restriction)
+{
+ const struct cred *cred = current_cred();
+ int err = 0;
+
+ keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
+ KGIDT_INIT(0), cred, perm,
+ KEY_ALLOC_NOT_IN_QUOTA, restriction, NULL);
+ if (IS_ERR(keyring[id])) {
+ err = PTR_ERR(keyring[id]);
+ pr_info("Can't allocate %s keyring (%d)\n",
+ keyring_name[id], err);
+ keyring[id] = NULL;
+ } else {
+ if (id == INTEGRITY_KEYRING_PLATFORM)
+ set_platform_trusted_keys(keyring[id]);
+ if (id == INTEGRITY_KEYRING_MACHINE && trust_moklist())
+ set_machine_trusted_keys(keyring[id]);
+ if (id == INTEGRITY_KEYRING_IMA)
+ load_module_cert(keyring[id]);
+ }
+
+ return err;
+}
+
+int __init integrity_init_keyring(const unsigned int id)
+{
+ struct key_restriction *restriction;
+ key_perm_t perm;
+ int ret;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW
+ | KEY_USR_READ | KEY_USR_SEARCH;
+
+ if (id == INTEGRITY_KEYRING_PLATFORM ||
+ id == INTEGRITY_KEYRING_MACHINE) {
+ restriction = NULL;
+ goto out;
+ }
+
+ if (!IS_ENABLED(CONFIG_INTEGRITY_TRUSTED_KEYRING))
+ return 0;
+
+ restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+ if (!restriction)
+ return -ENOMEM;
+
+ restriction->check = restrict_link_to_ima;
+
+ /*
+ * MOK keys can only be added through a read-only runtime services
+ * UEFI variable during boot. No additional keys shall be allowed to
+ * load into the machine keyring following init from userspace.
+ */
+ if (id != INTEGRITY_KEYRING_MACHINE)
+ perm |= KEY_USR_WRITE;
+
+out:
+ ret = __integrity_init_keyring(id, perm, restriction);
+ if (ret)
+ kfree(restriction);
+ return ret;
+}
+
+static int __init integrity_add_key(const unsigned int id, const void *data,
+ off_t size, key_perm_t perm)
+{
+ key_ref_t key;
+ int rc = 0;
+
+ if (!keyring[id])
+ return -EINVAL;
+
+ key = key_create_or_update(make_key_ref(keyring[id], 1), "asymmetric",
+ NULL, data, size, perm,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(key)) {
+ rc = PTR_ERR(key);
+ pr_err("Problem loading X.509 certificate %d\n", rc);
+ } else {
+ pr_notice("Loaded X.509 cert '%s'\n",
+ key_ref_to_ptr(key)->description);
+ key_ref_put(key);
+ }
+
+ return rc;
+
+}
+
+int __init integrity_load_x509(const unsigned int id, const char *path)
+{
+ void *data = NULL;
+ size_t size;
+ int rc;
+ key_perm_t perm;
+
+ rc = kernel_read_file_from_path(path, 0, &data, INT_MAX, NULL,
+ READING_X509_CERTIFICATE);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+ size = rc;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ;
+
+ pr_info("Loading X.509 certificate: %s\n", path);
+ rc = integrity_add_key(id, (const void *)data, size, perm);
+
+ vfree(data);
+ return rc;
+}
+
+int __init integrity_load_cert(const unsigned int id, const char *source,
+ const void *data, size_t len, key_perm_t perm)
+{
+ if (!data)
+ return -EINVAL;
+
+ pr_info("Loading X.509 certificate: %s\n", source);
+ return integrity_add_key(id, data, len, perm);
+}
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
new file mode 100644
index 000000000..895f4b9ce
--- /dev/null
+++ b/security/integrity/digsig_asymmetric.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/key-type.h>
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "integrity.h"
+
+/*
+ * Request an asymmetric key.
+ */
+static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
+{
+ struct key *key;
+ char name[12];
+
+ sprintf(name, "id:%08x", keyid);
+
+ pr_debug("key search: \"%s\"\n", name);
+
+ key = get_ima_blacklist_keyring();
+ if (key) {
+ key_ref_t kref;
+
+ kref = keyring_search(make_key_ref(key, 1),
+ &key_type_asymmetric, name, true);
+ if (!IS_ERR(kref)) {
+ pr_err("Key '%s' is in ima_blacklist_keyring\n", name);
+ return ERR_PTR(-EKEYREJECTED);
+ }
+ }
+
+ if (keyring) {
+ /* search in specific keyring */
+ key_ref_t kref;
+
+ kref = keyring_search(make_key_ref(keyring, 1),
+ &key_type_asymmetric, name, true);
+ if (IS_ERR(kref))
+ key = ERR_CAST(kref);
+ else
+ key = key_ref_to_ptr(kref);
+ } else {
+ key = request_key(&key_type_asymmetric, name, NULL);
+ }
+
+ if (IS_ERR(key)) {
+ if (keyring)
+ pr_err_ratelimited("Request for unknown key '%s' in '%s' keyring. err %ld\n",
+ name, keyring->description,
+ PTR_ERR(key));
+ else
+ pr_err_ratelimited("Request for unknown key '%s' err %ld\n",
+ name, PTR_ERR(key));
+
+ switch (PTR_ERR(key)) {
+ /* Hide some search errors */
+ case -EACCES:
+ case -ENOTDIR:
+ case -EAGAIN:
+ return ERR_PTR(-ENOKEY);
+ default:
+ return key;
+ }
+ }
+
+ pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key));
+
+ return key;
+}
+
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ struct public_key_signature pks;
+ struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+ const struct public_key *pk;
+ struct key *key;
+ int ret;
+
+ if (siglen <= sizeof(*hdr))
+ return -EBADMSG;
+
+ siglen -= sizeof(*hdr);
+
+ if (siglen != be16_to_cpu(hdr->sig_size))
+ return -EBADMSG;
+
+ if (hdr->hash_algo >= HASH_ALGO__LAST)
+ return -ENOPKG;
+
+ key = request_asymmetric_key(keyring, be32_to_cpu(hdr->keyid));
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ memset(&pks, 0, sizeof(pks));
+
+ pks.hash_algo = hash_algo_name[hdr->hash_algo];
+
+ pk = asymmetric_key_public_key(key);
+ pks.pkey_algo = pk->pkey_algo;
+ if (!strcmp(pk->pkey_algo, "rsa")) {
+ pks.encoding = "pkcs1";
+ } else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) {
+		/* ecdsa-nist-p192 etc. */
+ pks.encoding = "x962";
+ } else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+ !strcmp(pk->pkey_algo, "sm2")) {
+ pks.encoding = "raw";
+ } else {
+ ret = -ENOPKG;
+ goto out;
+ }
+
+ pks.digest = (u8 *)data;
+ pks.digest_size = datalen;
+ pks.s = hdr->sig;
+ pks.s_size = siglen;
+ ret = verify_signature(key, &pks);
+out:
+ key_put(key);
+ pr_debug("%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/**
+ * integrity_kernel_module_request - prevent crypto-pkcs1pad(rsa,*) requests
+ * @kmod_name: kernel module name
+ *
+ * In the RSA case, public_key_verify_signature() uses alg_name to store
+ * internal information in order to construct an algorithm on the fly,
+ * but crypto_larval_lookup() will try to use alg_name to load a kernel
+ * module with the same name. Since no real "crypto-pkcs1pad(rsa,*)"
+ * kernel modules exist, it is safe to fail such module requests from
+ * crypto_larval_lookup().
+ *
+ * This prevents modprobe execution during digsig verification and
+ * avoids a possible deadlock if modprobe and/or its dependencies are
+ * also signed with digsig.
+ */
+int integrity_kernel_module_request(char *kmod_name)
+{
+ if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 000000000..a6e19d23e
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config EVM
+ bool "EVM support"
+ select KEYS
+ select ENCRYPTED_KEYS
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
+
+config EVM_ATTR_FSUUID
+ bool "FSUUID (version 2)"
+ default y
+ depends on EVM
+ help
+ Include filesystem UUID for HMAC calculation.
+
+	  The default value is 'selected', which corresponds to the former
+	  version 2; if 'not selected', it corresponds to the former
+	  version 1.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation, requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_EXTRA_SMACK_XATTRS
+ bool "Additional SMACK xattrs"
+ depends on EVM && SECURITY_SMACK
+ default n
+ help
+ Include additional SMACK xattrs for HMAC calculation.
+
+ In addition to the original security xattrs (eg. security.selinux,
+ security.SMACK64, security.capability, and security.ima) included
+ in the HMAC calculation, enabling this option includes newly defined
+ Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
+ security.SMACK64MMAP.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation, requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_ADD_XATTRS
+ bool "Add additional EVM extended attributes at runtime"
+ depends on EVM
+ default n
+ help
+ Allow userland to provide additional xattrs for HMAC calculation.
+
+ When this option is enabled, root can add additional xattrs to the
+ list used by EVM by writing them into
+ /sys/kernel/security/integrity/evm/evm_xattrs.
+
+config EVM_LOAD_X509
+ bool "Load an X509 certificate onto the '.evm' trusted keyring"
+ depends on EVM && INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ Load an X509 certificate onto the '.evm' trusted keyring.
+
+ This option enables X509 certificate loading from the kernel
+ onto the '.evm' trusted keyring. A public key can be used to
+ verify EVM integrity starting from the 'init' process.
+
+config EVM_X509_PATH
+ string "EVM X509 certificate path"
+ depends on EVM_LOAD_X509
+ default "/etc/keys/x509_evm.der"
+ help
+	  This option defines the X509 certificate path.
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 000000000..a56f5613b
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for building the Extended Verification Module (EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 000000000..f8b8c5004
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * File: evm.h
+ */
+
+#ifndef __INTEGRITY_EVM_H
+#define __INTEGRITY_EVM_H
+
+#include <linux/xattr.h>
+#include <linux/security.h>
+
+#include "../integrity.h"
+
+#define EVM_INIT_HMAC 0x0001
+#define EVM_INIT_X509 0x0002
+#define EVM_ALLOW_METADATA_WRITES 0x0004
+#define EVM_SETUP_COMPLETE 0x80000000 /* userland has signaled key load */
+
+#define EVM_KEY_MASK (EVM_INIT_HMAC | EVM_INIT_X509)
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
+ EVM_ALLOW_METADATA_WRITES)
+
+struct xattr_list {
+ struct list_head list;
+ char *name;
+ bool enabled;
+};
+
+extern int evm_initialized;
+
+#define EVM_ATTR_FSUUID 0x0001
+
+extern int evm_hmac_attrs;
+
+/* List of EVM protected security xattrs */
+extern struct list_head evm_config_xattrnames;
+
+struct evm_digest {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+} __packed;
+
+int evm_init_key(void);
+int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, struct evm_digest *data);
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char type,
+ struct evm_digest *data);
+int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+int evm_init_secfs(void);
+
+#endif
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 000000000..b9395f8ef
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#define pr_fmt(fmt) "EVM: "fmt
+
+#include <linux/export.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static const int evmkey_len = MAX_KEY_SIZE;
+
+static struct crypto_shash *hmac_tfm;
+static struct crypto_shash *evm_tfm[HASH_ALGO__LAST];
+
+static DEFINE_MUTEX(mutex);
+
+#define EVM_SET_KEY_BUSY 0
+
+static unsigned long evm_set_key_flags;
+
+static const char evm_hmac[] = "hmac(sha1)";
+
+/**
+ * evm_set_key() - set EVM HMAC key from the kernel
+ * @key: pointer to a buffer with the key data
+ * @keylen: length of the key data
+ *
+ * This function allows setting the EVM HMAC key from the kernel
+ * without using the "encrypted" key subsystem keys. It can be used
+ * by the crypto HW kernel module which has its own way of managing
+ * keys.
+ *
+ * The key length should be between 32 and 128 bytes.
+ */
+int evm_set_key(void *key, size_t keylen)
+{
+ int rc;
+
+ rc = -EBUSY;
+ if (test_and_set_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags))
+ goto busy;
+ rc = -EINVAL;
+ if (keylen > MAX_KEY_SIZE)
+ goto inval;
+ memcpy(evmkey, key, keylen);
+ evm_initialized |= EVM_INIT_HMAC;
+ pr_info("key initialized\n");
+ return 0;
+inval:
+ clear_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags);
+busy:
+ pr_err("key initialization failed\n");
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_set_key);
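+
+/*
+ * Illustrative sketch (not part of this file): a driver that manages its
+ * own key material could hand EVM a random key directly; the names below
+ * are hypothetical.
+ *
+ *	static int example_load_evm_key(void)
+ *	{
+ *		u8 key[32];
+ *		int rc;
+ *
+ *		get_random_bytes(key, sizeof(key));
+ *		rc = evm_set_key(key, sizeof(key));
+ *		memzero_explicit(key, sizeof(key));
+ *		return rc;
+ *	}
+ *
+ * evm_set_key() copies the key, so the caller should wipe its local copy
+ * once the call returns.
+ */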
+
+static struct shash_desc *init_desc(char type, uint8_t hash_algo)
+{
+ long rc;
+ const char *algo;
+ struct crypto_shash **tfm, *tmp_tfm;
+ struct shash_desc *desc;
+
+ if (type == EVM_XATTR_HMAC) {
+ if (!(evm_initialized & EVM_INIT_HMAC)) {
+ pr_err_once("HMAC key is not set\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ tfm = &hmac_tfm;
+ algo = evm_hmac;
+ } else {
+ if (hash_algo >= HASH_ALGO__LAST)
+ return ERR_PTR(-EINVAL);
+
+ tfm = &evm_tfm[hash_algo];
+ algo = hash_algo_name[hash_algo];
+ }
+
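+	/*
+	 * Double-checked allocation: test outside the mutex for the fast
+	 * path, then re-test under the mutex before allocating the tfm.
+	 */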
+ if (*tfm)
+ goto alloc;
+ mutex_lock(&mutex);
+ if (*tfm)
+ goto unlock;
+
+ tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
+ if (IS_ERR(tmp_tfm)) {
+ pr_err("Can not allocate %s (reason: %ld)\n", algo,
+ PTR_ERR(tmp_tfm));
+ mutex_unlock(&mutex);
+ return ERR_CAST(tmp_tfm);
+ }
+ if (type == EVM_XATTR_HMAC) {
+ rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(tmp_tfm);
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ }
+ *tfm = tmp_tfm;
+unlock:
+ mutex_unlock(&mutex);
+alloc:
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = *tfm;
+
+ rc = crypto_shash_init(desc);
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* To protect against 'cutting & pasting' the security.evm xattr between
+ * files, include inode-specific info in the HMAC.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char type, char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof(hmac_misc));
+ /* Don't include the inode or generation number in portable
+ * signatures
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG) {
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ }
+ /* The hmac uid and gid must be encoded in the initial user
+ * namespace (not the filesystems user namespace) as encoding
+ * them in the filesystems user namespace allows an attack
+ * where first they are written in an unprivileged fuse mount
+ * of a filesystem and then the system is tricked to mount the
+ * filesystem for real on next boot and trust it because
+ * everything is signed.
+ */
+ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
+ type != EVM_XATTR_PORTABLE_DIGSIG)
+ crypto_shash_update(desc, (u8 *)&inode->i_sb->s_uuid, UUID_SIZE);
+ crypto_shash_final(desc, digest);
+
+ pr_debug("hmac_misc: (%zu) [%*phN]\n", sizeof(struct h_misc),
+ (int)sizeof(struct h_misc), &hmac_misc);
+}
+
+/*
+ * Dump large security xattr values as a continuous ASCII hexadecimal string.
+ * (pr_debug is limited to 64 bytes.)
+ */
+static void dump_security_xattr(const char *prefix, const void *src,
+ size_t count)
+{
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+ char *asciihex, *p;
+
+ p = asciihex = kmalloc(count * 2 + 1, GFP_KERNEL);
+ if (!asciihex)
+ return;
+
+ p = bin2hex(p, src, count);
+ *p = 0;
+ pr_debug("%s: (%zu) %.*s\n", prefix, count, (int)count * 2, asciihex);
+ kfree(asciihex);
+#endif
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len,
+ uint8_t type, struct evm_digest *data)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
+ struct shash_desc *desc;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size, user_space_size;
+ bool ima_present = false;
+
+ if (!(inode->i_opflags & IOP_XATTR) ||
+ inode->i_sb->s_user_ns != &init_user_ns)
+ return -EOPNOTSUPP;
+
+ desc = init_desc(type, data->hdr.algo);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ data->hdr.length = crypto_shash_digestsize(desc->tfm);
+
+ error = -ENODATA;
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ bool is_ima = false;
+
+ if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
+ is_ima = true;
+
+ /*
+ * Skip non-enabled xattrs for locally calculated
+ * signatures/HMACs.
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG && !xattr->enabled)
+ continue;
+
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(xattr->name, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ if (is_ima)
+ ima_present = true;
+
+ if (req_xattr_value_len < 64)
+ pr_debug("%s: (%zu) [%*phN]\n", req_xattr_name,
+ req_xattr_value_len,
+ (int)req_xattr_value_len,
+ req_xattr_value);
+ else
+ dump_security_xattr(req_xattr_name,
+ req_xattr_value,
+ req_xattr_value_len);
+ continue;
+ }
+ size = vfs_getxattr_alloc(&init_user_ns, dentry, xattr->name,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ user_space_size = vfs_getxattr(&init_user_ns, dentry,
+ xattr->name, NULL, 0);
+ if (user_space_size != size)
+ pr_debug("file %s: xattr %s size mismatch (kernel: %d, user: %d)\n",
+ dentry->d_name.name, xattr->name, size,
+ user_space_size);
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ if (is_ima)
+ ima_present = true;
+
+ if (xattr_size < 64)
+ pr_debug("%s: (%zu) [%*phN]", xattr->name, xattr_size,
+ (int)xattr_size, xattr_value);
+ else
+ dump_security_xattr(xattr->name, xattr_value,
+ xattr_size);
+ }
+ hmac_add_misc(desc, inode, type, data->digest);
+
+ /* Portable EVM signatures must include an IMA hash */
+ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
+ error = -EPERM;
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ struct evm_digest *data)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, EVM_XATTR_HMAC, data);
+}
+
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char type, struct evm_digest *data)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, type, data);
+}
+
+static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
+{
+ const struct evm_ima_xattr_data *xattr_data = NULL;
+ struct integrity_iint_cache *iint;
+ int rc = 0;
+
+ iint = integrity_iint_find(inode);
+ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
+ return 1;
+
+ /* Do this the hard way */
+ rc = vfs_getxattr_alloc(&init_user_ns, dentry, XATTR_NAME_EVM,
+ (char **)&xattr_data, 0, GFP_NOFS);
+ if (rc <= 0) {
+ if (rc == -ENODATA)
+ return 0;
+ return rc;
+ }
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
+ rc = 1;
+ else
+ rc = 0;
+
+ kfree(xattr_data);
+ return rc;
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct evm_digest data;
+ int rc = 0;
+
+ /*
+ * Don't permit any transformation of the EVM xattr if the signature
+ * is of an immutable type
+ */
+ rc = evm_is_immutable(dentry, inode);
+ if (rc < 0)
+ return rc;
+ if (rc)
+ return -EPERM;
+
+ data.hdr.algo = HASH_ALGO_SHA1;
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, &data);
+ if (rc == 0) {
+ data.hdr.xattr.sha1.type = EVM_XATTR_HMAC;
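+		/*
+		 * xattr.data[1] overlays the sha1.type byte, so the buffer
+		 * written below is the one-byte type followed by the SHA1
+		 * HMAC digest.
+		 */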
+ rc = __vfs_setxattr_noperm(&init_user_ns, dentry,
+ XATTR_NAME_EVM,
+ &data.hdr.xattr.data[1],
+ SHA1_DIGEST_SIZE + 1, 0);
+ } else if (rc == -ENODATA && (inode->i_opflags & IOP_XATTR)) {
+ rc = __vfs_removexattr(&init_user_ns, dentry, XATTR_NAME_EVM);
+ }
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc(EVM_XATTR_HMAC, HASH_ALGO_SHA1);
+ if (IS_ERR(desc)) {
+ pr_info("init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the "evm-key" encrypted key (typically TPM-sealed) for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data[0];
+
+ rc = evm_set_key(ekp->decrypted_data, ekp->decrypted_datalen);
+
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 000000000..a338f1944
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#define pr_fmt(fmt) "EVM: "fmt
+
+#include <linux/init.h>
+#include <linux/crypto.h>
+#include <linux/audit.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <linux/magic.h>
+#include <linux/posix_acl_xattr.h>
+
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
+#include <crypto/algapi.h>
+#include "evm.h"
+
+int evm_initialized;
+
+static const char * const integrity_status_msg[] = {
+ "pass", "pass_immutable", "fail", "fail_immutable", "no_label",
+ "no_xattrs", "unknown"
+};
+int evm_hmac_attrs;
+
+static struct xattr_list evm_config_default_xattrnames[] = {
+ {
+ .name = XATTR_NAME_SELINUX,
+ .enabled = IS_ENABLED(CONFIG_SECURITY_SELINUX)
+ },
+ {
+ .name = XATTR_NAME_SMACK,
+ .enabled = IS_ENABLED(CONFIG_SECURITY_SMACK)
+ },
+ {
+ .name = XATTR_NAME_SMACKEXEC,
+ .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
+ },
+ {
+ .name = XATTR_NAME_SMACKTRANSMUTE,
+ .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
+ },
+ {
+ .name = XATTR_NAME_SMACKMMAP,
+ .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
+ },
+ {
+ .name = XATTR_NAME_APPARMOR,
+ .enabled = IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+ },
+ {
+ .name = XATTR_NAME_IMA,
+ .enabled = IS_ENABLED(CONFIG_IMA_APPRAISE)
+ },
+ {
+ .name = XATTR_NAME_CAPS,
+ .enabled = true
+ },
+};
+
+LIST_HEAD(evm_config_xattrnames);
+
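+/* Set via the "evm=fix" kernel command-line option, parsed below. */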
+static int evm_fixmode __ro_after_init;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ else
+ pr_err("invalid \"%s\" mode", str);
+
+ return 1;
+}
+__setup("evm=", evm_set_fixmode);
+
+static void __init evm_init_config(void)
+{
+ int i, xattrs;
+
+ xattrs = ARRAY_SIZE(evm_config_default_xattrnames);
+
+ pr_info("Initialising EVM extended attributes:\n");
+ for (i = 0; i < xattrs; i++) {
+ pr_info("%s%s\n", evm_config_default_xattrnames[i].name,
+ !evm_config_default_xattrnames[i].enabled ?
+ " (disabled)" : "");
+ list_add_tail(&evm_config_default_xattrnames[i].list,
+ &evm_config_xattrnames);
+ }
+
+#ifdef CONFIG_EVM_ATTR_FSUUID
+ evm_hmac_attrs |= EVM_ATTR_FSUUID;
+#endif
+ pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
+}
+
+static bool evm_key_loaded(void)
+{
+ return (bool)(evm_initialized & EVM_KEY_MASK);
+}
+
+/*
+ * This function determines whether or not it is safe to ignore verification
+ * errors, based on the ability of EVM to calculate HMACs. If the HMAC key
+ * is not loaded, and it cannot be loaded in the future due to the
+ * EVM_SETUP_COMPLETE initialization flag, allowing an operation despite the
+ * attrs/xattrs being found invalid will not make them valid.
+ */
+static bool evm_hmac_disabled(void)
+{
+ if (evm_initialized & EVM_INIT_HMAC)
+ return false;
+
+ if (!(evm_initialized & EVM_SETUP_COMPLETE))
+ return false;
+
+ return true;
+}
+
+static int evm_find_protected_xattrs(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
+ int error;
+ int count = 0;
+
+ if (!(inode->i_opflags & IOP_XATTR))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
+ if (error < 0) {
+ if (error == -ENODATA)
+ continue;
+ return error;
+ }
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ *   HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ struct signature_v2_hdr *hdr;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ struct evm_digest digest;
+ struct inode *inode;
+ int rc, xattr_len, evm_immutable = 0;
+
+ if (iint && (iint->evm_status == INTEGRITY_PASS ||
+ iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
+ return iint->evm_status;
+
+	/* if status is not PASS, check again - the earlier failure may have
+	 * been a transient -ENOMEM */
+
+ /* first need to know the sig type */
+ rc = vfs_getxattr_alloc(&init_user_ns, dentry, XATTR_NAME_EVM,
+ (char **)&xattr_data, 0, GFP_NOFS);
+ if (rc <= 0) {
+ evm_status = INTEGRITY_FAIL;
+ if (rc == -ENODATA) {
+ rc = evm_find_protected_xattrs(dentry);
+ if (rc > 0)
+ evm_status = INTEGRITY_NOLABEL;
+ else if (rc == 0)
+ evm_status = INTEGRITY_NOXATTRS; /* new file */
+ } else if (rc == -EOPNOTSUPP) {
+ evm_status = INTEGRITY_UNKNOWN;
+ }
+ goto out;
+ }
+
+ xattr_len = rc;
+
+ /* check value type */
+ switch (xattr_data->type) {
+ case EVM_XATTR_HMAC:
+ if (xattr_len != sizeof(struct evm_xattr)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
+
+ digest.hdr.algo = HASH_ALGO_SHA1;
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, &digest);
+ if (rc)
+ break;
+ rc = crypto_memneq(xattr_data->data, digest.digest,
+ SHA1_DIGEST_SIZE);
+ if (rc)
+ rc = -EINVAL;
+ break;
+ case EVM_XATTR_PORTABLE_DIGSIG:
+ evm_immutable = 1;
+ fallthrough;
+ case EVM_IMA_XATTR_DIGSIG:
+ /* accept xattr with non-empty signature field */
+ if (xattr_len <= sizeof(struct signature_v2_hdr)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
+
+ hdr = (struct signature_v2_hdr *)xattr_data;
+ digest.hdr.algo = hdr->hash_algo;
+ rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data->type, &digest);
+ if (rc)
+ break;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
+ (const char *)xattr_data, xattr_len,
+ digest.digest, digest.hdr.length);
+ if (!rc) {
+ inode = d_backing_inode(dentry);
+
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
+ if (iint)
+ iint->flags |= EVM_IMMUTABLE_DIGSIG;
+ evm_status = INTEGRITY_PASS_IMMUTABLE;
+ } else if (!IS_RDONLY(inode) &&
+ !(inode->i_sb->s_readonly_remount) &&
+ !IS_IMMUTABLE(inode)) {
+ evm_update_evmxattr(dentry, xattr_name,
+ xattr_value,
+ xattr_value_len);
+ }
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ if (rc == -ENODATA)
+ evm_status = INTEGRITY_NOXATTRS;
+ else if (evm_immutable)
+ evm_status = INTEGRITY_FAIL_IMMUTABLE;
+ else
+ evm_status = INTEGRITY_FAIL;
+ }
+ pr_debug("digest: (%d) [%*phN]\n", digest.hdr.length, digest.hdr.length,
+ digest.digest);
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ kfree(xattr_data);
+ return evm_status;
+}
+
+static int evm_protected_xattr_common(const char *req_xattr_name,
+ bool all_xattrs)
+{
+ int namelen;
+ int found = 0;
+ struct xattr_list *xattr;
+
+ namelen = strlen(req_xattr_name);
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ if (!all_xattrs && !xattr->enabled)
+ continue;
+
+ if ((strlen(xattr->name) == namelen)
+ && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ xattr->name + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ return evm_protected_xattr_common(req_xattr_name, false);
+}
+
+int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+{
+ return evm_protected_xattr_common(req_xattr_name, true);
+}
+
+/**
+ * evm_read_protected_xattrs - read EVM protected xattr names, lengths, values
+ * @dentry: dentry whose protected xattrs are read
+ * @buffer: buffer to which the xattr names, lengths or values are copied
+ * @buffer_size: size of buffer
+ * @type: n: names, l: lengths, v: values
+ * @canonical_fmt: data format (true: little endian, false: native format)
+ *
+ * Read protected xattr names (separated by |), lengths (u32) or values for a
+ * given dentry and return the total size of copied data. If buffer is NULL,
+ * just return the total size.
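+ *
+ * For example (illustrative): with type 'n' the buffer could contain
+ * "security.selinux|security.ima", with type 'l' one u32 length per
+ * protected xattr present on the file, and with type 'v' the raw xattr
+ * values back to back.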
+ *
+ * Returns the total size on success, a negative value on error.
+ */
+int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type, bool canonical_fmt)
+{
+ struct xattr_list *xattr;
+ int rc, size, total_size = 0;
+
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ rc = __vfs_getxattr(dentry, d_backing_inode(dentry),
+ xattr->name, NULL, 0);
+ if (rc < 0 && rc == -ENODATA)
+ continue;
+ else if (rc < 0)
+ return rc;
+
+ switch (type) {
+ case 'n':
+ size = strlen(xattr->name) + 1;
+ if (buffer) {
+ if (total_size)
+ *(buffer + total_size - 1) = '|';
+
+ memcpy(buffer + total_size, xattr->name, size);
+ }
+ break;
+ case 'l':
+ size = sizeof(u32);
+ if (buffer) {
+ if (canonical_fmt)
+ rc = (__force int)cpu_to_le32(rc);
+
+ *(u32 *)(buffer + total_size) = rc;
+ }
+ break;
+ case 'v':
+ size = rc;
+ if (buffer) {
+ rc = __vfs_getxattr(dentry,
+ d_backing_inode(dentry), xattr->name,
+ buffer + total_size,
+ buffer_size - total_size);
+ if (rc < 0)
+ return rc;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ total_size += size;
+ }
+
+ return total_size;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ * @iint: inode integrity metadata
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(d_backing_inode(dentry));
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
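+
+/*
+ * Typical call site (illustrative): IMA appraisal passes the security.ima
+ * value it has just read, so evm_verify_hmac() can fold that value into
+ * the HMAC without re-reading the xattr.
+ */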
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity. Return
+ * INTEGRITY_PASS unconditionally before EVM is initialized, for
+ * non-regular files, and in 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ if (!evm_key_loaded() || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return INTEGRITY_PASS;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_xattr_acl_change - check if passed ACL changes the inode mode
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Check if the passed ACL changes the inode mode, which is protected by EVM.
+ *
+ * Returns 1 if the passed ACL changes the inode mode, 0 otherwise.
+ */
+static int evm_xattr_acl_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+ umode_t mode;
+ struct posix_acl *acl = NULL, *acl_res;
+ struct inode *inode = d_backing_inode(dentry);
+ int rc;
+
+ /*
+ * An earlier comment here mentioned that the idmappings for
+ * ACL_{GROUP,USER} don't matter since EVM is only interested in the
+ * mode stored as part of POSIX ACLs. Nonetheless, if it must translate
+ * from the uapi POSIX ACL representation to the VFS internal POSIX ACL
+ * representation it should do so correctly. There's no guarantee that
+ * we won't change POSIX ACLs in a way that ACL_{GROUP,USER} matters
+ * for the mode at some point and it's difficult to keep track of all
+ * the LSM and integrity modules and what they do to POSIX ACLs.
+ *
+ * Frankly, EVM shouldn't try to interpret the uapi struct for POSIX
+ * ACLs it received. It requires knowledge that only the VFS is
+ * guaranteed to have.
+ */
+ acl = vfs_set_acl_prepare(mnt_userns, i_user_ns(inode),
+ xattr_value, xattr_value_len);
+ if (IS_ERR_OR_NULL(acl))
+ return 1;
+
+ acl_res = acl;
+ /*
+ * Passing mnt_userns is necessary to correctly determine the GID in
+ * an idmapped mount, as the GID is used to clear the setgid bit in
+ * the inode mode.
+ */
+ rc = posix_acl_update_mode(mnt_userns, inode, &mode, &acl_res);
+
+ posix_acl_release(acl);
+
+ if (rc)
+ return 1;
+
+ if (inode->i_mode != mode)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * evm_xattr_change - check if passed xattr value differs from current value
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Check if passed xattr value differs from current value.
+ *
+ * Returns 1 if passed xattr value differs from current value, 0 otherwise.
+ */
+static int evm_xattr_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ char *xattr_data = NULL;
+ int rc = 0;
+
+ if (posix_xattr_acl(xattr_name))
+ return evm_xattr_acl_change(mnt_userns, dentry, xattr_name,
+ xattr_value, xattr_value_len);
+
+ rc = vfs_getxattr_alloc(&init_user_ns, dentry, xattr_name, &xattr_data,
+ 0, GFP_NOFS);
+ if (rc < 0)
+ return 1;
+
+ if (rc == xattr_value_len)
+ rc = !!memcmp(xattr_value, xattr_data, rc);
+ else
+ rc = 1;
+
+ kfree(xattr_data);
+ return rc;
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm. An interesting side effect of writing posix xattr
+ * acls is that they modify the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated unless the EVM signature is immutable.
+ */
+static int evm_protect_xattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ goto out;
+ }
+
+ evm_status = evm_verify_current_integrity(dentry);
+ if (evm_status == INTEGRITY_NOXATTRS) {
+ struct integrity_iint_cache *iint;
+
+ /* Exception if the HMAC is not going to be calculated. */
+ if (evm_hmac_disabled())
+ return 0;
+
+ iint = integrity_iint_find(d_backing_inode(dentry));
+ if (iint && (iint->flags & IMA_NEW_FILE))
+ return 0;
+
+ /* exception for pseudo filesystems */
+ if (dentry->d_sb->s_magic == TMPFS_MAGIC
+ || dentry->d_sb->s_magic == SYSFS_MAGIC)
+ return 0;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
+ dentry->d_inode, dentry->d_name.name,
+ "update_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
+ }
+out:
+ /* Exception if the HMAC is not going to be calculated. */
+ if (evm_hmac_disabled() && (evm_status == INTEGRITY_NOLABEL ||
+ evm_status == INTEGRITY_UNKNOWN))
+ return 0;
+
+ /*
+ * Writing other xattrs is safe for portable signatures, as portable
+ * signatures are immutable and can never be updated.
+ */
+ if (evm_status == INTEGRITY_FAIL_IMMUTABLE)
+ return 0;
+
+ if (evm_status == INTEGRITY_PASS_IMMUTABLE &&
+ !evm_xattr_change(mnt_userns, dentry, xattr_name, xattr_value,
+ xattr_value_len))
+ return 0;
+
+ if (evm_status != INTEGRITY_PASS &&
+ evm_status != INTEGRITY_PASS_IMMUTABLE)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid. As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing the HMAC value. Writing 'security.evm'
+ * requires CAP_SYS_ADMIN privileges.
+ */
+int evm_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *xattr_name, const void *xattr_value,
+ size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!xattr_value_len)
+ return -EINVAL;
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
+ return -EPERM;
+ }
+ return evm_protect_xattr(mnt_userns, dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name)
+{
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ return evm_protect_xattr(mnt_userns, dentry, xattr_name, NULL, 0);
+}
+
+static void evm_reset_status(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ iint = integrity_iint_find(inode);
+ if (iint)
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+/**
+ * evm_revalidate_status - report whether EVM status re-validation is necessary
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Report whether callers of evm_verifyxattr() should re-validate the
+ * EVM status.
+ *
+ * Return true if re-validation is necessary, false otherwise.
+ */
+bool evm_revalidate_status(const char *xattr_name)
+{
+ if (!evm_key_loaded())
+ return false;
+
+ /* evm_inode_post_setattr() passes NULL */
+ if (!xattr_name)
+ return true;
+
+ if (!evm_protected_xattr(xattr_name) && !posix_xattr_acl(xattr_name) &&
+ strcmp(xattr_name, XATTR_NAME_EVM))
+ return false;
+
+ return true;
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(). The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_revalidate_status(xattr_name))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ if (!strcmp(xattr_name, XATTR_NAME_EVM))
+ return;
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * vfs_removexattr() which takes the i_mutex.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ if (!evm_revalidate_status(xattr_name))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ if (!strcmp(xattr_name, XATTR_NAME_EVM))
+ return;
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+}
+
+static int evm_attr_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ unsigned int ia_valid = attr->ia_valid;
+
+ if (!i_uid_needs_update(mnt_userns, attr, inode) &&
+ !i_gid_needs_update(mnt_userns, attr, inode) &&
+ (!(ia_valid & ATTR_MODE) || attr->ia_mode == inode->i_mode))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @attr: iattr structure containing the new file attributes
+ *
+ * Permit update of file attributes when files have a valid EVM signature,
+ * except in the case of them having an immutable portable signature.
+ */
+int evm_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ /* Policy permits modification of the protected attrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ /*
+ * Writing attrs is safe for portable signatures, as portable signatures
+ * are immutable and can never be updated.
+ */
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS) ||
+ (evm_status == INTEGRITY_FAIL_IMMUTABLE) ||
+ (evm_hmac_disabled() && (evm_status == INTEGRITY_NOLABEL ||
+ evm_status == INTEGRITY_UNKNOWN)))
+ return 0;
+
+ if (evm_status == INTEGRITY_PASS_IMMUTABLE &&
+ !evm_attr_change(mnt_userns, dentry, attr))
+ return 0;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status], -EPERM, 0);
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: bitmask of the changed attributes (ATTR_MODE | ATTR_UID | ATTR_GID)
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_revalidate_status(NULL))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm HMAC value
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_xattr *xattr_data;
+ int rc;
+
+ if (!(evm_initialized & EVM_INIT_HMAC) ||
+ !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->data.type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = XATTR_EVM_SUFFIX;
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
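+
+/*
+ * Illustrative flow (not part of this file): on inode creation,
+ * security_inode_init_security() collects the LSM's xattr (e.g.
+ * security.selinux) and calls this hook so a matching security.evm
+ * HMAC xattr can be written alongside it.
+ */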
+
+#ifdef CONFIG_EVM_LOAD_X509
+void __init evm_load_x509(void)
+{
+ int rc;
+
+ rc = integrity_load_x509(INTEGRITY_KEYRING_EVM, CONFIG_EVM_X509_PATH);
+ if (!rc)
+ evm_initialized |= EVM_INIT_X509;
+}
+#endif
+
+static int __init init_evm(void)
+{
+ int error;
+ struct list_head *pos, *q;
+
+ evm_init_config();
+
+ error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
+ if (error)
+ goto error;
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ pr_info("Error registering secfs\n");
+ goto error;
+ }
+
+error:
+ if (error != 0) {
+ if (!list_empty(&evm_config_xattrnames)) {
+ list_for_each_safe(pos, q, &evm_config_xattrnames)
+ list_del(pos);
+ }
+ }
+
+ return error;
+}
+
+late_initcall(init_evm);
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 000000000..37275800c
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+int posix_xattr_acl(const char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 000000000..8a9db7dfc
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#include <linux/audit.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include "evm.h"
+
+static struct dentry *evm_dir;
+static struct dentry *evm_init_tpm;
+static struct dentry *evm_symlink;
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+static struct dentry *evm_xattrs;
+static DEFINE_MUTEX(xattr_list_mutex);
+static int evm_xattrs_locked;
+#endif
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP_COMPLETE));
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that the key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
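+ *
+ * For example (illustrative): writing "1" (EVM_INIT_HMAC) loads the HMAC
+ * key and seals further setup, while writing "2" (EVM_INIT_X509) enables
+ * digital signature verification only.
+ *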
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int i;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
+ return -EPERM;
+
+ ret = kstrtouint_from_user(buf, count, 0, &i);
+
+ if (ret)
+ return ret;
+
+ /* Reject invalid values */
+ if (!i || (i & ~EVM_INIT_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * Don't allow a request to enable metadata writes if
+ * an HMAC key is loaded.
+ */
+ if ((i & EVM_ALLOW_METADATA_WRITES) &&
+ (evm_initialized & EVM_INIT_HMAC) != 0)
+ return -EPERM;
+
+ if (i & EVM_INIT_HMAC) {
+ ret = evm_init_key();
+ if (ret != 0)
+ return ret;
+ /* Forbid further writes after the symmetric key is loaded */
+ i |= EVM_SETUP_COMPLETE;
+ }
+
+ evm_initialized |= i;
+
+ /* Don't allow protected metadata modification if a symmetric key
+ * is loaded
+ */
+ if (evm_initialized & EVM_INIT_HMAC)
+ evm_initialized &= ~(EVM_ALLOW_METADATA_WRITES);
+
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+/**
+ * evm_read_xattrs - read() for <securityfs>/evm_xattrs
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *temp;
+ int offset = 0;
+ ssize_t rc, size = 0;
+ struct xattr_list *xattr;
+
+ if (*ppos != 0)
+ return 0;
+
+ rc = mutex_lock_interruptible(&xattr_list_mutex);
+ if (rc)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ if (!xattr->enabled)
+ continue;
+
+ size += strlen(xattr->name) + 1;
+ }
+
+ temp = kmalloc(size + 1, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&xattr_list_mutex);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ if (!xattr->enabled)
+ continue;
+
+ sprintf(temp + offset, "%s\n", xattr->name);
+ offset += strlen(xattr->name) + 1;
+ }
+
+ mutex_unlock(&xattr_list_mutex);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ kfree(temp);
+
+ return rc;
+}
+
+/**
+ * evm_write_xattrs - write() for <securityfs>/evm_xattrs
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct xattr_list *xattr, *tmp;
+ struct audit_buffer *ab;
+ struct iattr newattrs;
+ struct inode *inode;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_xattrs_locked)
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count > XATTR_NAME_MAX)
+ return -E2BIG;
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_EVM_XATTR);
+ if (!ab && IS_ENABLED(CONFIG_AUDIT))
+ return -ENOMEM;
+
+ xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
+ if (!xattr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ xattr->enabled = true;
+ xattr->name = memdup_user_nul(buf, count);
+ if (IS_ERR(xattr->name)) {
+ err = PTR_ERR(xattr->name);
+ xattr->name = NULL;
+ goto out;
+ }
+
+ /* Remove any trailing newline */
+ len = strlen(xattr->name);
+ if (len && xattr->name[len-1] == '\n')
+ xattr->name[len-1] = '\0';
+
+ audit_log_format(ab, "xattr=");
+ audit_log_untrustedstring(ab, xattr->name);
+
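+	/* Writing "." locks the list and makes evm_xattrs read-only. */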
+ if (strcmp(xattr->name, ".") == 0) {
+ evm_xattrs_locked = 1;
+ newattrs.ia_mode = S_IFREG | 0440;
+ newattrs.ia_valid = ATTR_MODE;
+ inode = evm_xattrs->d_inode;
+ inode_lock(inode);
+ err = simple_setattr(&init_user_ns, evm_xattrs, &newattrs);
+ inode_unlock(inode);
+ if (!err)
+ err = count;
+ goto out;
+ }
+
+ if (strncmp(xattr->name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * xattr_list_mutex guards against races in evm_read_xattrs().
+ * Entries are only added to the evm_config_xattrnames list
+ * and never deleted. Therefore, the list is traversed
+ * using list_for_each_entry_lockless() without holding
+ * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
+ * and evm_protected_xattr().
+ */
+ mutex_lock(&xattr_list_mutex);
+ list_for_each_entry(tmp, &evm_config_xattrnames, list) {
+ if (strcmp(xattr->name, tmp->name) == 0) {
+ err = -EEXIST;
+ if (!tmp->enabled) {
+ tmp->enabled = true;
+ err = count;
+ }
+ mutex_unlock(&xattr_list_mutex);
+ goto out;
+ }
+ }
+ list_add_tail_rcu(&xattr->list, &evm_config_xattrnames);
+ mutex_unlock(&xattr_list_mutex);
+
+ audit_log_format(ab, " res=0");
+ audit_log_end(ab);
+ return count;
+out:
+ audit_log_format(ab, " res=%d", (err < 0) ? err : 0);
+ audit_log_end(ab);
+ if (xattr) {
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+ return err;
+}
+
+static const struct file_operations evm_xattr_ops = {
+ .read = evm_read_xattrs,
+ .write = evm_write_xattrs,
+};
+
+static int evm_init_xattrs(void)
+{
+ evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL,
+ &evm_xattr_ops);
+ if (!evm_xattrs || IS_ERR(evm_xattrs))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int evm_init_xattrs(void)
+{
+ return 0;
+}
+#endif
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_dir = securityfs_create_dir("evm", integrity_dir);
+ if (!evm_dir || IS_ERR(evm_dir))
+ return -EFAULT;
+
+ evm_init_tpm = securityfs_create_file("evm", 0660,
+ evm_dir, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ evm_symlink = securityfs_create_symlink("evm", NULL,
+ "integrity/evm/evm", NULL);
+ if (!evm_symlink || IS_ERR(evm_symlink)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ if (evm_init_xattrs() != 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+out:
+ securityfs_remove(evm_symlink);
+ securityfs_remove(evm_init_tpm);
+ securityfs_remove(evm_dir);
+ return error;
+}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 000000000..cb251ab0e
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - cache integrity information associated with an inode
+ *	  using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/security.h>
+#include <linux/lsm_hooks.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_RWLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+struct dentry *integrity_dir;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
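+	/* The tree is keyed by the inode's address. */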
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ return iint;
+ }
+
+ return NULL;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ read_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ read_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
+
+/*
+ * It is not clear that IMA should be nested at all, but as long as it measures
+ * files both on overlayfs and on underlying fs, we need to annotate the iint
+ * mutex to avoid lockdep false positives related to IMA + overlayfs.
+ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
+ */
+static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
+ struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
+
+ int depth = inode->i_sb->s_stack_depth;
+
+ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
+ depth = 0;
+
+ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
+#endif
+}
+
+static void iint_init_always(struct integrity_iint_cache *iint,
+ struct inode *inode)
+{
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->atomic_flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_read_status = INTEGRITY_UNKNOWN;
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
+ mutex_init(&iint->mutex);
+ iint_lockdep_annotate(iint, inode);
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ kfree(iint->ima_hash);
+ mutex_destroy(&iint->mutex);
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_get - find or allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
+ */
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *node, *parent = NULL;
+ struct integrity_iint_cache *iint, *test_iint;
+
+ /*
+	 * The integrity "iint_cache" is initialized at security_init(),
+	 * but only if "integrity" is included in the ordered list of LSMs
+	 * enabled on the boot command line.
+ */
+ if (!iint_cache)
+ panic("%s: lsm=integrity required.\n", __func__);
+
+ iint = integrity_iint_find(inode);
+ if (iint)
+ return iint;
+
+ iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!iint)
+ return NULL;
+
+ iint_init_always(iint, inode);
+
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ if (inode < test_iint->inode) {
+ p = &(*p)->rb_left;
+ } else if (inode > test_iint->inode) {
+ p = &(*p)->rb_right;
+ } else {
+ write_unlock(&integrity_iint_lock);
+ kmem_cache_free(iint_cache, iint);
+ return test_iint;
+ }
+ }
+
+ iint->inode = inode;
+ node = &iint->rb_node;
+ inode->i_flags |= S_IMA;
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, &integrity_iint_tree);
+
+ write_unlock(&integrity_iint_lock);
+ return iint;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ write_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ write_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void iint_init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+
+ memset(iint, 0, sizeof(*iint));
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, iint_init_once);
+ return 0;
+}
+DEFINE_LSM(integrity) = {
+ .name = "integrity",
+ .init = integrity_iintcache_init,
+};
+
+
+/*
+ * integrity_kernel_read - read data from the file
+ *
+ * This is a function for reading file content instead of kernel_read().
+ * It does not perform locking checks to ensure it cannot be blocked.
+ * It does not perform security checks because they are irrelevant for IMA.
+ */
+int integrity_kernel_read(struct file *file, loff_t offset,
+ void *addr, unsigned long count)
+{
+ return __kernel_read(file, addr, count, &offset);
+}
+
+/*
+ * integrity_load_keys - load integrity keys hook
+ *
+ * This hook is called from init/main.c:kernel_init_freeable()
+ * when the rootfs is ready.
+ */
+void __init integrity_load_keys(void)
+{
+ ima_load_x509();
+
+ if (!IS_ENABLED(CONFIG_IMA_LOAD_X509))
+ evm_load_x509();
+}
+
+static int __init integrity_fs_init(void)
+{
+ integrity_dir = securityfs_create_dir("integrity", NULL);
+ if (IS_ERR(integrity_dir)) {
+ int ret = PTR_ERR(integrity_dir);
+
+ if (ret != -ENODEV)
+ pr_err("Unable to create integrity sysfs dir: %d\n",
+ ret);
+ integrity_dir = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+late_initcall(integrity_fs_init)
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
new file mode 100644
index 000000000..6ef7bde55
--- /dev/null
+++ b/security/integrity/ima/Kconfig
@@ -0,0 +1,322 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# IBM Integrity Measurement Architecture
+#
+config IMA
+ bool "Integrity Measurement Architecture(IMA)"
+ select SECURITYFS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ select TCG_TPM if HAS_IOMEM
+ select TCG_TIS if TCG_TPM && X86
+ select TCG_CRB if TCG_TPM && ACPI
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+ select INTEGRITY_AUDIT if AUDIT
+ help
+	  The Trusted Computing Group (TCG) runtime Integrity
+	  Measurement Architecture (IMA) maintains a list of hash
+ values of executables and other sensitive system files,
+ as they are read or executed. If an attacker manages
+ to change the contents of an important system file
+ being measured, we can tell.
+
+ If your system has a TPM chip, then IMA also maintains
+ an aggregate integrity value over this list inside the
+ TPM hardware, so that the TPM can prove to a third party
+ whether or not critical system files have been modified.
+ Read <https://www.usenix.org/events/sec04/tech/sailer.html>
+ to learn more about IMA.
+ If unsure, say N.
+
+if IMA
+
+config IMA_KEXEC
+ bool "Enable carrying the IMA measurement list across a soft boot"
+ depends on TCG_TPM && HAVE_IMA_KEXEC
+ default n
+ help
+ TPM PCRs are only reset on a hard reboot. In order to validate
+ a TPM's quote after a soft boot, the IMA measurement list of the
+ running kernel must be saved and restored on boot.
+
+ Depending on the IMA policy, the measurement list can grow to
+ be very large.
+
+config IMA_MEASURE_PCR_IDX
+ int
+ range 8 14
+ default 10
+ help
+ IMA_MEASURE_PCR_IDX determines the TPM PCR register index
+ that IMA uses to maintain the integrity aggregate of the
+ measurement list. If unsure, use the default 10.
+
+config IMA_LSM_RULES
+ bool
+ depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
+ default y
+ help
+ Disabling this option will disregard LSM-based policy rules.
+
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames. The configured default template can be replaced
+ by specifying "ima_template=" on the boot command line.
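+
+ For example, booting with "ima_template=ima-sig" selects the
+ 'ima-sig' template in place of the configured default.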
+
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled default
+ hash algorithm can be overwritten using the kernel command
+ line 'ima_hash=' option.
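+
+ For example, adding "ima_hash=sha256" to the boot command line
+ selects SHA256 instead of the configured default.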
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1=y
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256=y
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512=y
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512=y
+
+ config IMA_DEFAULT_HASH_SM3
+ bool "SM3"
+ depends on CRYPTO_SM3_GENERIC=y
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+ default "sm3" if IMA_DEFAULT_HASH_SM3
+
+config IMA_WRITE_POLICY
+ bool "Enable multiple writes to the IMA policy"
+ default n
+ help
+ This option allows the IMA policy to be updated multiple times;
+ new rules are appended to the existing policy. Keep in mind that
+ rules are evaluated in FIFO order, so design and add new ones with care.
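+
+ For example, an additional rule can be appended at runtime with
+ something like:
+
+ echo "measure func=BPRM_CHECK" > /sys/kernel/security/ima/policy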
+
+ If unsure, say N.
+
+config IMA_READ_POLICY
+ bool "Enable reading back the current IMA policy"
+ default y if IMA_WRITE_POLICY
+ default n if !IMA_WRITE_POLICY
+ help
+ It is often useful to be able to read back the IMA policy,
+ especially after enabling CONFIG_IMA_WRITE_POLICY. This option
+ allows the root user to see the current policy rules.
+
+config IMA_APPRAISE
+ bool "Appraise integrity measurements"
+ default n
+ help
+ This option enables local measurement integrity appraisal.
+ It requires the system to be labeled with a security extended
+ attribute containing the file hash measurement. To protect
+ the security extended attributes from offline attack, enable
+ and configure EVM.
+
+ For more information on integrity appraisal refer to:
+ <http://linux-ima.sourceforge.net>
+ If unsure, say N.
+
+config IMA_ARCH_POLICY
+ bool "Enable loading an IMA architecture specific policy"
+ depends on (KEXEC_SIG && IMA) || IMA_APPRAISE \
+ && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option enables loading an IMA architecture specific policy
+ based on run time secure boot flags.
+
+config IMA_APPRAISE_BUILD_POLICY
+ bool "IMA build time configured policy rules"
+ depends on IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option defines an IMA appraisal policy at build time, which
+ is enforced at run time without having to specify a builtin
+ policy name on the boot command line. The build time appraisal
+ policy rules persist after loading a custom policy.
+
+ Depending on the rules configured, this policy may require kernel
+ modules, firmware, the kexec kernel image, and/or the IMA policy
+ to be signed. Unsigned files might prevent the system from
+ booting or applications from working properly.
+
+config IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ bool "Appraise firmware signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ This option defines a policy requiring all firmware to be signed,
+ including the regulatory.db. If both this option and
+ CFG80211_REQUIRE_SIGNED_REGDB are enabled, then both signature
+ verification methods are necessary.
+
+config IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ bool "Appraise kexec kernel image signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kexec'ed kernel images to
+ be signed and verified by a public key on the trusted IMA
+ keyring.
+
+ Kernel image signatures cannot be verified by the original
+ kexec_load syscall. Enabling this rule will prevent its use.
+
+config IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ bool "Appraise kernel modules signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kernel modules to be signed
+ and verified by a public key on the trusted IMA keyring.
+
+ Kernel module signatures can only be verified by IMA-appraisal,
+ via the finit_module syscall. Enabling this rule will prevent
+ the usage of the init_module syscall.
+
+config IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ bool "Appraise IMA policy signature"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require the IMA policy to be signed and
+ verified by a key on the trusted IMA keyring.
+
+config IMA_APPRAISE_BOOTPARAM
+ bool "ima_appraise boot parameter"
+ depends on IMA_APPRAISE
+ default y
+ help
+ This option enables the different "ima_appraise=" modes
+ (e.g. fix, log) from the boot command line.
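+
+ For example, booting with "ima_appraise=log" logs appraisal
+ failures without enforcing them.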
+
+config IMA_APPRAISE_MODSIG
+ bool "Support module-style signatures for appraisal"
+ depends on IMA_APPRAISE
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select PKCS7_MESSAGE_PARSER
+ select MODULE_SIG_FORMAT
+ default n
+ help
+ Adds support for signatures appended to files. The format of the
+ appended signature is the same used for signed kernel modules.
+ The modsig keyword can be used in the IMA policy to allow a hook
+ to accept such signatures.
+
+config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on SECONDARY_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ Keys may be added to the IMA or IMA blacklist keyrings if the
+ key is validly signed by a CA cert in the system built-in or
+ secondary trusted keyrings.
+
+ Intermediate keys between those the kernel has compiled in and the
+ IMA keys to be added may be added to the system secondary keyring,
+ provided they are validly signed by a key already resident in the
+ built-in or secondary trusted keyrings.
+
+config IMA_BLACKLIST_KEYRING
+ bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ This option creates an IMA blacklist keyring, which contains all
+ revoked IMA keys. It is consulted before any other keyring. If
+ the search is successful the requested operation is rejected and
+ an error is returned to the caller.
+
+config IMA_LOAD_X509
+ bool "Load X509 certificate onto the '.ima' trusted keyring"
+ depends on INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ File signature verification is based on the public keys
+ loaded on the .ima trusted keyring. These public keys are
+ X509 certificates signed by a trusted key on the
+ .system keyring. This option enables X509 certificate
+ loading from the kernel onto the '.ima' trusted keyring.
+
+config IMA_X509_PATH
+ string "IMA X509 certificate path"
+ depends on IMA_LOAD_X509
+ default "/etc/keys/x509_ima.der"
+ help
+ This option defines the IMA X509 certificate path.
+
+config IMA_APPRAISE_SIGNED_INIT
+ bool "Require signed user-space initialization"
+ depends on IMA_LOAD_X509
+ default n
+ help
+ This option requires user-space init to be signed.
+
+config IMA_MEASURE_ASYMMETRIC_KEYS
+ bool
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+ default y
+
+config IMA_QUEUE_EARLY_BOOT_KEYS
+ bool
+ depends on IMA_MEASURE_ASYMMETRIC_KEYS
+ depends on SYSTEM_TRUSTED_KEYRING
+ default y
+
+config IMA_SECURE_AND_OR_TRUSTED_BOOT
+ bool
+ depends on IMA_ARCH_POLICY
+ help
+ This option is selected by architectures to enable secure and/or
+ trusted boot based on IMA runtime policies.
+
+config IMA_DISABLE_HTABLE
+ bool "Disable htable to allow measurement of duplicate records"
+ default n
+ help
+ This option disables the hash table (htable) lookup, allowing
+ duplicate records to be measured and added to the measurement list.
+
+endif
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
new file mode 100644
index 000000000..2499f2485
--- /dev/null
+++ b/security/integrity/ima/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for building Trusted Computing Group's (TCG) runtime Integrity
+# Measurement Architecture (IMA).
+#
+
+obj-$(CONFIG_IMA) += ima.o
+
+ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
+ ima_policy.o ima_template.o ima_template_lib.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
+ima-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
+ima-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
+ima-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
+
+ifeq ($(CONFIG_EFI),y)
+ima-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_efi.o
+endif
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
new file mode 100644
index 000000000..be965a871
--- /dev/null
+++ b/security/integrity/ima/ima.h
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima.h
+ * internal Integrity Measurement Architecture (IMA) definitions
+ */
+
+#ifndef __LINUX_IMA_H
+#define __LINUX_IMA_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/hash.h>
+#include <linux/tpm.h>
+#include <linux/audit.h>
+#include <crypto/hash_info.h>
+
+#include "../integrity.h"
+
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
+
+/* digest size for IMA, fits SHA1 or MD5 */
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
+#define IMA_EVENT_NAME_LEN_MAX 255
+
+#define IMA_HASH_BITS 10
+#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
+#define NR_BANKS(chip) ((chip != NULL) ? chip->nr_allocated_banks : 0)
+
+/* current content of the policy */
+extern int ima_policy_flag;
+
+/* bitset of digest algorithms allowed in the setxattr hook */
+extern atomic_t ima_setxattr_allowed_hash_algorithms;
+
+/* set during initialization */
+extern int ima_hash_algo __ro_after_init;
+extern int ima_sha1_idx __ro_after_init;
+extern int ima_hash_algo_idx __ro_after_init;
+extern int ima_extra_slots __ro_after_init;
+extern int ima_appraise;
+extern struct tpm_chip *ima_tpm_chip;
+extern const char boot_aggregate_name[];
+
+/* IMA event related data */
+struct ima_event_data {
+ struct integrity_iint_cache *iint;
+ struct file *file;
+ const unsigned char *filename;
+ struct evm_ima_xattr_data *xattr_value;
+ int xattr_len;
+ const struct modsig *modsig;
+ const char *violation;
+ const void *buf;
+ int buf_len;
+};
+
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init)(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+ void (*field_show)(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ struct list_head list;
+ char *name;
+ char *fmt;
+ int num_fields;
+ const struct ima_template_field **fields;
+};
+
+struct ima_template_entry {
+ int pcr;
+ struct tpm_digest *digests;
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[]; /* template related data */
+};
+
+struct ima_queue_entry {
+ struct hlist_node hnext; /* place in hash collision list */
+ struct list_head later; /* place in ima_measurements list */
+ struct ima_template_entry *entry;
+};
+extern struct list_head ima_measurements; /* list of all measurements */
+
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+ u16 version;
+ u16 _reserved0;
+ u32 _reserved1;
+ u64 buffer_size;
+ u64 count;
+};
+
+extern const int read_idmap[];
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format. The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
+/* Internal IMA function definitions */
+int ima_init(void);
+int ima_fs_init(void);
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_entry *entry);
+int ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct integrity_iint_cache *iint,
+ const char *op, const char *cause);
+int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields);
+struct ima_template_desc *ima_template_desc_current(void);
+struct ima_template_desc *ima_template_desc_buf(void);
+struct ima_template_desc *lookup_template_desc(const char *name);
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
+int ima_init_template(void);
+void ima_init_template_list(void);
+int __init ima_init_digests(void);
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data);
+
+/*
+ * used to protect h_table and sha_table
+ */
+extern spinlock_t ima_queue_lock;
+
+struct ima_h_table {
+ atomic_long_t len; /* number of stored measurements in the list */
+ atomic_long_t violations;
+ struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+};
+extern struct ima_h_table ima_htable;
+
+static inline unsigned int ima_hash_key(u8 *digest)
+{
+ /* there is no point in taking a hash of part of a digest */
+ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
+}
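+
+/*
+ * Illustrative only: ima_queue.c uses this key to place a measurement
+ * entry into the hash table, roughly:
+ *
+ *	key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
+ *	hlist_add_head(&qe->hnext, &ima_htable.queue[key]);
+ */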
+
+#define __ima_hooks(hook) \
+ hook(NONE, none) \
+ hook(FILE_CHECK, file) \
+ hook(MMAP_CHECK, mmap) \
+ hook(BPRM_CHECK, bprm) \
+ hook(CREDS_CHECK, creds) \
+ hook(POST_SETATTR, post_setattr) \
+ hook(MODULE_CHECK, module) \
+ hook(FIRMWARE_CHECK, firmware) \
+ hook(KEXEC_KERNEL_CHECK, kexec_kernel) \
+ hook(KEXEC_INITRAMFS_CHECK, kexec_initramfs) \
+ hook(POLICY_CHECK, policy) \
+ hook(KEXEC_CMDLINE, kexec_cmdline) \
+ hook(KEY_CHECK, key) \
+ hook(CRITICAL_DATA, critical_data) \
+ hook(SETXATTR_CHECK, setxattr_check) \
+ hook(MAX_CHECK, none)
+
+#define __ima_hook_enumify(ENUM, str) ENUM,
+#define __ima_stringify(arg) (#arg)
+#define __ima_hook_measuring_stringify(ENUM, str) \
+ (__ima_stringify(measuring_ ##str)),
+
+enum ima_hooks {
+ __ima_hooks(__ima_hook_enumify)
+};
+
+static const char * const ima_hooks_measure_str[] = {
+ __ima_hooks(__ima_hook_measuring_stringify)
+};
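+
+/*
+ * For illustration: __ima_hooks(__ima_hook_enumify) expands to the
+ * enumerators NONE, FILE_CHECK, MMAP_CHECK, ..., MAX_CHECK, while
+ * __ima_hooks(__ima_hook_measuring_stringify) yields the matching
+ * strings "measuring_none", "measuring_file", "measuring_mmap", ...
+ */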
+
+static inline const char *func_measure_str(enum ima_hooks func)
+{
+ if (func >= MAX_CHECK)
+ return ima_hooks_measure_str[NONE];
+
+ return ima_hooks_measure_str[func];
+}
+
+extern const char *const func_tokens[];
+
+struct modsig;
+
+#ifdef CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS
+/*
+ * To track keys that need to be measured.
+ */
+struct ima_key_entry {
+ struct list_head list;
+ void *payload;
+ size_t payload_len;
+ char *keyring_name;
+};
+void ima_init_key_queue(void);
+bool ima_should_queue_key(void);
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len);
+void ima_process_queued_keys(void);
+#else
+static inline void ima_init_key_queue(void) {}
+static inline bool ima_should_queue_key(void) { return false; }
+static inline bool ima_queue_key(struct key *keyring,
+ const void *payload,
+ size_t payload_len) { return false; }
+static inline void ima_process_queued_keys(void) {}
+#endif /* CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS */
+
+/* LIM API function definitions */
+int ima_get_action(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct cred *cred, u32 secid, int mask,
+ enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos);
+int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file, void *buf, loff_t size,
+ enum hash_algo algo, struct modsig *modsig);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc);
+int process_buffer_measurement(struct user_namespace *mnt_userns,
+ struct inode *inode, const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *func_data,
+ bool buf_hash, u8 *digest, size_t digest_len);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename);
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry,
+ struct ima_template_desc *template_desc);
+int ima_store_template(struct ima_template_entry *entry, int violation,
+ struct inode *inode,
+ const unsigned char *filename, int pcr);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
+
+/* IMA policy related functions */
+int ima_match_policy(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct cred *cred, u32 secid, enum ima_hooks func,
+ int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos);
+void ima_init_policy(void);
+void ima_update_policy(void);
+void ima_update_policy_flags(void);
+ssize_t ima_parse_add_rule(char *);
+void ima_delete_rules(void);
+int ima_check_policy(void);
+void *ima_policy_start(struct seq_file *m, loff_t *pos);
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos);
+void ima_policy_stop(struct seq_file *m, void *v);
+int ima_policy_show(struct seq_file *m, void *v);
+
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE 0x01
+#define IMA_APPRAISE_FIX 0x02
+#define IMA_APPRAISE_LOG 0x04
+#define IMA_APPRAISE_MODULES 0x08
+#define IMA_APPRAISE_FIRMWARE 0x10
+#define IMA_APPRAISE_POLICY 0x20
+#define IMA_APPRAISE_KEXEC 0x40
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr);
+int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig);
+int ima_must_appraise(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask, enum ima_hooks func);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func);
+enum hash_algo ima_get_hash_algo(const struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
+
+#else
+static inline int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ return 0;
+}
+
+static inline int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ const struct modsig *modsig)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask,
+ enum ima_hooks func)
+{
+ return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+ struct file *file)
+{
+}
+
+static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
+ *iint,
+ enum ima_hooks func)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline enum hash_algo
+ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len)
+{
+ return ima_hash_algo;
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
+#endif /* CONFIG_IMA_APPRAISE */
+
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig);
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size);
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size);
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len);
+void ima_free_modsig(struct modsig *modsig);
+#else
+static inline int ima_read_modsig(enum ima_hooks func, const void *buf,
+ loff_t buf_len, struct modsig **modsig)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_collect_modsig(struct modsig *modsig, const void *buf,
+ loff_t size)
+{
+}
+
+static inline int ima_get_modsig_digest(const struct modsig *modsig,
+ enum hash_algo *algo, const u8 **digest,
+ u32 *digest_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ima_get_raw_modsig(const struct modsig *modsig,
+ const void **data, u32 *data_len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_free_modsig(struct modsig *modsig)
+{
+}
+#endif /* CONFIG_IMA_APPRAISE_MODSIG */
+
+/* LSM based policy rules require audit */
+#ifdef CONFIG_IMA_LSM_RULES
+
+#define ima_filter_rule_init security_audit_rule_init
+#define ima_filter_rule_free security_audit_rule_free
+#define ima_filter_rule_match security_audit_rule_match
+
+#else
+
+static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+{
+ return -EINVAL;
+}
+
+static inline void ima_filter_rule_free(void *lsmrule)
+{
+}
+
+static inline int ima_filter_rule_match(u32 secid, u32 field, u32 op,
+ void *lsmrule)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_IMA_LSM_RULES */
+
+#ifdef CONFIG_IMA_READ_POLICY
+#define POLICY_FILE_FLAGS (S_IWUSR | S_IRUSR)
+#else
+#define POLICY_FILE_FLAGS S_IWUSR
+#endif /* CONFIG_IMA_READ_POLICY */
+
+#endif /* __LINUX_IMA_H */
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
new file mode 100644
index 000000000..026c8c9db
--- /dev/null
+++ b/security/integrity/ima/ima_api.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_api.c
+ * Implements must_appraise_or_measure, collect_measurement,
+ * appraise_measurement, store_measurement and store_template.
+ */
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <linux/iversion.h>
+#include <linux/fsverity.h>
+
+#include "ima.h"
+
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry->digests);
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry,
+ struct ima_template_desc *desc)
+{
+ struct ima_template_desc *template_desc;
+ struct tpm_digest *digests;
+ int i, result = 0;
+
+ if (desc)
+ template_desc = desc;
+ else
+ template_desc = ima_template_desc_current();
+
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ *entry = NULL;
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ const struct ima_template_field *field =
+ template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(event_data,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
+
+/*
+ * ima_store_template - store ima template measurements
+ *
+ * Calculate the hash of a template entry, add the template entry
+ * to an ordered list of measurement entries maintained inside the kernel,
+ * and also update the aggregate integrity value (maintained inside the
+ * configured TPM PCR) over the hashes of the current list of measurement
+ * entries.
+ *
+ * Applications retrieve the current kernel-held measurement list through
+ * the securityfs entries in /sys/kernel/security/ima. The signed aggregate
+ * TPM PCR (called quote) can be retrieved using a TPM user space library
+ * and is used to validate the measurement list.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ima_store_template(struct ima_template_entry *entry,
+ int violation, struct inode *inode,
+ const unsigned char *filename, int pcr)
+{
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "hashing_error";
+ char *template_name = entry->template_desc->name;
+ int result;
+
+ if (!violation) {
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry);
+ if (result < 0) {
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+ template_name, op,
+ audit_cause, result, 0);
+ return result;
+ }
+ }
+ entry->pcr = pcr;
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
+ return result;
+}
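+
+/*
+ * Illustrative only: the resulting measurement list can be inspected
+ * from user space, e.g.:
+ *
+ *	cat /sys/kernel/security/ima/ascii_runtime_measurements
+ */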
+
+/*
+ * ima_add_violation - add violation to measurement list.
+ *
+ * Violations are flagged in the measurement list with zero hash values.
+ * By extending the PCR with 0xFF's instead of with zeroes, the PCR
+ * value is invalidated.
+ */
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct integrity_iint_cache *iint,
+ const char *op, const char *cause)
+{
+ struct ima_template_entry *entry;
+ struct inode *inode = file_inode(file);
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .violation = cause };
+ int violation = 1;
+ int result;
+
+ /* can overflow; used only as an indicator */
+ atomic_long_inc(&ima_htable.violations);
+
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
+ if (result < 0) {
+ result = -ENOMEM;
+ goto err_out;
+ }
+ result = ima_store_template(entry, violation, inode,
+ filename, CONFIG_IMA_MEASURE_PCR_IDX);
+ if (result < 0)
+ ima_free_template_entry(entry);
+err_out:
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, cause, result, 0);
+}
+
+/**
+ * ima_get_action - appraise & measure decision based on policy.
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: pointer to the inode associated with the object being validated
+ * @cred: pointer to credentials structure to validate
+ * @secid: secid of the task being validated
+ * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC,
+ * MAY_APPEND)
+ * @func: caller identifier
+ * @pcr: pointer filled in if matched measure policy sets pcr=
+ * @template_desc: pointer filled in if matched measure policy sets template=
+ * @func_data: func specific data, may be NULL
+ * @allowed_algos: allowlist of hash algorithms for the IMA xattr
+ *
+ * The policy is defined in terms of keypairs:
+ * subj=, obj=, type=, func=, mask=, fsmagic=
+ * subj,obj, and type: are LSM specific.
+ * func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
+ * | KEXEC_CMDLINE | KEY_CHECK | CRITICAL_DATA
+ * mask: contains the permission mask
+ * fsmagic: hex value
+ *
+ * Returns the action mask (IMA_MEASURE, IMA_APPRAISE, etc.).
+ */
+int ima_get_action(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct cred *cred, u32 secid, int mask,
+ enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos)
+{
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
+
+ flags &= ima_policy_flag;
+
+ return ima_match_policy(mnt_userns, inode, cred, secid, func, mask,
+ flags, pcr, template_desc, func_data,
+ allowed_algos);
+}
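+
+/*
+ * Illustrative only: callers test individual bits of the returned
+ * action mask, e.g.:
+ *
+ *	action = ima_get_action(mnt_userns, inode, cred, secid, mask,
+ *				func, &pcr, &template_desc, NULL, NULL);
+ *	if (action & IMA_MEASURE)
+ *		ima_store_measurement(iint, file, filename, ...);
+ */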
+
+static int ima_get_verity_digest(struct integrity_iint_cache *iint,
+ struct ima_max_digest_data *hash)
+{
+ enum hash_algo verity_alg;
+ int ret;
+
+ /*
+ * On failure, 'measure' policy rules will result in a file data
+ * hash containing 0's.
+ */
+ ret = fsverity_get_digest(iint->inode, hash->digest, &verity_alg);
+ if (ret)
+ return ret;
+
+ /*
+ * Unlike in the case of actually calculating the file hash, in
+ * the fsverity case regardless of the hash algorithm, return
+ * the verity digest to be included in the measurement list. A
+ * mismatch between the verity algorithm and the xattr signature
+ * algorithm, if one exists, will be detected later.
+ */
+ hash->hdr.algo = verity_alg;
+ hash->hdr.length = hash_digest_size[verity_alg];
+ return 0;
+}
+
+/*
+ * ima_collect_measurement - collect file measurement
+ *
+ * Calculate the file hash, if it doesn't already exist,
+ * storing the measurement and i_version in the iint.
+ *
+ * Must be called with iint->mutex held.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file, void *buf, loff_t size,
+ enum hash_algo algo, struct modsig *modsig)
+{
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
+ struct inode *real_inode = d_real_inode(file_dentry(file));
+ const char *filename = file->f_path.dentry->d_name.name;
+ struct ima_max_digest_data hash;
+ int result = 0;
+ int length;
+ void *tmpbuf;
+ u64 i_version;
+
+ /*
+ * Always collect the modsig, because IMA might have already collected
+ * the file digest without collecting the modsig in a previous
+ * measurement rule.
+ */
+ if (modsig)
+ ima_collect_modsig(modsig, buf, size);
+
+ if (iint->flags & IMA_COLLECTED)
+ goto out;
+
+ /*
+ * Detecting file change is based on i_version. On filesystems
+ * which do not support i_version, support was originally limited
+ * to an initial measurement/appraisal/audit, but was modified to
+ * assume the file changed.
+ */
+ i_version = inode_query_iversion(inode);
+ hash.hdr.algo = algo;
+ hash.hdr.length = hash_digest_size[algo];
+
+ /* Initialize hash digest to 0's in case of failure */
+ memset(&hash.digest, 0, sizeof(hash.digest));
+
+ if (iint->flags & IMA_VERITY_REQUIRED) {
+ result = ima_get_verity_digest(iint, &hash);
+ switch (result) {
+ case 0:
+ break;
+ case -ENODATA:
+ audit_cause = "no-verity-digest";
+ break;
+ default:
+ audit_cause = "invalid-verity-digest";
+ break;
+ }
+ } else if (buf) {
+ result = ima_calc_buffer_hash(buf, size, &hash.hdr);
+ } else {
+ result = ima_calc_file_hash(file, &hash.hdr);
+ }
+
+ if (result && result != -EBADF && result != -EINVAL)
+ goto out;
+
+ length = sizeof(hash.hdr) + hash.hdr.length;
+ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+ if (real_inode != inode) {
+ iint->real_ino = real_inode->i_ino;
+ iint->real_dev = real_inode->i_sb->s_dev;
+ }
+
+ /* Possibly temporary failure due to type of read (e.g. O_DIRECT) */
+ if (!result)
+ iint->flags |= IMA_COLLECTED;
+out:
+ if (result) {
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data", audit_cause,
+ result, 0);
+ }
+ return result;
+}
+
+/*
+ * ima_store_measurement - store file measurement
+ *
+ * Create an "ima" template and then store the template by calling
+ * ima_store_template.
+ *
+ * We only get here if the inode has not already been measured,
+ * but the measurement could already exist:
+ * - multiple copies of the same file on either the same or
+ * different filesystems.
+ * - the inode was previously flushed, along with the iint info
+ * containing the hashing info.
+ *
+ * Must be called with iint->mutex held.
+ */
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc)
+{
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "ENOMEM";
+ int result = -ENOMEM;
+ struct inode *inode = file_inode(file);
+ struct ima_template_entry *entry;
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .xattr_value = xattr_value,
+ .xattr_len = xattr_len,
+ .modsig = modsig };
+ int violation = 0;
+
+ /*
+ * We still need to store the measurement in the case of MODSIG because
+ * we only have its contents to put in the list at the time of
+ * appraisal, but a file measurement from earlier might already exist in
+ * the measurement list.
+ */
+ if (iint->measured_pcrs & (0x1 << pcr) && !modsig)
+ return;
+
+ result = ima_alloc_init_template(&event_data, &entry, template_desc);
+ if (result < 0) {
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, audit_cause, result, 0);
+ return;
+ }
+
+ result = ima_store_template(entry, violation, inode, filename, pcr);
+ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
+ iint->flags |= IMA_MEASURED;
+ iint->measured_pcrs |= (0x1 << pcr);
+ }
+ if (result < 0)
+ ima_free_template_entry(entry);
+}
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename)
+{
+ struct audit_buffer *ab;
+ char *hash;
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ int i;
+
+ if (iint->flags & IMA_AUDITED)
+ return;
+
+ hash = kzalloc((iint->ima_hash->length * 2) + 1, GFP_KERNEL);
+ if (!hash)
+ return;
+
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
+ hash[i * 2] = '\0';
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_RULE);
+ if (!ab)
+ goto out;
+
+ audit_log_format(ab, "file=");
+ audit_log_untrustedstring(ab, filename);
+ audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash);
+
+ audit_log_task_info(ab);
+ audit_log_end(ab);
+
+ iint->flags |= IMA_AUDITED;
+out:
+ kfree(hash);
+ return;
+}
+
+/*
+ * ima_d_path - return a pointer to the full pathname
+ *
+ * Attempt to return a pointer to the full pathname for use in the
+ * IMA measurement list, IMA audit records, and auditing logs.
+ *
+ * On failure, return a pointer to a copy of the filename, not dname.
+ * Returning a pointer to dname could result in using the pointer
+ * after the memory has been freed.
+ */
+const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+{
+ char *pathname = NULL;
+
+ *pathbuf = __getname();
+ if (*pathbuf) {
+ pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
+ if (IS_ERR(pathname)) {
+ __putname(*pathbuf);
+ *pathbuf = NULL;
+ pathname = NULL;
+ }
+ }
+
+ if (!pathname) {
+ strscpy(namebuf, path->dentry->d_name.name, NAME_MAX);
+ pathname = namebuf;
+ }
+
+ return pathname;
+}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644
index 000000000..3e0fbbd99
--- /dev/null
+++ b/security/integrity/ima/ima_appraise.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <linux/fsverity.h>
+#include <keys/system_keyring.h>
+#include <uapi/linux/fsverity.h>
+
+#include "ima.h"
+
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+static char *ima_appraise_cmdline_default __initdata;
+core_param(ima_appraise, ima_appraise_cmdline_default, charp, 0);
+
+void __init ima_appraise_parse_cmdline(void)
+{
+ const char *str = ima_appraise_cmdline_default;
+ bool sb_state = arch_ima_get_secureboot();
+ int appraisal_state = ima_appraise;
+
+ if (!str)
+ return;
+
+ if (strncmp(str, "off", 3) == 0)
+ appraisal_state = 0;
+ else if (strncmp(str, "log", 3) == 0)
+ appraisal_state = IMA_APPRAISE_LOG;
+ else if (strncmp(str, "fix", 3) == 0)
+ appraisal_state = IMA_APPRAISE_FIX;
+ else if (strncmp(str, "enforce", 7) == 0)
+ appraisal_state = IMA_APPRAISE_ENFORCE;
+ else
+ pr_err("invalid \"%s\" appraise option", str);
+
+ /*
+ * If the appraisal state was changed but secure boot is enabled,
+ * keep the default (enforce).
+ */
+ if (sb_state) {
+ if (!(appraisal_state & IMA_APPRAISE_ENFORCE))
+ pr_info("Secure boot enabled: ignoring ima_appraise=%s option",
+ str);
+ } else {
+ ima_appraise = appraisal_state;
+ }
+}
+#endif
+
+/*
+ * is_ima_appraise_enabled - return appraise status
+ *
+ * Only returns enabled when not in ima_appraise="fix" or "log" modes.
+ */
+bool is_ima_appraise_enabled(void)
+{
+ return ima_appraise & IMA_APPRAISE_ENFORCE;
+}
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise or hash
+ */
+int ima_must_appraise(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask, enum ima_hooks func)
+{
+ u32 secid;
+
+ if (!ima_appraise)
+ return 0;
+
+ security_current_getsecid_subj(&secid);
+ return ima_match_policy(mnt_userns, inode, current_cred(), secid,
+ func, mask, IMA_APPRAISE | IMA_HASH, NULL,
+ NULL, NULL, NULL);
+}
+
+static int ima_fix_xattr(struct dentry *dentry,
+ struct integrity_iint_cache *iint)
+{
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(&init_user_ns, dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
+}
+
+/* Return specific func appraised cached result */
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ return iint->ima_mmap_status;
+ case BPRM_CHECK:
+ return iint->ima_bprm_status;
+ case CREDS_CHECK:
+ return iint->ima_creds_status;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ return iint->ima_file_status;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return iint->ima_read_status;
+ }
+}
+
+static void ima_set_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func,
+ enum integrity_status status)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->ima_mmap_status = status;
+ break;
+ case BPRM_CHECK:
+ iint->ima_bprm_status = status;
+ break;
+ case CREDS_CHECK:
+ iint->ima_creds_status = status;
+ break;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ iint->ima_file_status = status;
+ break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->ima_read_status = status;
+ break;
+ }
+}
+
+static void ima_cache_flags(struct integrity_iint_cache *iint,
+ enum ima_hooks func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
+ break;
+ case BPRM_CHECK:
+ iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
+ break;
+ case CREDS_CHECK:
+ iint->flags |= (IMA_CREDS_APPRAISED | IMA_APPRAISED);
+ break;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
+ break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->flags |= (IMA_READ_APPRAISED | IMA_APPRAISED);
+ break;
+ }
+}
+
+enum hash_algo ima_get_hash_algo(const struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ struct signature_v2_hdr *sig;
+ enum hash_algo ret;
+
+ if (!xattr_value || xattr_len < 2)
+ /* return default hash algo */
+ return ima_hash_algo;
+
+ switch (xattr_value->type) {
+ case IMA_VERITY_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 3 || xattr_len <= sizeof(*sig) ||
+ sig->hash_algo >= HASH_ALGO__LAST)
+ return ima_hash_algo;
+ return sig->hash_algo;
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig)
+ || sig->hash_algo >= HASH_ALGO__LAST)
+ return ima_hash_algo;
+ return sig->hash_algo;
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ ret = xattr_value->data[0];
+ if (ret < HASH_ALGO__LAST)
+ return ret;
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->data[16], &zero, 4))
+ return HASH_ALGO_MD5;
+ else
+ return HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ return HASH_ALGO_MD5;
+ break;
+ }
+
+ /* return default hash algo */
+ return ima_hash_algo;
+}
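+
+/*
+ * Illustrative only: an IMA_XATTR_DIGEST_NG 'security.ima' value is
+ * laid out as <type byte><algo byte><raw digest>, so for sha256 the
+ * xattr is 2 + 32 bytes long and data[0] above holds HASH_ALGO_SHA256.
+ */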
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ ssize_t ret;
+
+ ret = vfs_getxattr_alloc(&init_user_ns, dentry, XATTR_NAME_IMA,
+ (char **)xattr_value, 0, GFP_NOFS);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret;
+}
+
+/*
+ * calc_file_id_hash - calculate the hash of the ima_file_id struct data
+ * @type: xattr type [enum evm_ima_xattr_type]
+ * @algo: hash algorithm [enum hash_algo]
+ * @digest: pointer to the digest to be hashed
+ * @hash: (out) pointer to the hash
+ *
+ * IMA signature version 3 disambiguates the data that is signed by
+ * indirectly signing the hash of the ima_file_id structure data.
+ *
+ * Signing the ima_file_id struct is currently only supported for
+ * IMA_VERITY_DIGSIG type xattrs.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int calc_file_id_hash(enum evm_ima_xattr_type type,
+ enum hash_algo algo, const u8 *digest,
+ struct ima_digest_data *hash)
+{
+ struct ima_file_id file_id = {
+ .hash_type = IMA_VERITY_DIGSIG, .hash_algorithm = algo};
+ unsigned int unused = HASH_MAX_DIGESTSIZE - hash_digest_size[algo];
+
+ if (type != IMA_VERITY_DIGSIG)
+ return -EINVAL;
+
+ memcpy(file_id.hash, digest, hash_digest_size[algo]);
+
+ hash->algo = algo;
+ hash->length = hash_digest_size[algo];
+
+ return ima_calc_buffer_hash(&file_id, sizeof(file_id) - unused, hash);
+}
+
+/*
+ * xattr_verify - verify xattr digest or signature
+ *
+ * Verify whether the hash or signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ enum integrity_status *status, const char **cause)
+{
+ struct ima_max_digest_data hash;
+ struct signature_v2_hdr *sig;
+ int rc = -EINVAL, hash_start = 0;
+ int mask;
+
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ fallthrough;
+ case IMA_XATTR_DIGEST:
+ if (*status != INTEGRITY_PASS_IMMUTABLE) {
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (iint->flags & IMA_VERITY_REQUIRED)
+ *cause = "verity-signature-required";
+ else
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ } else {
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ }
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /*
+ * The xattr length may be longer: in a previous version the md5
+ * hash occupied 20 bytes in the xattr, instead of 16.
+ */
+ rc = memcmp(&xattr_value->data[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ *cause = "invalid-hash";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ *status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+
+ mask = IMA_DIGSIG_REQUIRED | IMA_VERITY_REQUIRED;
+ if ((iint->flags & mask) == mask) {
+ *cause = "verity-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ sig = (typeof(sig))xattr_value;
+ if (sig->version >= 3) {
+ *cause = "invalid-signature-version";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ *status = INTEGRITY_UNKNOWN;
+ break;
+ }
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+ break;
+ case IMA_VERITY_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (!(iint->flags & IMA_VERITY_REQUIRED)) {
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ }
+
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 3) {
+ *cause = "invalid-signature-version";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ rc = calc_file_id_hash(IMA_VERITY_DIGSIG, iint->ima_hash->algo,
+ iint->ima_hash->digest, &hash.hdr);
+ if (rc) {
+ *cause = "sigv3-hashing-error";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value,
+ xattr_len, hash.digest,
+ hash.hdr.length);
+ if (rc) {
+ *cause = "invalid-verity-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ break;
+ default:
+ *status = INTEGRITY_UNKNOWN;
+ *cause = "unknown-ima-data";
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * modsig_verify - verify modsig signature
+ *
+ * Verify whether the signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int modsig_verify(enum ima_hooks func, const struct modsig *modsig,
+ enum integrity_status *status, const char **cause)
+{
+ int rc;
+
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_IMA, modsig);
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ modsig);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ return rc;
+}
+
+/*
+ * ima_check_blacklist - determine if the binary is blacklisted.
+ *
+ * Add the hash of the blacklisted binary to the measurement list, based
+ * on policy.
+ *
+ * Returns -EPERM if the hash is blacklisted.
+ */
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ enum hash_algo hash_algo;
+ const u8 *digest = NULL;
+ u32 digestsize = 0;
+ int rc = 0;
+
+ if (!(iint->flags & IMA_CHECK_BLACKLIST))
+ return 0;
+
+ if (iint->flags & IMA_MODSIG_ALLOWED && modsig) {
+ ima_get_modsig_digest(modsig, &hash_algo, &digest, &digestsize);
+
+ rc = is_binary_blacklisted(digest, digestsize);
+ if ((rc == -EPERM) && (iint->flags & IMA_MEASURE))
+ process_buffer_measurement(&init_user_ns, NULL, digest, digestsize,
+ "blacklisted-hash", NONE,
+ pcr, NULL, false, NULL, 0);
+ }
+
+ return rc;
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig)
+{
+ static const char op[] = "appraise_data";
+ const char *cause = "unknown";
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = d_backing_inode(dentry);
+ enum integrity_status status = INTEGRITY_UNKNOWN;
+ int rc = xattr_len;
+ bool try_modsig = iint->flags & IMA_MODSIG_ALLOWED && modsig;
+
+ /* If not appraising a modsig, we need an xattr. */
+ if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
+ return INTEGRITY_UNKNOWN;
+
+ /* If reading the xattr failed and there's no modsig, error out. */
+ if (rc <= 0 && !try_modsig) {
+ if (rc && rc != -ENODATA)
+ goto out;
+
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (iint->flags & IMA_VERITY_REQUIRED)
+ cause = "verity-signature-required";
+ else
+ cause = "IMA-signature-required";
+ } else {
+ cause = "missing-hash";
+ }
+
+ status = INTEGRITY_NOLABEL;
+ if (file->f_mode & FMODE_CREATED)
+ iint->flags |= IMA_NEW_FILE;
+ if ((iint->flags & IMA_NEW_FILE) &&
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
+ status = INTEGRITY_PASS;
+ goto out;
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value,
+ rc < 0 ? 0 : rc, iint);
+ switch (status) {
+ case INTEGRITY_PASS:
+ case INTEGRITY_PASS_IMMUTABLE:
+ case INTEGRITY_UNKNOWN:
+ break;
+ case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
+ /* It's fine not to have xattrs when using a modsig. */
+ if (try_modsig)
+ break;
+ fallthrough;
+ case INTEGRITY_NOLABEL: /* No security.evm xattr. */
+ cause = "missing-HMAC";
+ goto out;
+ case INTEGRITY_FAIL_IMMUTABLE:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ cause = "invalid-fail-immutable";
+ goto out;
+ case INTEGRITY_FAIL: /* Invalid HMAC/signature. */
+ cause = "invalid-HMAC";
+ goto out;
+ default:
+ WARN_ONCE(true, "Unexpected integrity status %d\n", status);
+ }
+
+ if (xattr_value)
+ rc = xattr_verify(func, iint, xattr_value, xattr_len, &status,
+ &cause);
+
+ /*
+ * If we have a modsig and either no imasig or the imasig's key isn't
+ * known, then try verifying the modsig.
+ */
+ if (try_modsig &&
+ (!xattr_value || xattr_value->type == IMA_XATTR_DIGEST_NG ||
+ rc == -ENOKEY))
+ rc = modsig_verify(func, modsig, &status, &cause);
+
+out:
+ /*
+ * File signatures on some filesystems cannot be properly verified.
+ * When such filesystems are mounted by an untrusted mounter or on a
+ * system not willing to accept such a risk, fail the file signature
+ * verification.
+ */
+ if ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ ((inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) ||
+ (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ status = INTEGRITY_FAIL;
+ cause = "unverifiable-signature";
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else if (status != INTEGRITY_PASS) {
+ /* Fix mode, but don't replace file signatures. */
+ if ((ima_appraise & IMA_APPRAISE_FIX) && !try_modsig &&
+ (!xattr_value ||
+ xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+ if (!ima_fix_xattr(dentry, iint))
+ status = INTEGRITY_PASS;
+ }
+
+ /*
+ * Permit new files with file/EVM portable signatures, but
+ * without data.
+ */
+ if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE &&
+ test_bit(IMA_DIGSIG, &iint->atomic_flags)) {
+ status = INTEGRITY_PASS;
+ }
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else {
+ ima_cache_flags(iint, func);
+ }
+
+ ima_set_cache_status(iint, func, status);
+ return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+ struct dentry *dentry = file_dentry(file);
+ int rc = 0;
+
+ /* do not collect and update hash for digital signatures */
+ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
+ return;
+
+ if ((iint->ima_file_status != INTEGRITY_PASS) &&
+ !(iint->flags & IMA_HASH))
+ return;
+
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo, NULL);
+ if (rc < 0)
+ return;
+
+ inode_lock(file_inode(file));
+ ima_fix_xattr(dentry, iint);
+ inode_unlock(file_inode(file));
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct integrity_iint_cache *iint;
+ int action;
+
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
+ || !(inode->i_opflags & IOP_XATTR))
+ return;
+
+ action = ima_must_appraise(mnt_userns, inode, MAY_ACCESS, POST_SETATTR);
+ iint = integrity_iint_find(inode);
+ if (iint) {
+ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+ if (!action)
+ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 1;
+ }
+ return 0;
+}
+
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+ iint->measured_pcrs = 0;
+ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
+ if (digsig)
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ else
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+}
+
+/**
+ * validate_hash_algo() - Block setxattr with unsupported hash algorithms
+ * @dentry: object of the setxattr()
+ * @xattr_value: userland supplied xattr value
+ * @xattr_value_len: length of xattr_value
+ *
+ * The xattr value is mapped to its hash algorithm, and this algorithm
+ * must be built into the kernel for the setxattr to be allowed.
+ *
+ * Emit an audit message when the algorithm is invalid.
+ *
+ * Return: 0 on success, else an error.
+ */
+static int validate_hash_algo(struct dentry *dentry,
+ const struct evm_ima_xattr_data *xattr_value,
+ size_t xattr_value_len)
+{
+ char *path = NULL, *pathbuf = NULL;
+ enum hash_algo xattr_hash_algo;
+ const char *errmsg = "unavailable-hash-algorithm";
+ unsigned int allowed_hashes;
+
+ xattr_hash_algo = ima_get_hash_algo(xattr_value, xattr_value_len);
+
+ allowed_hashes = atomic_read(&ima_setxattr_allowed_hash_algorithms);
+
+ if (allowed_hashes) {
+ /* success if the algorithm is allowed in the ima policy */
+ if (allowed_hashes & (1U << xattr_hash_algo))
+ return 0;
+
+ /*
+ * We use a different audit message when the hash algorithm
+ * is denied by a policy rule rather than simply not being
+ * built into the kernel image.
+ */
+ errmsg = "denied-hash-algorithm";
+ } else {
+ if (likely(xattr_hash_algo == ima_hash_algo))
+ return 0;
+
+ /* allow any xattr using an algorithm built into the kernel */
+ if (crypto_has_alg(hash_algo_name[xattr_hash_algo], 0, 0))
+ return 0;
+ }
+
+ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!pathbuf)
+ return -EACCES;
+
+ path = dentry_path(dentry, pathbuf, PATH_MAX);
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, d_inode(dentry), path,
+ "set_data", errmsg, -EACCES, 0);
+
+ kfree(pathbuf);
+
+ return -EACCES;
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int digsig = 0;
+ int result;
+ int err;
+
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
+ if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+ return -EINVAL;
+
+ err = validate_hash_algo(dentry, xvalue, xattr_value_len);
+ if (err)
+ return err;
+
+ digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
+ } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) {
+ digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG);
+ }
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
+ ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ if (result == 1)
+ result = 0;
+ }
+ return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
+ ima_reset_appraise_flags(d_backing_inode(dentry), 0);
+ if (result == 1)
+ result = 0;
+ }
+ return result;
+}
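Taken together, ima_protect_xattr() and validate_hash_algo() mean that writing 'security.ima' from userspace requires CAP_SYS_ADMIN, a well-formed xattr type, and a digest algorithm the kernel can verify. Below is a minimal sketch of exercising this path with setxattr(2); the path and digest bytes are placeholders, and the constants are assumed to match the kernel's evm_ima_xattr_type and hash_algo enums (IMA_XATTR_DIGEST_NG = 0x04, HASH_ALGO_SHA256 = 4).

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
    /* type byte + algo byte + digest, mirroring struct evm_ima_xattr_data */
    unsigned char xattr[2 + 32] = { 0x04 /* IMA_XATTR_DIGEST_NG */,
                                    0x04 /* HASH_ALGO_SHA256 */ };

    /* placeholder digest; a real one is the file's SHA-256 hash */
    memset(xattr + 2, 0xab, 32);

    if (setxattr("/tmp/example", "security.ima",
                 xattr, sizeof(xattr), 0) != 0) {
        perror("setxattr");     /* EPERM without CAP_SYS_ADMIN */
        return 1;
    }
    return 0;
}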
diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c
new file mode 100644
index 000000000..f6aa0b47a
--- /dev/null
+++ b/security/integrity/ima/ima_asymmetric_keys.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_asymmetric_keys.c
+ * Defines an IMA hook to measure asymmetric keys on key
+ * create or update.
+ */
+
+#include <keys/asymmetric-type.h>
+#include <linux/user_namespace.h>
+#include <linux/ima.h>
+#include "ima.h"
+
+/**
+ * ima_post_key_create_or_update - measure asymmetric keys
+ * @keyring: keyring to which the key is linked
+ * @key: created or updated key
+ * @payload: The data used to instantiate or update the key.
+ * @payload_len: The length of @payload.
+ * @flags: key flags
+ * @create: flag indicating whether the key was created or updated
+ *
+ * Keys can only be measured, not appraised.
+ * The payload data used to instantiate or update the key is measured.
+ */
+void ima_post_key_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
+{
+ bool queued = false;
+
+ /* Only asymmetric keys are handled by this hook. */
+ if (key->type != &key_type_asymmetric)
+ return;
+
+ if (!payload || (payload_len == 0))
+ return;
+
+ if (ima_should_queue_key())
+ queued = ima_queue_key(keyring, payload, payload_len);
+
+ if (queued)
+ return;
+
+ /*
+ * keyring->description points to the name of the keyring
+ * (such as ".builtin_trusted_keys", ".ima", etc.) to
+ * which the given key is linked.
+ *
+ * The name of the keyring is passed in the "eventname"
+ * parameter to process_buffer_measurement() and is set
+ * in the "eventname" field in ima_event_data for
+ * the key measurement IMA event.
+ *
+ * The name of the keyring is also passed in the "keyring"
+ * parameter to process_buffer_measurement() to check
+ * if the IMA policy is configured to measure a key linked
+ * to the given keyring.
+ */
+ process_buffer_measurement(&init_user_ns, NULL, payload, payload_len,
+ keyring->description, KEY_CHECK, 0,
+ keyring->description, false, NULL, 0);
+}
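Because the hook runs on the key subsystem's post-create/update path, any userspace key load into a measured keyring can produce a measurement entry. Here is a sketch using libkeyutils' add_key(2); the certificate buffer is assumed to be supplied by the caller, and a matching "measure func=KEY_CHECK keyrings=..." policy rule must be in place for the payload to actually be measured.

#include <stdio.h>
#include <keyutils.h>

/* Load a DER-encoded X.509 certificate as an asymmetric key. */
int load_cert(const unsigned char *cert, size_t cert_len,
              key_serial_t keyring)
{
    /* a NULL description lets the kernel derive it from the cert */
    key_serial_t key = add_key("asymmetric", NULL, cert, cert_len,
                               keyring);

    if (key < 0) {
        perror("add_key");
        return -1;
    }
    /* ima_post_key_create_or_update() has now seen the payload */
    return 0;
}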
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
new file mode 100644
index 000000000..644990566
--- /dev/null
+++ b/security/integrity/ima/ima_crypto.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * File: ima_crypto.c
+ * Calculates md5/sha1 file hash, template hash, boot-aggregate hash
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
+#include <linux/file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <crypto/hash.h>
+
+#include "ima.h"
+
+/* minimum file size for ahash use */
+static unsigned long ima_ahash_minsize;
+module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
+MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
+
+/* default is 0 - 1 page. */
+static int ima_maxorder;
+static unsigned int ima_bufsize = PAGE_SIZE;
+
+static int param_set_bufsize(const char *val, const struct kernel_param *kp)
+{
+ unsigned long long size;
+ int order;
+
+ size = memparse(val, NULL);
+ order = get_order(size);
+ if (order >= MAX_ORDER)
+ return -EINVAL;
+ ima_maxorder = order;
+ ima_bufsize = PAGE_SIZE << order;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_bufsize = {
+ .set = param_set_bufsize,
+ .get = param_get_uint,
+};
+#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)
+
+module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
+MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
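Both knobs are ordinary module parameters on the built-in ima module, so they can be set on the kernel command line; param_set_bufsize() runs the value through memparse(), which accepts K/M/G suffixes. An illustrative boot line:

    ima.ahash_minsize=65536 ima.ahash_bufsize=64K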
+
+static struct crypto_shash *ima_shash_tfm;
+static struct crypto_ahash *ima_ahash_tfm;
+
+struct ima_algo_desc {
+ struct crypto_shash *tfm;
+ enum hash_algo algo;
+};
+
+int ima_sha1_idx __ro_after_init;
+int ima_hash_algo_idx __ro_after_init;
+/*
+ * Additional number of slots reserved, as needed, for SHA1
+ * and IMA default algo.
+ */
+int ima_extra_slots __ro_after_init;
+
+static struct ima_algo_desc *ima_algo_array;
+
+static int __init ima_init_ima_crypto(void)
+{
+ long rc;
+
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
+ if (IS_ERR(ima_shash_tfm)) {
+ rc = PTR_ERR(ima_shash_tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
+ return rc;
+ }
+ pr_info("Allocated hash algorithm: %s\n",
+ hash_algo_name[ima_hash_algo]);
+ return 0;
+}
+
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc, i;
+
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo == ima_hash_algo)
+ return tfm;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
+ return ima_algo_array[i].tfm;
+
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ return tfm;
+}
+
+int __init ima_init_crypto(void)
+{
+ enum hash_algo algo;
+ long rc;
+ int i;
+
+ rc = ima_init_ima_crypto();
+ if (rc)
+ return rc;
+
+ ima_sha1_idx = -1;
+ ima_hash_algo_idx = -1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (algo == HASH_ALGO_SHA1)
+ ima_sha1_idx = i;
+
+ if (algo == ima_hash_algo)
+ ima_hash_algo_idx = i;
+ }
+
+ if (ima_sha1_idx < 0) {
+ ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+ if (ima_hash_algo == HASH_ALGO_SHA1)
+ ima_hash_algo_idx = ima_sha1_idx;
+ }
+
+ if (ima_hash_algo_idx < 0)
+ ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+
+ ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*ima_algo_array), GFP_KERNEL);
+ if (!ima_algo_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ ima_algo_array[i].algo = algo;
+
+ /* unknown TPM algorithm */
+ if (algo == HASH_ALGO__LAST)
+ continue;
+
+ if (algo == ima_hash_algo) {
+ ima_algo_array[i].tfm = ima_shash_tfm;
+ continue;
+ }
+
+ ima_algo_array[i].tfm = ima_alloc_tfm(algo);
+ if (IS_ERR(ima_algo_array[i].tfm)) {
+ if (algo == HASH_ALGO_SHA1) {
+ rc = PTR_ERR(ima_algo_array[i].tfm);
+ ima_algo_array[i].tfm = NULL;
+ goto out_array;
+ }
+
+ ima_algo_array[i].tfm = NULL;
+ }
+ }
+
+ if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
+ if (ima_hash_algo == HASH_ALGO_SHA1) {
+ ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
+ } else {
+ ima_algo_array[ima_sha1_idx].tfm =
+ ima_alloc_tfm(HASH_ALGO_SHA1);
+ if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
+ rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
+ goto out_array;
+ }
+ }
+
+ ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
+ }
+
+ if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
+ ima_hash_algo_idx != ima_sha1_idx) {
+ ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
+ ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
+ }
+
+ return 0;
+out_array:
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (!ima_algo_array[i].tfm ||
+ ima_algo_array[i].tfm == ima_shash_tfm)
+ continue;
+
+ crypto_free_shash(ima_algo_array[i].tfm);
+ }
+ kfree(ima_algo_array);
+out:
+ crypto_free_shash(ima_shash_tfm);
+ return rc;
+}
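A concrete layout helps here: with a TPM 2.0 exposing SHA1 and SHA256 banks (NR_BANKS = 2) and ima_hash=sha512, the bank scan sets ima_sha1_idx = 0 but leaves ima_hash_algo_idx unset, so one extra slot is appended (ima_extra_slots = 1, ima_hash_algo_idx = 2) and ima_algo_array ends up as { sha1 (bank 0), sha256 (bank 1), sha512 (extra slot, backed by ima_shash_tfm) }.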
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ int i;
+
+ if (tfm == ima_shash_tfm)
+ return;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm == tfm)
+ return;
+
+ crypto_free_shash(tfm);
+}
+
+/**
+ * ima_alloc_pages() - Allocate contiguous pages.
+ * @max_size: Maximum amount of memory to allocate.
+ * @allocated_size: Returned size of actual allocation.
+ * @last_warn: Should the min_size allocation warn or not.
+ *
+ * Tries opportunistic allocation: first attempt to allocate max_size bytes,
+ * then keep halving the order until a zero-order allocation is reached.
+ * Allocations are attempted without generating allocation warnings unless
+ * last_warn is set; last_warn only affects the final zero-order allocation.
+ *
+ * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL).
+ *
+ * Return pointer to allocated memory, or NULL on failure.
+ */
+static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
+ int last_warn)
+{
+ void *ptr;
+ int order = ima_maxorder;
+ gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
+
+ if (order)
+ order = min(get_order(max_size), order);
+
+ for (; order; order--) {
+ ptr = (void *)__get_free_pages(gfp_mask, order);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE << order;
+ return ptr;
+ }
+ }
+
+ /* order is zero - one page */
+
+ gfp_mask = GFP_KERNEL;
+
+ if (!last_warn)
+ gfp_mask |= __GFP_NOWARN;
+
+ ptr = (void *)__get_free_pages(gfp_mask, 0);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE;
+ return ptr;
+ }
+
+ *allocated_size = 0;
+ return NULL;
+}
+
+/**
+ * ima_free_pages() - Free pages allocated by ima_alloc_pages().
+ * @ptr: Pointer to allocated pages.
+ * @size: Size of allocated buffer.
+ */
+static void ima_free_pages(void *ptr, size_t size)
+{
+ if (!ptr)
+ return;
+ free_pages((unsigned long)ptr, get_order(size));
+}
+
+static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
+{
+ struct crypto_ahash *tfm = ima_ahash_tfm;
+ int rc;
+
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo || !tfm) {
+ tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
+ if (!IS_ERR(tfm)) {
+ if (algo == ima_hash_algo)
+ ima_ahash_tfm = tfm;
+ } else {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_atfm(struct crypto_ahash *tfm)
+{
+ if (tfm != ima_ahash_tfm)
+ crypto_free_ahash(tfm);
+}
+
+static inline int ahash_wait(int err, struct crypto_wait *wait)
+{
+ err = crypto_wait_req(err, wait);
+
+ if (err)
+ pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
+
+ return err;
+}
+
+static int ima_calc_file_hash_atfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ loff_t i_size, offset;
+ char *rbuf[2] = { NULL, };
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
+ struct ahash_request *req;
+ struct scatterlist sg[1];
+ struct crypto_wait wait;
+ size_t rbuf_size[2];
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out1;
+
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out2;
+
+ /*
+ * Try to allocate maximum size of memory.
+ * Fail if even a single page cannot be allocated.
+ */
+ rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
+ if (!rbuf[0]) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+
+ /* Only allocate one buffer if that is enough. */
+ if (i_size > rbuf_size[0]) {
+ /*
+ * Try to allocate secondary buffer. If that fails fallback to
+ * using single buffering. Use previous memory allocation size
+ * as baseline for possible allocation size.
+ */
+ rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
+ &rbuf_size[1], 0);
+ }
+
+ for (offset = 0; offset < i_size; offset += rbuf_len) {
+ if (!rbuf[1] && offset) {
+ /* Not using two buffers, and it is not the first
+ * read/request; wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+ /* read buffer */
+ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+ rc = integrity_kernel_read(file, offset, rbuf[active],
+ rbuf_len);
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
+ goto out3;
+ }
+
+ if (rbuf[1] && offset) {
+ /* Using two buffers, and it is not the first
+ * read/request; wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+
+ sg_init_one(&sg[0], rbuf[active], rbuf_len);
+ ahash_request_set_crypt(req, sg, NULL, rbuf_len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ if (rbuf[1])
+ active = !active; /* swap buffers, if we use two */
+ }
+ /* wait for the last update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+out3:
+ ima_free_pages(rbuf[0], rbuf_size[0]);
+ ima_free_pages(rbuf[1], rbuf_size[1]);
+out2:
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out1:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_atfm(file, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ loff_t i_size, offset = 0;
+ char *rbuf;
+ int rc;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out;
+
+ rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ while (offset < i_size) {
+ int rbuf_len;
+
+ rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
+ if (rbuf_len < 0) {
+ rc = rbuf_len;
+ break;
+ }
+ if (rbuf_len == 0) { /* unexpected EOF */
+ rc = -EINVAL;
+ break;
+ }
+ offset += rbuf_len;
+
+ rc = crypto_shash_update(shash, rbuf, rbuf_len);
+ if (rc)
+ break;
+ }
+ kfree(rbuf);
+out:
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
+/*
+ * ima_calc_file_hash - calculate file hash
+ *
+ * Asynchronous hash (ahash) allows using HW acceleration for calculating
+ * a hash. ahash performance varies for different data sizes on different
+ * crypto accelerators. shash performance might be better for smaller files.
+ * The 'ima.ahash_minsize' module parameter allows specifying the best
+ * minimum file size for using ahash on the system.
+ *
+ * If the ima.ahash_minsize parameter is not specified, this function uses
+ * shash for the hash calculation. If ahash fails, it falls back to using
+ * shash.
+ */
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ loff_t i_size;
+ int rc;
+ struct file *f = file;
+ bool new_file_instance = false;
+
+ /*
+ * For consistency, fail files opened with the O_DIRECT flag on
+ * filesystems mounted with or without the DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ new_file_instance = true;
+ }
+
+ i_size = i_size_read(file_inode(f));
+
+ if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
+ rc = ima_calc_file_ahash(f, hash);
+ if (!rc)
+ goto out;
+ }
+
+ rc = ima_calc_file_shash(f, hash);
+out:
+ if (new_file_instance)
+ fput(f);
+ return rc;
+}
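The digest computed here is exactly what shows up in the d-ng field of the ascii measurement list, so it can be reproduced from userspace with any streaming hash API. A sketch using OpenSSL's EVP interface follows; the algorithm name is an assumption and must match the ima_hash= setting.

#include <stdio.h>
#include <openssl/evp.h>

/* Recompute a file digest the way ima_calc_file_hash() streams it. */
int hash_file(const char *path, const char *alg /* e.g. "sha256" */)
{
    unsigned char md[EVP_MAX_MD_SIZE], buf[4096];
    unsigned int md_len, i;
    const EVP_MD *evp = EVP_get_digestbyname(alg);
    EVP_MD_CTX *ctx;
    FILE *f;
    size_t n;

    if (!evp || !(f = fopen(path, "rb")))
        return -1;

    ctx = EVP_MD_CTX_new();
    EVP_DigestInit_ex(ctx, evp, NULL);
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
        EVP_DigestUpdate(ctx, buf, n);  /* page-at-a-time update */
    EVP_DigestFinal_ex(ctx, md, &md_len);
    EVP_MD_CTX_free(ctx);
    fclose(f);

    for (i = 0; i < md_len; i++)
        printf("%02x", md[i]);
    putchar('\n');
    return 0;
}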
+
+/*
+ * Calculate the hash of template data
+ */
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_entry *entry,
+ int tfm_idx)
+{
+ SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
+ struct ima_template_desc *td = entry->template_desc;
+ int num_fields = entry->template_desc->num_fields;
+ int rc, i;
+
+ shash->tfm = ima_algo_array[tfm_idx].tfm;
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ for (i = 0; i < num_fields; i++) {
+ u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+ u8 *data_to_hash = field_data[i].data;
+ u32 datalen = field_data[i].len;
+ u32 datalen_to_hash = !ima_canonical_fmt ?
+ datalen : (__force u32)cpu_to_le32(datalen);
+
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(shash,
+ (const u8 *) &datalen_to_hash,
+ sizeof(datalen_to_hash));
+ if (rc)
+ break;
+ } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+ memcpy(buffer, data_to_hash, datalen);
+ data_to_hash = buffer;
+ datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+ }
+ rc = crypto_shash_update(shash, data_to_hash, datalen);
+ if (rc)
+ break;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_entry *entry)
+{
+ u16 alg_id;
+ int rc, i;
+
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
+ if (rc)
+ return rc;
+
+ entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (i == ima_sha1_idx)
+ continue;
+
+ if (i < NR_BANKS(ima_tpm_chip)) {
+ alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ entry->digests[i].alg_id = alg_id;
+ }
+
+ /* for unmapped TPM algorithms digest is still a padded SHA1 */
+ if (!ima_algo_array[i].tfm) {
+ memcpy(entry->digests[i].digest,
+ entry->digests[ima_sha1_idx].digest,
+ TPM_DIGEST_SIZE);
+ continue;
+ }
+
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ struct ahash_request *req;
+ struct scatterlist sg;
+ struct crypto_wait wait;
+ int rc, ahash_rc = 0;
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out;
+
+ sg_init_one(&sg, buf, len);
+ ahash_request_set_crypt(req, &sg, NULL, len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ /* wait for the update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int calc_buffer_ahash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int calc_buffer_shash_tfm(const void *buf, loff_t size,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+ unsigned int len;
+ int rc;
+
+ shash->tfm = tfm;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ while (size) {
+ len = size < PAGE_SIZE ? size : PAGE_SIZE;
+ rc = crypto_shash_update(shash, buf, len);
+ if (rc)
+ break;
+ buf += len;
+ size -= len;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int calc_buffer_shash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
+
+ ima_free_tfm(tfm);
+ return rc;
+}
+
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ int rc;
+
+ if (ima_ahash_minsize && len >= ima_ahash_minsize) {
+ rc = calc_buffer_ahash(buf, len, hash);
+ if (!rc)
+ return 0;
+ }
+
+ return calc_buffer_shash(buf, len, hash);
+}
+
+static void ima_pcrread(u32 idx, struct tpm_digest *d)
+{
+ if (!ima_tpm_chip)
+ return;
+
+ if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
+ pr_err("Error Communicating to TPM chip\n");
+}
+
+/*
+ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
+ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
+ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
+ * allowing firmware to configure and enable different banks.
+ *
+ * Knowing which TPM bank is read to calculate the boot_aggregate digest
+ * needs to be conveyed to a verifier. For this reason, use the same
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
+ */
+static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ struct crypto_shash *tfm)
+{
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
+ int rc;
+ u32 i;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
+ d.alg_id);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ /* cumulative digest over TPM registers 0-7 */
+ for (i = TPM_PCR0; i < TPM_PCR8; i++) {
+ ima_pcrread(i, &d);
+ /* now accumulate with current aggregate */
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ if (rc != 0)
+ return rc;
+ }
+ /*
+ * Extend cumulative digest over TPM registers 8-9, which contain
+ * measurement for the kernel command line (reg. 8) and image (reg. 9)
+ * in a typical PCR allocation. Registers 8-9 are only included in
+ * non-SHA1 boot_aggregate digests to avoid ambiguity.
+ */
+ if (alg_id != TPM_ALG_SHA1) {
+ for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+ ima_pcrread(i, &d);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ }
+ }
+ if (!rc)
+ rc = crypto_shash_final(shash, digest);
+ return rc;
+}
+
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ u16 crypto_id, alg_id;
+ int rc, i, bank_idx = -1;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (crypto_id == hash->algo) {
+ bank_idx = i;
+ break;
+ }
+
+ if (crypto_id == HASH_ALGO_SHA256)
+ bank_idx = i;
+
+ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
+ bank_idx = i;
+ }
+
+ if (bank_idx == -1) {
+ pr_err("No suitable TPM algorithm for boot aggregate\n");
+ return 0;
+ }
+
+ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
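Conceptually the boot aggregate is just a running hash over the selected bank's PCR digests in register order, so it can be recomputed from userspace on kernels that expose per-bank PCR values through sysfs. The pcr-sha256 path below is an assumption about that sysfs layout; PCRs 0-9 are included because this sketch targets a non-SHA1 bank, matching the code above.

#include <stdio.h>
#include <openssl/evp.h>

/* Recompute a SHA-256 boot_aggregate from sysfs PCR values. */
int boot_aggregate(unsigned char *out, unsigned int *out_len)
{
    EVP_MD_CTX *ctx = EVP_MD_CTX_new();
    char path[64], hex[65];
    unsigned char digest[32];
    int pcr, i;

    EVP_DigestInit_ex(ctx, EVP_sha256(), NULL);
    for (pcr = 0; pcr <= 9; pcr++) {
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/tpm/tpm0/pcr-sha256/%d", pcr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (!fgets(hex, sizeof(hex), f)) {
            fclose(f);
            return -1;
        }
        fclose(f);
        for (i = 0; i < 32; i++)        /* hex -> raw digest */
            sscanf(hex + 2 * i, "%2hhx", &digest[i]);
        EVP_DigestUpdate(ctx, digest, sizeof(digest));
    }
    EVP_DigestFinal_ex(ctx, out, out_len);
    EVP_MD_CTX_free(ctx);
    return 0;
}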
diff --git a/security/integrity/ima/ima_efi.c b/security/integrity/ima/ima_efi.c
new file mode 100644
index 000000000..9db66fe31
--- /dev/null
+++ b/security/integrity/ima/ima_efi.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 IBM Corporation
+ */
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/ima.h>
+#include <asm/efi.h>
+
+#ifndef arch_ima_efi_boot_mode
+#define arch_ima_efi_boot_mode efi_secureboot_mode_unset
+#endif
+
+static enum efi_secureboot_mode get_sb_mode(void)
+{
+ enum efi_secureboot_mode mode;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) {
+ pr_info("ima: secureboot mode unknown, no efi\n");
+ return efi_secureboot_mode_unknown;
+ }
+
+ mode = efi_get_secureboot_mode(efi.get_variable);
+ if (mode == efi_secureboot_mode_disabled)
+ pr_info("ima: secureboot mode disabled\n");
+ else if (mode == efi_secureboot_mode_unknown)
+ pr_info("ima: secureboot mode unknown\n");
+ else
+ pr_info("ima: secureboot mode enabled\n");
+ return mode;
+}
+
+bool arch_ima_get_secureboot(void)
+{
+ static enum efi_secureboot_mode sb_mode;
+ static bool initialized;
+
+ if (!initialized && efi_enabled(EFI_BOOT)) {
+ sb_mode = arch_ima_efi_boot_mode;
+
+ if (sb_mode == efi_secureboot_mode_unset)
+ sb_mode = get_sb_mode();
+ initialized = true;
+ }
+
+ return sb_mode == efi_secureboot_mode_enabled;
+}
+
+/* secureboot arch rules */
+static const char * const sb_arch_rules[] = {
+#if !IS_ENABLED(CONFIG_KEXEC_SIG)
+ "appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig",
+#endif /* CONFIG_KEXEC_SIG */
+ "measure func=KEXEC_KERNEL_CHECK",
+#if !IS_ENABLED(CONFIG_MODULE_SIG)
+ "appraise func=MODULE_CHECK appraise_type=imasig",
+#endif
+ "measure func=MODULE_CHECK",
+ NULL
+};
+
+const char * const *arch_get_ima_policy(void)
+{
+ if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) {
+ if (IS_ENABLED(CONFIG_MODULE_SIG))
+ set_module_sig_enforced();
+ if (IS_ENABLED(CONFIG_KEXEC_SIG))
+ set_kexec_sig_enforced();
+ return sb_arch_rules;
+ }
+ return NULL;
+}
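When CONFIG_IMA_ARCH_POLICY is enabled and Secure Boot is on, these strings are handed to the policy engine ahead of any custom policy. Written out as an equivalent policy fragment (the two appraise rules only appear when CONFIG_KEXEC_SIG and CONFIG_MODULE_SIG, respectively, are disabled, as the #if guards show):

    appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig
    measure func=KEXEC_KERNEL_CHECK
    appraise func=MODULE_CHECK appraise_type=imasig
    measure func=MODULE_CHECK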
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
new file mode 100644
index 000000000..cd1683dad
--- /dev/null
+++ b/security/integrity/ima/ima_fs.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Reiner Sailer <sailer@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_fs.c
+ * implemenents security file system for reporting
+ * current measurement list and IMA statistics
+ */
+
+#include <linux/fcntl.h>
+#include <linux/kernel_read_file.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/parser.h>
+#include <linux/vmalloc.h>
+
+#include "ima.h"
+
+static DEFINE_MUTEX(ima_write_mutex);
+
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+ ima_canonical_fmt = true;
+#endif
+ return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
+static int valid_policy = 1;
+
+static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+ loff_t *ppos, atomic_long_t *val)
+{
+ char tmpbuf[32]; /* greater than largest 'long' string value */
+ ssize_t len;
+
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+}
+
+static ssize_t ima_show_htable_violations(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
+}
+
+static const struct file_operations ima_htable_violations_ops = {
+ .read = ima_show_htable_violations,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t ima_show_measurements_count(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
+}
+
+static const struct file_operations ima_measurements_count_ops = {
+ .read = ima_show_measurements_count,
+ .llseek = generic_file_llseek,
+};
+
+/* returns the ima_queue_entry at *pos, or NULL past the end of the list */
+static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct ima_queue_entry *qe;
+
+ /* we need a lock since pos could point beyond last element */
+ rcu_read_lock();
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (!l--) {
+ rcu_read_unlock();
+ return qe;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ima_queue_entry *qe = v;
+
+ /* lock protects when reading beyond last element
+ * against concurrent list-extension
+ */
+ rcu_read_lock();
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&qe->later == &ima_measurements) ? NULL : qe;
+}
+
+static void ima_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+void ima_putc(struct seq_file *m, void *data, int datalen)
+{
+ while (datalen--)
+ seq_putc(m, *(char *)data++);
+}
+
+/* print format:
+ * 32bit-le=pcr#
+ * char[20]=template digest
+ * 32bit-le=template name size
+ * char[n]=template name
+ * [eventdata length]
+ * eventdata[n]=template specific data
+ */
+int ima_measurements_show(struct seq_file *m, void *v)
+{
+ /* the list never shrinks, so we don't need a lock here */
+ struct ima_queue_entry *qe = v;
+ struct ima_template_entry *e;
+ char *template_name;
+ u32 pcr, namelen, template_data_len; /* temporary fields */
+ bool is_ima_template = false;
+ int i;
+
+ /* get entry */
+ e = qe->entry;
+ if (e == NULL)
+ return -1;
+
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
+ /*
+ * 1st: PCRIndex
+ * PCR used defaults to the same (config option) in
+ * little-endian format, unless set in policy
+ */
+ pcr = !ima_canonical_fmt ? e->pcr : (__force u32)cpu_to_le32(e->pcr);
+ ima_putc(m, &pcr, sizeof(e->pcr));
+
+ /* 2nd: template digest */
+ ima_putc(m, e->digests[ima_sha1_idx].digest, TPM_DIGEST_SIZE);
+
+ /* 3rd: template name size */
+ namelen = !ima_canonical_fmt ? strlen(template_name) :
+ (__force u32)cpu_to_le32(strlen(template_name));
+ ima_putc(m, &namelen, sizeof(namelen));
+
+ /* 4th: template name */
+ ima_putc(m, template_name, strlen(template_name));
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
+
+ if (!is_ima_template) {
+ template_data_len = !ima_canonical_fmt ? e->template_data_len :
+ (__force u32)cpu_to_le32(e->template_data_len);
+ ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+ }
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ const struct ima_template_field *field =
+ e->template_desc->fields[i];
+
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ if (is_ima_template && strcmp(field->field_id, "n") == 0)
+ show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+ field->field_show(m, show, &e->template_data[i]);
+ }
+ return 0;
+}
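The binary record layout described in the comment above is stable, which makes /sys/kernel/security/ima/binary_runtime_measurements straightforward to consume. A minimal single-record reader follows; it assumes a non-'ima' template (which carries the extra length field), native byte order (no ima_canonical_fmt), and elides bounds checking.

#include <stdio.h>
#include <stdint.h>

/* Parse one record of the binary measurement list. */
int read_record(FILE *f)
{
    uint32_t pcr, namelen, datalen;
    unsigned char digest[20];   /* SHA1 template digest */
    char name[64];

    if (fread(&pcr, sizeof(pcr), 1, f) != 1)
        return -1;              /* end of list */
    fread(digest, sizeof(digest), 1, f);
    fread(&namelen, sizeof(namelen), 1, f);
    fread(name, 1, namelen, f); /* assumes namelen < sizeof(name) */
    name[namelen] = '\0';
    fread(&datalen, sizeof(datalen), 1, f);
    fseek(f, datalen, SEEK_CUR); /* skip template-specific data */

    printf("pcr=%u template=%s datalen=%u\n", pcr, name, datalen);
    return 0;
}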
+
+static const struct seq_operations ima_measurements_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+ .show = ima_measurements_show
+};
+
+static int ima_measurements_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ima_measurements_seqops);
+}
+
+static const struct file_operations ima_measurements_ops = {
+ .open = ima_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ seq_printf(m, "%02x", *(digest + i));
+}
+
+/* print in ascii */
+static int ima_ascii_measurements_show(struct seq_file *m, void *v)
+{
+ /* the list never shrinks, so we don't need a lock here */
+ struct ima_queue_entry *qe = v;
+ struct ima_template_entry *e;
+ char *template_name;
+ int i;
+
+ /* get entry */
+ e = qe->entry;
+ if (e == NULL)
+ return -1;
+
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
+ /* 1st: PCR used (config option) */
+ seq_printf(m, "%2d ", e->pcr);
+
+ /* 2nd: SHA1 template hash */
+ ima_print_digest(m, e->digests[ima_sha1_idx].digest, TPM_DIGEST_SIZE);
+
+ /* 3rd: template name */
+ seq_printf(m, " %s", template_name);
+
+ /* 4th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations ima_ascii_measurements_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+ .show = ima_ascii_measurements_show
+};
+
+static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ima_ascii_measurements_seqops);
+}
+
+static const struct file_operations ima_ascii_measurements_ops = {
+ .open = ima_ascii_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static ssize_t ima_read_policy(char *path)
+{
+ void *data = NULL;
+ char *datap;
+ size_t size;
+ int rc, pathlen = strlen(path);
+ char *p;
+
+ /* remove \n */
+ datap = path;
+ strsep(&datap, "\n");
+
+ rc = kernel_read_file_from_path(path, 0, &data, INT_MAX, NULL,
+ READING_POLICY);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+ size = rc;
+ rc = 0;
+
+ datap = data;
+ while (size > 0 && (p = strsep(&datap, "\n"))) {
+ pr_debug("rule: %s\n", p);
+ rc = ima_parse_add_rule(p);
+ if (rc < 0)
+ break;
+ size -= rc;
+ }
+
+ vfree(data);
+ if (rc < 0)
+ return rc;
+ else if (size)
+ return -EINVAL;
+ else
+ return pathlen;
+}
+
+static ssize_t ima_write_policy(struct file *file, const char __user *buf,
+ size_t datalen, loff_t *ppos)
+{
+ char *data;
+ ssize_t result;
+
+ if (datalen >= PAGE_SIZE)
+ datalen = PAGE_SIZE - 1;
+
+ /* No partial writes. */
+ result = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ data = memdup_user_nul(buf, datalen);
+ if (IS_ERR(data)) {
+ result = PTR_ERR(data);
+ goto out;
+ }
+
+ result = mutex_lock_interruptible(&ima_write_mutex);
+ if (result < 0)
+ goto out_free;
+
+ if (data[0] == '/') {
+ result = ima_read_policy(data);
+ } else if (ima_appraise & IMA_APPRAISE_POLICY) {
+ pr_err("signed policy file (specified as an absolute pathname) required\n");
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", "signed policy required",
+ 1, 0);
+ result = -EACCES;
+ } else {
+ result = ima_parse_add_rule(data);
+ }
+ mutex_unlock(&ima_write_mutex);
+out_free:
+ kfree(data);
+out:
+ if (result < 0)
+ valid_policy = 0;
+
+ return result;
+}
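In practice a policy is loaded by writing either raw rules or the absolute path of a rules file to the securityfs entry, in one write. A sketch follows; the on-disk path is hypothetical, and with IMA_APPRAISE_POLICY set the file must also carry a valid signature.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int load_policy(void)
{
    const char *path = "/etc/ima/ima-policy\n";   /* hypothetical */
    int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);

    if (fd < 0)
        return -1;
    /* must be a single complete write; *ppos != 0 is rejected */
    if (write(fd, path, strlen(path)) < 0) {
        close(fd);
        return -1;
    }
    /* close() runs ima_release_policy(), which commits the rules */
    return close(fd);
}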
+
+static struct dentry *ima_dir;
+static struct dentry *ima_symlink;
+static struct dentry *binary_runtime_measurements;
+static struct dentry *ascii_runtime_measurements;
+static struct dentry *runtime_measurements_count;
+static struct dentry *violations;
+static struct dentry *ima_policy;
+
+enum ima_fs_flags {
+ IMA_FS_BUSY,
+};
+
+static unsigned long ima_fs_flags;
+
+#ifdef CONFIG_IMA_READ_POLICY
+static const struct seq_operations ima_policy_seqops = {
+ .start = ima_policy_start,
+ .next = ima_policy_next,
+ .stop = ima_policy_stop,
+ .show = ima_policy_show,
+};
+#endif
+
+/*
+ * ima_open_policy: sequentialize access to the policy file
+ */
+static int ima_open_policy(struct inode *inode, struct file *filp)
+{
+ if (!(filp->f_flags & O_WRONLY)) {
+#ifndef CONFIG_IMA_READ_POLICY
+ return -EACCES;
+#else
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return seq_open(filp, &ima_policy_seqops);
+#endif
+ }
+ if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
+ return -EBUSY;
+ return 0;
+}
+
+/*
+ * ima_release_policy - start using the new measure policy rules.
+ *
+ * Initially, ima_measure points to the default policy rules; now
+ * point it to the new policy rules and remove the securityfs policy
+ * file, assuming the policy is valid.
+ */
+static int ima_release_policy(struct inode *inode, struct file *file)
+{
+ const char *cause = valid_policy ? "completed" : "failed";
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return seq_release(inode, file);
+
+ if (valid_policy && ima_check_policy() < 0) {
+ cause = "failed";
+ valid_policy = 0;
+ }
+
+ pr_info("policy update %s\n", cause);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", cause, !valid_policy, 0);
+
+ if (!valid_policy) {
+ ima_delete_rules();
+ valid_policy = 1;
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+ return 0;
+ }
+
+ ima_update_policy();
+#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
+ securityfs_remove(ima_policy);
+ ima_policy = NULL;
+#elif defined(CONFIG_IMA_WRITE_POLICY)
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+#elif defined(CONFIG_IMA_READ_POLICY)
+ inode->i_mode &= ~S_IWUSR;
+#endif
+ return 0;
+}
+
+static const struct file_operations ima_measure_policy_ops = {
+ .open = ima_open_policy,
+ .write = ima_write_policy,
+ .read = seq_read,
+ .release = ima_release_policy,
+ .llseek = generic_file_llseek,
+};
+
+int __init ima_fs_init(void)
+{
+ int ret;
+
+ ima_dir = securityfs_create_dir("ima", integrity_dir);
+ if (IS_ERR(ima_dir))
+ return PTR_ERR(ima_dir);
+
+ ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima",
+ NULL);
+ if (IS_ERR(ima_symlink)) {
+ ret = PTR_ERR(ima_symlink);
+ goto out;
+ }
+
+ binary_runtime_measurements =
+ securityfs_create_file("binary_runtime_measurements",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_measurements_ops);
+ if (IS_ERR(binary_runtime_measurements)) {
+ ret = PTR_ERR(binary_runtime_measurements);
+ goto out;
+ }
+
+ ascii_runtime_measurements =
+ securityfs_create_file("ascii_runtime_measurements",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_ascii_measurements_ops);
+ if (IS_ERR(ascii_runtime_measurements)) {
+ ret = PTR_ERR(ascii_runtime_measurements);
+ goto out;
+ }
+
+ runtime_measurements_count =
+ securityfs_create_file("runtime_measurements_count",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_measurements_count_ops);
+ if (IS_ERR(runtime_measurements_count)) {
+ ret = PTR_ERR(runtime_measurements_count);
+ goto out;
+ }
+
+ violations =
+ securityfs_create_file("violations", S_IRUSR | S_IRGRP,
+ ima_dir, NULL, &ima_htable_violations_ops);
+ if (IS_ERR(violations)) {
+ ret = PTR_ERR(violations);
+ goto out;
+ }
+
+ ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS,
+ ima_dir, NULL,
+ &ima_measure_policy_ops);
+ if (IS_ERR(ima_policy)) {
+ ret = PTR_ERR(ima_policy);
+ goto out;
+ }
+
+ return 0;
+out:
+ securityfs_remove(ima_policy);
+ securityfs_remove(violations);
+ securityfs_remove(runtime_measurements_count);
+ securityfs_remove(ascii_runtime_measurements);
+ securityfs_remove(binary_runtime_measurements);
+ securityfs_remove(ima_symlink);
+ securityfs_remove(ima_dir);
+
+ return ret;
+}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
new file mode 100644
index 000000000..63979aefc
--- /dev/null
+++ b/security/integrity/ima/ima_init.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_init.c
+ * initialization and cleanup functions
+ */
+
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ima.h>
+#include <generated/utsrelease.h>
+
+#include "ima.h"
+
+/* name for boot aggregate entry */
+const char boot_aggregate_name[] = "boot_aggregate";
+struct tpm_chip *ima_tpm_chip;
+
+/* Add the boot aggregate to the IMA measurement list and extend
+ * the PCR register.
+ *
+ * Calculate the boot aggregate, a hash over TPM registers 0-7 if a
+ * TPM chip exists, or zeroes if it does not. Add the boot aggregate
+ * measurement to the measurement list and extend the PCR register.
+ *
+ * If a tpm chip does not exist, indicate the core root of trust is
+ * not hardware based by invalidating the aggregate PCR value.
+ * (The aggregate PCR value is invalidated by adding one value to
+ * the measurement list and extending the aggregate PCR value with
+ * a different value.) Violations add a zero entry to the measurement
+ * list and extend the aggregate PCR value with ff...ff's.
+ */
+static int __init ima_add_boot_aggregate(void)
+{
+ static const char op[] = "add_boot_aggregate";
+ const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
+ struct ima_event_data event_data = { .iint = iint,
+ .filename = boot_aggregate_name };
+ struct ima_max_digest_data hash;
+ int result = -ENOMEM;
+ int violation = 0;
+
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = ima_hash_algo;
+ iint->ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ /*
+ * With TPM 2.0 hash agility, TPM chips could support multiple TPM
+ * PCR banks, allowing firmware to configure and enable different
+ * banks. The SHA1 bank is not necessarily enabled.
+ *
+ * Use the same hash algorithm for reading the TPM PCRs as for
+ * calculating the boot aggregate digest. Preference is given to
+ * the configured IMA default hash algorithm. Otherwise, use the
+ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
+ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
+ * is not found.
+ */
+ if (ima_tpm_chip) {
+ result = ima_calc_boot_aggregate(&hash.hdr);
+ if (result < 0) {
+ audit_cause = "hashing_error";
+ goto err_out;
+ }
+ }
+
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
+ if (result < 0) {
+ audit_cause = "alloc_entry";
+ goto err_out;
+ }
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name,
+ CONFIG_IMA_MEASURE_PCR_IDX);
+ if (result < 0) {
+ ima_free_template_entry(entry);
+ audit_cause = "store_entry";
+ goto err_out;
+ }
+ return 0;
+err_out:
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
+ audit_cause, result, 0);
+ return result;
+}
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void)
+{
+ int unset_flags = ima_policy_flag & IMA_APPRAISE;
+
+ ima_policy_flag &= ~unset_flags;
+ integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH);
+
+ /* also load the EVM key to avoid appraisal */
+ evm_load_x509();
+
+ ima_policy_flag |= unset_flags;
+}
+#endif
+
+int __init ima_init(void)
+{
+ int rc;
+
+ ima_tpm_chip = tpm_default_chip();
+ if (!ima_tpm_chip)
+ pr_info("No TPM chip found, activating TPM-bypass!\n");
+
+ rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
+ if (rc)
+ return rc;
+
+ rc = ima_init_crypto();
+ if (rc)
+ return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
+ /* It can be called before ima_init_digests(), as it does not use the TPM. */
+ ima_load_kexec_buffer();
+
+ rc = ima_init_digests();
+ if (rc != 0)
+ return rc;
+ rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ if (rc != 0)
+ return rc;
+
+ ima_init_policy();
+
+ rc = ima_fs_init();
+ if (rc != 0)
+ return rc;
+
+ ima_init_key_queue();
+
+ ima_measure_critical_data("kernel_info", "kernel_version",
+ UTS_RELEASE, strlen(UTS_RELEASE), false,
+ NULL, 0);
+
+ return rc;
+}
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644
index 000000000..419dc405c
--- /dev/null
+++ b/security/integrity/ima/ima_kexec.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ */
+
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include <linux/of.h>
+#include <linux/ima.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+{
+ struct ima_queue_entry *qe;
+ struct seq_file file;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+ file.buf = vmalloc(segment_size);
+ if (!file.buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ file.size = segment_size;
+ file.read_pos = 0;
+ file.count = sizeof(khdr); /* reserved space */
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (file.count < file.size) {
+ khdr.count++;
+ ima_measurements_show(&file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto out;
+
+ /*
+ * fill in reserved space with some buffer details
+ * (eg. version, buffer size, number of measurements)
+ */
+ khdr.buffer_size = file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+ memcpy(file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
+ file.buf, file.count < 100 ? file.count : 100,
+ true);
+
+ *buffer_size = file.count;
+ *buffer = file.buf;
+out:
+ if (ret == -EINVAL)
+ vfree(file.buf);
+ return ret;
+}
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_mutex is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+ struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+ .buf_min = 0, .buf_max = ULONG_MAX,
+ .top_down = true };
+ unsigned long binary_runtime_size;
+
+ /* use more understandable variable names than those defined in kbuf */
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size;
+ size_t kexec_segment_size;
+ int ret;
+
+ /*
+ * Reserve an extra half page of memory for additional measurements
+ * added during the kexec load.
+ */
+ binary_runtime_size = ima_get_binary_runtime_size();
+ if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+ kexec_segment_size = ULONG_MAX;
+ else
+ kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
+ PAGE_SIZE / 2, PAGE_SIZE);
+ if ((kexec_segment_size == ULONG_MAX) ||
+ ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
+ pr_err("Binary measurement list too large.\n");
+ return;
+ }
+
+ ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+ kexec_segment_size);
+ if (!kexec_buffer) {
+ pr_err("Not enough memory for the kexec measurement buffer.\n");
+ return;
+ }
+
+ kbuf.buffer = kexec_buffer;
+ kbuf.bufsz = kexec_buffer_size;
+ kbuf.memsz = kexec_segment_size;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ vfree(kexec_buffer);
+ return;
+ }
+
+ image->ima_buffer_addr = kbuf.mem;
+ image->ima_buffer_size = kexec_segment_size;
+ image->ima_buffer = kexec_buffer;
+
+ pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+}
+#endif /* IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void __init ima_load_kexec_buffer(void)
+{
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size = 0;
+ int rc;
+
+ rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+ switch (rc) {
+ case 0:
+ rc = ima_restore_measurement_list(kexec_buffer_size,
+ kexec_buffer);
+ if (rc != 0)
+ pr_err("Failed to restore the measurement list: %d\n",
+ rc);
+
+ ima_free_kexec_buffer();
+ break;
+ case -ENOTSUPP:
+ pr_debug("Restoring the measurement list not supported\n");
+ break;
+ case -ENOENT:
+ pr_debug("No measurement list to restore\n");
+ break;
+ default:
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
new file mode 100644
index 000000000..185666d90
--- /dev/null
+++ b/security/integrity/ima/ima_main.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Integrity Measurement Architecture
+ *
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Serge Hallyn <serue@us.ibm.com>
+ * Kylene Hall <kylene@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_main.c
+ * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
+ * and ima_file_check.
+ */
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/binfmts.h>
+#include <linux/kernel_read_file.h>
+#include <linux/mount.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/ima.h>
+#include <linux/iversion.h>
+#include <linux/fs.h>
+#include <linux/iversion.h>
+
+#include "ima.h"
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
+int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
+static struct notifier_block ima_lsm_policy_notifier = {
+ .notifier_call = ima_lsm_policy_change,
+};
+
+static int __init hash_setup(char *str)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0) {
+ ima_hash_algo = HASH_ALGO_SHA1;
+ } else if (strncmp(str, "md5", 3) == 0) {
+ ima_hash_algo = HASH_ALGO_MD5;
+ } else {
+ pr_err("invalid hash algorithm \"%s\" for template \"%s\"",
+ str, IMA_TEMPLATE_IMA_NAME);
+ return 1;
+ }
+ goto out;
+ }
+
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, str);
+ if (i < 0) {
+ pr_err("invalid hash algorithm \"%s\"", str);
+ return 1;
+ }
+
+ ima_hash_algo = i;
+out:
+ hash_setup_done = 1;
+ return 1;
+}
+__setup("ima_hash=", hash_setup);
+
+enum hash_algo ima_get_current_hash_algo(void)
+{
+ return ima_hash_algo;
+}
+
+/* Prevent mmap'ing a file for execute that is already mmap'ed for write */
+static int mmap_violation_check(enum ima_hooks func, struct file *file,
+ char **pathbuf, const char **pathname,
+ char *filename)
+{
+ struct inode *inode;
+ int rc = 0;
+
+ if ((func == MMAP_CHECK) && mapping_writably_mapped(file->f_mapping)) {
+ rc = -ETXTBSY;
+ inode = file_inode(file);
+
+ if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ *pathname = ima_d_path(&file->f_path, pathbuf,
+ filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname,
+ "mmap_file", "mmapped_writers", rc, 0);
+ }
+ return rc;
+}
+
+/*
+ * ima_rdwr_violation_check
+ *
+ * Only invalidate the PCR for measured files:
+ * - Opening a file for write when it is already open for read
+ * results in a time of measure, time of use (ToMToU) error.
+ * - Opening a file for read when it is already open for write
+ * could result in a file measurement error.
+ *
+ */
+static void ima_rdwr_violation_check(struct file *file,
+ struct integrity_iint_cache *iint,
+ int must_measure,
+ char **pathbuf,
+ const char **pathname,
+ char *filename)
+{
+ struct inode *inode = file_inode(file);
+ fmode_t mode = file->f_mode;
+ bool send_tomtou = false, send_writers = false;
+
+ if (mode & FMODE_WRITE) {
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ if (!iint)
+ iint = integrity_iint_find(inode);
+ /* IMA_MEASURE is set from reader side */
+ if (iint && test_bit(IMA_MUST_MEASURE,
+ &iint->atomic_flags))
+ send_tomtou = true;
+ }
+ } else {
+ if (must_measure)
+ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
+ if (inode_is_open_for_write(inode) && must_measure)
+ send_writers = true;
+ }
+
+ if (!send_tomtou && !send_writers)
+ return;
+
+ *pathname = ima_d_path(&file->f_path, pathbuf, filename);
+
+ if (send_tomtou)
+ ima_add_violation(file, *pathname, iint,
+ "invalid_pcr", "ToMToU");
+ if (send_writers)
+ ima_add_violation(file, *pathname, iint,
+ "invalid_pcr", "open_writers");
+}
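Both violations are straightforward to reproduce, assuming an IMA measure rule covers the file; the path below is a placeholder. Opening the file for write while a measured reader still has it open yields the ToMToU case; the opposite order produces the "open_writers" entry, and both bump the violations counter exported through securityfs.

#include <fcntl.h>
#include <unistd.h>

int tomtou(void)
{
    /* the read open is measured (IMA_MUST_MEASURE gets set) */
    int rd = open("/tmp/measured", O_RDONLY);
    /* write open while the reader exists -> "ToMToU" violation */
    int wr = open("/tmp/measured", O_WRONLY);

    if (rd >= 0)
        close(rd);
    if (wr >= 0)
        close(wr);
    return 0;
}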
+
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ struct inode *inode, struct file *file)
+{
+ fmode_t mode = file->f_mode;
+ bool update;
+
+ if (!(mode & FMODE_WRITE))
+ return;
+
+ mutex_lock(&iint->mutex);
+ if (atomic_read(&inode->i_writecount) == 1) {
+ update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ &iint->atomic_flags);
+ if (!IS_I_VERSION(inode) ||
+ !inode_eq_iversion(inode, iint->version) ||
+ (iint->flags & IMA_NEW_FILE)) {
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+ iint->measured_pcrs = 0;
+ if (update)
+ ima_update_xattr(iint, file);
+ }
+ }
+ mutex_unlock(&iint->mutex);
+}
+
+/**
+ * ima_file_free - called on __fput()
+ * @file: pointer to file structure being freed
+ *
+ * Flag files that changed, based on i_version
+ */
+void ima_file_free(struct file *file)
+{
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+
+ ima_check_last_writer(iint, inode, file);
+}
+
+static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+{
+ struct inode *backing_inode, *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ char *pathbuf = NULL;
+ char filename[NAME_MAX];
+ const char *pathname = NULL;
+ int rc = 0, action, must_appraise = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ struct evm_ima_xattr_data *xattr_value = NULL;
+ struct modsig *modsig = NULL;
+ int xattr_len = 0;
+ bool violation_check;
+ enum hash_algo hash_algo;
+ unsigned int allowed_algos = 0;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return 0;
+
+ /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+ * bitmask based on the appraise/audit/measurement policy.
+ * Included is the appraise submask.
+ */
+ action = ima_get_action(file_mnt_user_ns(file), inode, cred, secid,
+ mask, func, &pcr, &template_desc, NULL,
+ &allowed_algos);
+ violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
+ (ima_policy_flag & IMA_MEASURE));
+ if (!action && !violation_check)
+ return 0;
+
+ must_appraise = action & IMA_APPRAISE;
+
+ /* Is the appraise rule hook specific? */
+ if (action & IMA_FILE_APPRAISE)
+ func = FILE_CHECK;
+
+ inode_lock(inode);
+
+ if (action) {
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ rc = -ENOMEM;
+ }
+
+ if (!rc && violation_check)
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
+ goto out;
+
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /* reset appraisal flags if ima_inode_post_setattr was called */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_NONACTION_FLAGS);
+
+ /*
+ * Re-evaluate the file if either the xattr has changed or the
+ * kernel has no way of detecting file change on the filesystem.
+ * (Limited to privileged mounted filesystems.)
+ */
+ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) ||
+ ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) &&
+ !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+
+ /* Detect and re-evaluate changes made to the backing file. */
+ backing_inode = d_real_inode(file_dentry(file));
+ if (backing_inode != inode &&
+ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
+ if (!IS_I_VERSION(backing_inode) ||
+ backing_inode->i_sb->s_dev != iint->real_dev ||
+ backing_inode->i_ino != iint->real_ino ||
+ !inode_eq_iversion(backing_inode, iint->version)) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+ }
+
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+ */
+ iint->flags |= action;
+ action &= IMA_DO_MASK;
+ action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1);
+
+ /* If target pcr is already measured, unset IMA_MEASURE action */
+ if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr)))
+ action ^= IMA_MEASURE;
+
+ /* HASH sets the digital signature and update flags, nothing else */
+ if ((action & IMA_HASH) &&
+ !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) {
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ if ((xattr_value && xattr_len > 2) &&
+ (xattr_value->type == EVM_IMA_XATTR_DIGSIG))
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ iint->flags |= IMA_HASHED;
+ action ^= IMA_HASH;
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+
+ /* Nothing to do, just return existing appraised status */
+ if (!action) {
+ if (must_appraise) {
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ if (!rc)
+ rc = ima_get_cache_status(iint, func);
+ }
+ goto out_locked;
+ }
+
+ if ((action & IMA_APPRAISE_SUBMASK) ||
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ /* read 'security.ima' */
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+
+ /*
+ * Read the appended modsig if allowed by the policy, and allow
+ * an additional measurement list entry, if needed, based on the
+ * template format and whether the file was already measured.
+ */
+ if (iint->flags & IMA_MODSIG_ALLOWED) {
+ rc = ima_read_modsig(func, buf, size, &modsig);
+
+ if (!rc && ima_template_has_modsig(template_desc) &&
+ iint->flags & IMA_MEASURED)
+ action |= IMA_MEASURE;
+ }
+ }
+
+ hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
+
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
+ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
+ goto out_locked;
+
+ if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+
+ if (action & IMA_MEASURE)
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len, modsig, pcr,
+ template_desc);
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
+ rc = ima_check_blacklist(iint, modsig, pcr);
+ if (rc != -EPERM) {
+ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file,
+ pathname, xattr_value,
+ xattr_len, modsig);
+ inode_unlock(inode);
+ }
+ if (!rc)
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ }
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+
+ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
+ rc = 0;
+
+ /* Ensure the digest was generated using an allowed algorithm */
+ if (rc == 0 && must_appraise && allowed_algos != 0 &&
+ (allowed_algos & (1U << hash_algo)) == 0) {
+ rc = -EACCES;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, file_inode(file),
+ pathname, "collect_data",
+ "denied-hash-algorithm", rc, 0);
+ }
+out_locked:
+ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
+ !(iint->flags & IMA_NEW_FILE))
+ rc = -EACCES;
+ mutex_unlock(&iint->mutex);
+ kfree(xattr_value);
+ ima_free_modsig(modsig);
+out:
+ if (pathbuf)
+ __putname(pathbuf);
+ if (must_appraise) {
+ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+ return 0;
+}
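
The mask arithmetic above works because each IMA "done" flag is defined one bit above its corresponding "do" flag, so shifting the cached done bits right by one yields exactly the actions that may be skipped. A minimal, self-contained userspace sketch of that idiom, using illustrative flag values rather than the kernel's definitions:

#include <stdio.h>

#define DO_MEASURE      0x01
#define DONE_MEASURE    (DO_MEASURE << 1)       /* "done" = "do" << 1 */
#define DO_APPRAISE     0x04
#define DONE_APPRAISE   (DO_APPRAISE << 1)
#define DONE_MASK       (DONE_MEASURE | DONE_APPRAISE)

int main(void)
{
        unsigned int cached = DONE_APPRAISE;            /* appraisal already done */
        unsigned int action = DO_MEASURE | DO_APPRAISE; /* policy wants both */

        /* Shift the completed "done" bits down onto their "do" bits. */
        action &= ~((cached & DONE_MASK) >> 1);
        printf("remaining actions: 0x%x\n", action);    /* 0x1: measure only */
        return 0;
}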
+
+/**
+ * ima_file_mmap - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured (may be NULL)
+ * @reqprot: protection requested by the application
+ * @prot: protection that will be applied by the kernel
+ * @flags: operational flags
+ *
+ * Measure files being mmapped as executable based on the ima_must_measure()
+ * policy decision.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ u32 secid;
+
+ if (file && (prot & PROT_EXEC)) {
+ security_current_getsecid_subj(&secid);
+ return process_measurement(file, current_cred(), secid, NULL,
+ 0, MAY_EXEC, MMAP_CHECK);
+ }
+
+ return 0;
+}
+
+/**
+ * ima_file_mprotect - based on policy, limit mprotect change
+ * @vma: vm_area_struct protection is set to
+ * @prot: contains the protection that will be applied by the kernel.
+ *
+ * Files can be mmap'ed read/write and later changed to execute to circumvent
+ * IMA's mmap appraisal policy rules. Due to locking issues (the mmap semaphore
+ * would be taken before i_mutex), files cannot be measured or appraised at
+ * this point. Eliminate this integrity gap by denying the mprotect
+ * PROT_EXEC change, if an mmap appraise policy rule exists.
+ *
+ * On mprotect change success, return 0. On failure, return -EACCES.
+ */
+int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot)
+{
+ struct ima_template_desc *template = NULL;
+ struct file *file;
+ char filename[NAME_MAX];
+ char *pathbuf = NULL;
+ const char *pathname = NULL;
+ struct inode *inode;
+ int result = 0;
+ int action;
+ u32 secid;
+ int pcr;
+
+ /* Is mprotect making an mmap'ed file executable? */
+ if (!(ima_policy_flag & IMA_APPRAISE) || !vma->vm_file ||
+ !(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC))
+ return 0;
+
+ security_current_getsecid_subj(&secid);
+ inode = file_inode(vma->vm_file);
+ action = ima_get_action(file_mnt_user_ns(vma->vm_file), inode,
+ current_cred(), secid, MAY_EXEC, MMAP_CHECK,
+ &pcr, &template, NULL, NULL);
+
+ /* Is the mmap'ed file in policy? */
+ if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK)))
+ return 0;
+
+ if (action & IMA_APPRAISE_SUBMASK)
+ result = -EPERM;
+
+ file = vma->vm_file;
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, pathname,
+ "collect_data", "failed-mprotect", result, 0);
+ if (pathbuf)
+ __putname(pathbuf);
+
+ return result;
+}
+
+/**
+ * ima_bprm_check - based on policy, collect/store measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * The OS prevents an executable file, already open for write, from
+ * being executed in deny_write_access(), and an executable file,
+ * already open for execute, from being modified in get_write_access().
+ * So we can be certain that what we verify and measure here is actually
+ * what is being executed.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_bprm_check(struct linux_binprm *bprm)
+{
+ int ret;
+ u32 secid;
+
+ security_current_getsecid_subj(&secid);
+ ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
+ MAY_EXEC, BPRM_CHECK);
+ if (ret)
+ return ret;
+
+ security_cred_getsecid(bprm->cred, &secid);
+ return process_measurement(bprm->file, bprm->cred, secid, NULL, 0,
+ MAY_EXEC, CREDS_CHECK);
+}
+
+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_check(struct file *file, int mask)
+{
+ u32 secid;
+
+ security_current_getsecid_subj(&secid);
+ return process_measurement(file, current_cred(), secid, NULL, 0,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
+ MAY_APPEND), FILE_CHECK);
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
+static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf,
+ size_t buf_size)
+{
+ struct integrity_iint_cache *iint = NULL, tmp_iint;
+ int rc, hash_algo;
+
+ if (ima_policy_flag) {
+ iint = integrity_iint_find(inode);
+ if (iint)
+ mutex_lock(&iint->mutex);
+ }
+
+ if ((!iint || !(iint->flags & IMA_COLLECTED)) && file) {
+ if (iint)
+ mutex_unlock(&iint->mutex);
+
+ memset(&tmp_iint, 0, sizeof(tmp_iint));
+ tmp_iint.inode = inode;
+ mutex_init(&tmp_iint.mutex);
+
+ rc = ima_collect_measurement(&tmp_iint, file, NULL, 0,
+ ima_hash_algo, NULL);
+ if (rc < 0) {
+ /* ima_hash could be allocated in case of failure. */
+ if (rc != -ENOMEM)
+ kfree(tmp_iint.ima_hash);
+
+ return -EOPNOTSUPP;
+ }
+
+ iint = &tmp_iint;
+ mutex_lock(&iint->mutex);
+ }
+
+ if (!iint)
+ return -EOPNOTSUPP;
+
+ /*
+ * ima_file_hash can be called before ima_collect_measurement has run,
+ * so we might not always have a hash.
+ */
+ if (!iint->ima_hash) {
+ mutex_unlock(&iint->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ if (buf) {
+ size_t copied_size;
+
+ copied_size = min_t(size_t, iint->ima_hash->length, buf_size);
+ memcpy(buf, iint->ima_hash->digest, copied_size);
+ }
+ hash_algo = iint->ima_hash->algo;
+ mutex_unlock(&iint->mutex);
+
+ if (iint == &tmp_iint)
+ kfree(iint->ima_hash);
+
+ return hash_algo;
+}
+
+/**
+ * ima_file_hash - return a measurement of the file
+ * @file: pointer to the file
+ * @buf: buffer in which to store the hash
+ * @buf_size: length of the buffer
+ *
+ * On success, return the hash algorithm (as defined in the enum hash_algo).
+ * If buf is not NULL, this function also outputs the hash into buf.
+ * If the hash is larger than buf_size, then only buf_size bytes will be copied.
+ * It generally just makes sense to pass a buffer capable of holding the largest
+ * possible hash: IMA_MAX_DIGEST_SIZE.
+ * The file hash returned is based on the entire file, including the appended
+ * signature.
+ *
+ * If the measurement cannot be performed, return -EOPNOTSUPP.
+ * If the parameters are incorrect, return -EINVAL.
+ */
+int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+{
+ if (!file)
+ return -EINVAL;
+
+ return __ima_inode_hash(file_inode(file), file, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(ima_file_hash);
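
A hedged sketch of a hypothetical kernel-side caller of this export, following the kernel-doc above (kernel context only, not standalone-compilable; the function name is invented for illustration):

static int example_log_file_hash(struct file *file)
{
        u8 digest[IMA_MAX_DIGEST_SIZE];         /* largest possible hash */
        int algo;

        algo = ima_file_hash(file, digest, sizeof(digest));
        if (algo < 0)
                return algo;    /* -EOPNOTSUPP or -EINVAL, per the doc */

        pr_info("hash algo %d, first digest byte %02x\n", algo, digest[0]);
        return 0;
}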
+
+/**
+ * ima_inode_hash - return the stored measurement if the inode has been hashed
+ * and is in the iint cache.
+ * @inode: pointer to the inode
+ * @buf: buffer in which to store the hash
+ * @buf_size: length of the buffer
+ *
+ * On success, return the hash algorithm (as defined in the enum hash_algo).
+ * If buf is not NULL, this function also outputs the hash into buf.
+ * If the hash is larger than buf_size, then only buf_size bytes will be copied.
+ * It generally just makes sense to pass a buffer capable of holding the largest
+ * possible hash: IMA_MAX_DIGEST_SIZE.
+ * The hash returned is based on the entire contents, including the appended
+ * signature.
+ *
+ * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP.
+ * If the parameters are incorrect, return -EINVAL.
+ */
+int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+{
+ if (!inode)
+ return -EINVAL;
+
+ return __ima_inode_hash(inode, NULL, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(ima_inode_hash);
+
+/**
+ * ima_post_create_tmpfile - mark newly created tmpfile as new
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode of the newly created tmpfile
+ *
+ * No measuring, appraising or auditing of newly created tmpfiles is needed.
+ * Skip calling process_measurement(), but indicate which newly created
+ * tmpfiles are in policy.
+ */
+void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ int must_appraise;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ must_appraise = ima_must_appraise(mnt_userns, inode, MAY_ACCESS,
+ FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ /* Nothing to do if we can't allocate memory */
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ return;
+
+ /* needed for writing the security xattrs */
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ iint->ima_file_status = INTEGRITY_PASS;
+}
+
+/**
+ * ima_post_path_mknod - mark as a new inode
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: newly created dentry
+ *
+ * Mark files created via the mknodat syscall as new, so that the
+ * file data can be written later.
+ */
+void ima_post_path_mknod(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
+{
+ struct integrity_iint_cache *iint;
+ struct inode *inode = dentry->d_inode;
+ int must_appraise;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ must_appraise = ima_must_appraise(mnt_userns, inode, MAY_ACCESS,
+ FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ /* Nothing to do if we can't allocate memory */
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ return;
+
+ /* needed for re-opening empty files */
+ iint->flags |= IMA_NEW_FILE;
+}
+
+/**
+ * ima_read_file - pre-measure/appraise hook decision based on policy
+ * @file: pointer to the file to be measured/appraised/audited
+ * @read_id: caller identifier
+ * @contents: whether a subsequent call will be made to ima_post_read_file()
+ *
+ * Permit reading a file based on policy. The policy rules are written
+ * in terms of the policy identifier. Appraising the integrity of
+ * a file requires a file descriptor.
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_read_file(struct file *file, enum kernel_read_file_id read_id,
+ bool contents)
+{
+ enum ima_hooks func;
+ u32 secid;
+
+ /*
+ * Do devices using pre-allocated memory run the risk of the
+ * firmware being accessible to the device prior to the completion
+ * of IMA's signature verification any more than when using two
+ * buffers? It may be desirable to include the buffer address
+ * in this API and walk all the dma_map_single() mappings to check.
+ */
+
+ /*
+ * There will be a call made to ima_post_read_file() with
+ * a filled buffer, so we don't need to perform an extra
+ * read early here.
+ */
+ if (contents)
+ return 0;
+
+ /* Read entire file for all partial reads. */
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_current_getsecid_subj(&secid);
+ return process_measurement(file, current_cred(), secid, NULL,
+ 0, MAY_READ, func);
+}
+
+const int read_idmap[READING_MAX_ID] = {
+ [READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_MODULE] = MODULE_CHECK,
+ [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
+ [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
+ [READING_POLICY] = POLICY_CHECK
+};
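
ima_read_file() above resolves this table with read_idmap[read_id] ?: FILE_CHECK, a designated-initializer lookup whose zero-initialized slots fall through to a default via the GNU ?: extension the kernel relies on. An illustrative userspace rendering (the enum values are made up):

#include <stdio.h>

enum hook { NO_HOOK = 0, FILE_HOOK, FW_HOOK, MODULE_HOOK };

static const enum hook idmap[6] = {
        [1] = FW_HOOK,          /* e.g. READING_FIRMWARE */
        [2] = MODULE_HOOK,      /* e.g. READING_MODULE */
};

int main(void)
{
        int read_id = 4;                        /* no explicit mapping */
        enum hook func = idmap[read_id] ?: FILE_HOOK;

        printf("func = %d\n", func);            /* falls back to FILE_HOOK */
        return 0;
}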
+
+/**
+ * ima_post_read_file - in memory collect/appraise/audit measurement
+ * @file: pointer to the file to be measured/appraised/audited
+ * @buf: pointer to the in-memory file contents
+ * @size: size of the in-memory file contents
+ * @read_id: caller identifier
+ *
+ * Measure/appraise/audit an in-memory file based on policy. Policy rules
+ * are written in terms of a policy identifier.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_post_read_file(struct file *file, void *buf, loff_t size,
+ enum kernel_read_file_id read_id)
+{
+ enum ima_hooks func;
+ u32 secid;
+
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
+ if (!file || !buf || size == 0) { /* should never happen */
+ if (ima_appraise & IMA_APPRAISE_ENFORCE)
+ return -EACCES;
+ return 0;
+ }
+
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_current_getsecid_subj(&secid);
+ return process_measurement(file, current_cred(), secid, buf, size,
+ MAY_READ, func);
+}
+
+/**
+ * ima_load_data - appraise decision based on policy
+ * @id: kernel load data caller identifier
+ * @contents: whether the full contents will be available in a later
+ * call to ima_post_load_data().
+ *
+ * Callers of this LSM hook cannot measure, appraise, or audit the
+ * data provided by userspace. Enforce policy rules requiring a file
+ * signature (e.g. a kexec'ed kernel image).
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_load_data(enum kernel_load_data_id id, bool contents)
+{
+ bool ima_enforce, sig_enforce;
+
+ ima_enforce =
+ (ima_appraise & IMA_APPRAISE_ENFORCE) == IMA_APPRAISE_ENFORCE;
+
+ switch (id) {
+ case LOADING_KEXEC_IMAGE:
+ if (IS_ENABLED(CONFIG_KEXEC_SIG)
+ && arch_ima_get_secureboot()) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES;
+ }
+
+ if (ima_enforce && (ima_appraise & IMA_APPRAISE_KEXEC)) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_FIRMWARE:
+ if (ima_enforce && (ima_appraise & IMA_APPRAISE_FIRMWARE) && !contents) {
+ pr_err("Prevent firmware sysfs fallback loading.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_MODULE:
+ sig_enforce = is_module_sig_enforced();
+
+ if (ima_enforce && (!sig_enforce
+ && (ima_appraise & IMA_APPRAISE_MODULES))) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * ima_post_load_data - appraise decision based on policy
+ * @buf: pointer to the in-memory file contents
+ * @size: size of the in-memory file contents
+ * @load_id: kernel load data caller identifier
+ * @description: @load_id-specific description of contents
+ *
+ * Measure/appraise/audit an in-memory buffer based on policy. Policy rules
+ * are written in terms of a policy identifier.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id load_id,
+ char *description)
+{
+ if (load_id == LOADING_FIRMWARE) {
+ if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("Prevent firmware loading_store.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * process_buffer_measurement - Measure the buffer or the buffer data hash
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode associated with the object being measured (NULL for KEY_CHECK)
+ * @buf: pointer to the buffer that needs to be added to the log.
+ * @size: size of the buffer (in bytes).
+ * @eventname: event name to be used for the buffer entry.
+ * @func: IMA hook
+ * @pcr: pcr to extend the measurement
+ * @func_data: func specific data, may be NULL
+ * @buf_hash: measure buffer data hash
+ * @digest: buffer the digest will be written to
+ * @digest_len: length of the digest buffer
+ *
+ * Based on policy, either the buffer data or buffer data hash is measured
+ *
+ * Return: 0 if the buffer has been successfully measured, 1 if the digest
+ * has been written to the passed location but not added to a measurement entry,
+ * a negative value otherwise.
+ */
+int process_buffer_measurement(struct user_namespace *mnt_userns,
+ struct inode *inode, const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *func_data,
+ bool buf_hash, u8 *digest, size_t digest_len)
+{
+ int ret = 0;
+ const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry = NULL;
+ struct integrity_iint_cache iint = {};
+ struct ima_event_data event_data = {.iint = &iint,
+ .filename = eventname,
+ .buf = buf,
+ .buf_len = size};
+ struct ima_template_desc *template;
+ struct ima_max_digest_data hash;
+ char digest_hash[IMA_MAX_DIGEST_SIZE];
+ int digest_hash_len = hash_digest_size[ima_hash_algo];
+ int violation = 0;
+ int action = 0;
+ u32 secid;
+
+ if (digest && digest_len < digest_hash_len)
+ return -EINVAL;
+
+ if (!ima_policy_flag && !digest)
+ return -ENOENT;
+
+ template = ima_template_desc_buf();
+ if (!template) {
+ ret = -EINVAL;
+ audit_cause = "ima_template_desc_buf";
+ goto out;
+ }
+
+ /*
+ * Both LSM hooks and auxiliary-based buffer measurements are
+ * based on policy. To avoid code duplication, differentiate
+ * between the LSM hooks and auxiliary buffer measurements,
+ * retrieving the policy rule information only for the LSM hook
+ * buffer measurements.
+ */
+ if (func) {
+ security_current_getsecid_subj(&secid);
+ action = ima_get_action(mnt_userns, inode, current_cred(),
+ secid, 0, func, &pcr, &template,
+ func_data, NULL);
+ if (!(action & IMA_MEASURE) && !digest)
+ return -ENOENT;
+ }
+
+ if (!pcr)
+ pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+
+ iint.ima_hash = &hash.hdr;
+ iint.ima_hash->algo = ima_hash_algo;
+ iint.ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ ret = ima_calc_buffer_hash(buf, size, iint.ima_hash);
+ if (ret < 0) {
+ audit_cause = "hashing_error";
+ goto out;
+ }
+
+ if (buf_hash) {
+ memcpy(digest_hash, hash.hdr.digest, digest_hash_len);
+
+ ret = ima_calc_buffer_hash(digest_hash, digest_hash_len,
+ iint.ima_hash);
+ if (ret < 0) {
+ audit_cause = "hashing_error";
+ goto out;
+ }
+
+ event_data.buf = digest_hash;
+ event_data.buf_len = digest_hash_len;
+ }
+
+ if (digest)
+ memcpy(digest, iint.ima_hash->digest, digest_hash_len);
+
+ if (!ima_policy_flag || (func && !(action & IMA_MEASURE)))
+ return 1;
+
+ ret = ima_alloc_init_template(&event_data, &entry, template);
+ if (ret < 0) {
+ audit_cause = "alloc_entry";
+ goto out;
+ }
+
+ ret = ima_store_template(entry, violation, NULL, event_data.buf, pcr);
+ if (ret < 0) {
+ audit_cause = "store_entry";
+ ima_free_template_entry(entry);
+ }
+
+out:
+ if (ret < 0)
+ integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL, eventname,
+ func_measure_str(func),
+ audit_cause, ret, 0, ret);
+
+ return ret;
+}
+
+/**
+ * ima_kexec_cmdline - measure kexec cmdline boot args
+ * @kernel_fd: file descriptor of the kexec kernel being loaded
+ * @buf: pointer to buffer
+ * @size: size of buffer
+ *
+ * Buffers can only be measured, not appraised.
+ */
+void ima_kexec_cmdline(int kernel_fd, const void *buf, int size)
+{
+ struct fd f;
+
+ if (!buf || !size)
+ return;
+
+ f = fdget(kernel_fd);
+ if (!f.file)
+ return;
+
+ process_buffer_measurement(file_mnt_user_ns(f.file), file_inode(f.file),
+ buf, size, "kexec-cmdline", KEXEC_CMDLINE, 0,
+ NULL, false, NULL, 0);
+ fdput(f);
+}
+
+/**
+ * ima_measure_critical_data - measure kernel integrity critical data
+ * @event_label: unique event label for grouping and limiting critical data
+ * @event_name: event name for the record in the IMA measurement list
+ * @buf: pointer to buffer data
+ * @buf_len: length of buffer data (in bytes)
+ * @hash: measure buffer data hash
+ * @digest: buffer the digest will be written to
+ * @digest_len: length of the digest buffer
+ *
+ * Measure data critical to the integrity of the kernel into the IMA log
+ * and extend the pcr. Examples of critical data could be various data
+ * structures, policies, and states stored in kernel memory that can
+ * impact the integrity of the system.
+ *
+ * Return: 0 if the buffer has been successfully measured, 1 if the digest
+ * has been written to the passed location but not added to a measurement entry,
+ * a negative value otherwise.
+ */
+int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len)
+{
+ if (!event_name || !event_label || !buf || !buf_len)
+ return -ENOPARAM;
+
+ return process_buffer_measurement(&init_user_ns, NULL, buf, buf_len,
+ event_name, CRITICAL_DATA, 0,
+ event_label, hash, digest,
+ digest_len);
+}
+EXPORT_SYMBOL_GPL(ima_measure_critical_data);
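
A hedged sketch of how a hypothetical in-tree caller might use this export, assuming a loaded measure rule whose label matches "example_label"; -ENOENT simply means no policy rule matched (kernel context only, names invented for illustration):

static void example_measure_state(const void *state, size_t len)
{
        int ret;

        ret = ima_measure_critical_data("example_label", "example-state",
                                        state, len, false, NULL, 0);
        if (ret && ret != -ENOENT)
                pr_warn("critical data not measured: %d\n", ret);
}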
+
+static int __init init_ima(void)
+{
+ int error;
+
+ ima_appraise_parse_cmdline();
+ ima_init_template_list();
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ error = ima_init();
+
+ if (error && strcmp(hash_algo_name[ima_hash_algo],
+ CONFIG_IMA_DEFAULT_HASH) != 0) {
+ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
+ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
+ hash_setup_done = 0;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ error = ima_init();
+ }
+
+ if (error)
+ return error;
+
+ error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
+ if (error)
+ pr_warn("Couldn't register LSM notifier, error %d\n", error);
+
+ if (!error)
+ ima_update_policy_flags();
+
+ return error;
+}
+
+late_initcall(init_ima); /* Start IMA after the TPM is available */
diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
new file mode 100644
index 000000000..3e7bee300
--- /dev/null
+++ b/security/integrity/ima/ima_modsig.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IMA support for appraising module-style appended signatures.
+ *
+ * Copyright (C) 2019 IBM Corporation
+ *
+ * Author:
+ * Thiago Jung Bauermann <bauerman@linux.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module_signature.h>
+#include <keys/asymmetric-type.h>
+#include <crypto/pkcs7.h>
+
+#include "ima.h"
+
+struct modsig {
+ struct pkcs7_message *pkcs7_msg;
+
+ enum hash_algo hash_algo;
+
+ /* This digest will go in the 'd-modsig' field of the IMA template. */
+ const u8 *digest;
+ u32 digest_size;
+
+ /*
+ * This is what will go to the measurement list if the template requires
+ * storing the signature.
+ */
+ int raw_pkcs7_len;
+ u8 raw_pkcs7[];
+};
+
+/*
+ * ima_read_modsig - Read modsig from buf.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig)
+{
+ const size_t marker_len = strlen(MODULE_SIG_STRING);
+ const struct module_signature *sig;
+ struct modsig *hdr;
+ size_t sig_len;
+ const void *p;
+ int rc;
+
+ if (buf_len <= marker_len + sizeof(*sig))
+ return -ENOENT;
+
+ p = buf + buf_len - marker_len;
+ if (memcmp(p, MODULE_SIG_STRING, marker_len))
+ return -ENOENT;
+
+ buf_len -= marker_len;
+ sig = (const struct module_signature *)(p - sizeof(*sig));
+
+ rc = mod_check_sig(sig, buf_len, func_tokens[func]);
+ if (rc)
+ return rc;
+
+ sig_len = be32_to_cpu(sig->sig_len);
+ buf_len -= sig_len + sizeof(*sig);
+
+ /* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
+ hdr = kzalloc(sizeof(*hdr) + sig_len, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
+ if (IS_ERR(hdr->pkcs7_msg)) {
+ rc = PTR_ERR(hdr->pkcs7_msg);
+ kfree(hdr);
+ return rc;
+ }
+
+ memcpy(hdr->raw_pkcs7, buf + buf_len, sig_len);
+ hdr->raw_pkcs7_len = sig_len;
+
+ /* We don't know the hash algorithm yet. */
+ hdr->hash_algo = HASH_ALGO__LAST;
+
+ *modsig = hdr;
+
+ return 0;
+}
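
The layout parsed above, back to front, is [payload][PKCS#7 blob][struct module_signature][MODULE_SIG_STRING]. A self-contained userspace sketch of the same walk, with a simplified host-endian trailer and marker standing in for the kernel structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MARKER "~Sig~"                  /* stand-in for MODULE_SIG_STRING */
struct trailer { uint32_t sig_len; };   /* stand-in for module_signature */

/* Return the signature length, storing its offset in *sig_off; -1 if none. */
static long find_appended_sig(const unsigned char *buf, size_t len,
                              size_t *sig_off)
{
        const size_t mlen = strlen(MARKER);
        struct trailer t;

        if (len <= mlen + sizeof(t))
                return -1;
        if (memcmp(buf + len - mlen, MARKER, mlen))
                return -1;                      /* no trailing marker */
        memcpy(&t, buf + len - mlen - sizeof(t), sizeof(t));
        if (t.sig_len > len - mlen - sizeof(t))
                return -1;                      /* claimed length overruns buf */
        *sig_off = len - mlen - sizeof(t) - t.sig_len;
        return t.sig_len;
}

int main(void)
{
        unsigned char img[32] = "payload";      /* bytes 0..10: payload */
        struct trailer t = { .sig_len = 4 };
        size_t off, len = 15 + sizeof(t) + strlen(MARKER);
        long n;

        memcpy(img + 11, "SIGN", 4);            /* fake 4-byte signature */
        memcpy(img + 15, &t, sizeof(t));
        memcpy(img + 15 + sizeof(t), MARKER, strlen(MARKER));

        n = find_appended_sig(img, len, &off);
        printf("sig_len=%ld at offset %zu\n", n, off);  /* 4 at offset 11 */
        return 0;
}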
+
+/**
+ * ima_collect_modsig - Calculate the file hash without the appended signature.
+ * @modsig: parsed module signature
+ * @buf: data to verify the signature on
+ * @size: data size
+ *
+ * Since the modsig is part of the file contents, the hash used in its signature
+ * isn't the same one ordinarily calculated by IMA. Therefore the PKCS#7
+ * code calculates a separate one for signature verification.
+ */
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size)
+{
+ int rc;
+
+ /*
+ * Provide the file contents (minus the appended sig) so that the PKCS7
+ * code can calculate the file hash.
+ */
+ size -= modsig->raw_pkcs7_len + strlen(MODULE_SIG_STRING) +
+ sizeof(struct module_signature);
+ rc = pkcs7_supply_detached_data(modsig->pkcs7_msg, buf, size);
+ if (rc)
+ return;
+
+ /* Ask the PKCS7 code to calculate the file hash. */
+ rc = pkcs7_get_digest(modsig->pkcs7_msg, &modsig->digest,
+ &modsig->digest_size, &modsig->hash_algo);
+}
+
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig)
+{
+ return verify_pkcs7_message_sig(NULL, 0, modsig->pkcs7_msg, keyring,
+ VERIFYING_MODULE_SIGNATURE, NULL, NULL);
+}
+
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size)
+{
+ *algo = modsig->hash_algo;
+ *digest = modsig->digest;
+ *digest_size = modsig->digest_size;
+
+ return 0;
+}
+
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len)
+{
+ *data = &modsig->raw_pkcs7;
+ *data_len = modsig->raw_pkcs7_len;
+
+ return 0;
+}
+
+void ima_free_modsig(struct modsig *modsig)
+{
+ if (!modsig)
+ return;
+
+ pkcs7_free_message(modsig->pkcs7_msg);
+ kfree(modsig);
+}
diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
new file mode 100644
index 000000000..95cc31525
--- /dev/null
+++ b/security/integrity/ima/ima_mok.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Juniper Networks, Inc.
+ *
+ * Author:
+ * Petko Manolov <petko.manolov@konsulko.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <keys/system_keyring.h>
+
+
+struct key *ima_blacklist_keyring;
+
+/*
+ * Allocate the IMA blacklist keyring
+ */
+static __init int ima_mok_init(void)
+{
+ struct key_restriction *restriction;
+
+ pr_notice("Allocating IMA blacklist keyring.\n");
+
+ restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+ if (!restriction)
+ panic("Can't allocate IMA blacklist restriction.");
+
+ restriction->check = restrict_link_by_builtin_trusted;
+
+ ima_blacklist_keyring = keyring_alloc(".ima_blacklist",
+ KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_WRITE | KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_SET_KEEP,
+ restriction, NULL);
+
+ if (IS_ERR(ima_blacklist_keyring))
+ panic("Can't allocate IMA blacklist keyring.");
+ return 0;
+}
+device_initcall(ima_mok_init);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
new file mode 100644
index 000000000..bdc40535f
--- /dev/null
+++ b/security/integrity/ima/ima_policy.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * ima_policy.c
+ * - initialize default measure policy rules
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel_read_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/magic.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/seq_file.h>
+#include <linux/ima.h>
+
+#include "ima.h"
+
+/* flags definitions */
+#define IMA_FUNC 0x0001
+#define IMA_MASK 0x0002
+#define IMA_FSMAGIC 0x0004
+#define IMA_UID 0x0008
+#define IMA_FOWNER 0x0010
+#define IMA_FSUUID 0x0020
+#define IMA_INMASK 0x0040
+#define IMA_EUID 0x0080
+#define IMA_PCR 0x0100
+#define IMA_FSNAME 0x0200
+#define IMA_KEYRINGS 0x0400
+#define IMA_LABEL 0x0800
+#define IMA_VALIDATE_ALGOS 0x1000
+#define IMA_GID 0x2000
+#define IMA_EGID 0x4000
+#define IMA_FGROUP 0x8000
+
+#define UNKNOWN 0
+#define MEASURE 0x0001 /* same as IMA_MEASURE */
+#define DONT_MEASURE 0x0002
+#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
+#define DONT_APPRAISE 0x0008
+#define AUDIT 0x0040
+#define HASH 0x0100
+#define DONT_HASH 0x0200
+
+#define INVALID_PCR(a) (((a) < 0) || \
+ (a) >= (sizeof_field(struct integrity_iint_cache, measured_pcrs) * 8))
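
INVALID_PCR() bounds the index by the bit-width of the measured_pcrs field instead of a hard-coded limit, so the check tracks the field's type. An illustrative userspace rendering with a stand-in struct:

#include <stdio.h>

struct cache { unsigned long measured_pcrs; };  /* stand-in field */

#define FIELD_BITS(type, member) (int)(sizeof(((type *)0)->member) * 8)
#define INVALID_PCR(a) (((a) < 0) || \
        (a) >= FIELD_BITS(struct cache, measured_pcrs))

int main(void)
{
        printf("pcr 10 invalid? %d\n", INVALID_PCR(10)); /* 0 */
        printf("pcr 99 invalid? %d\n", INVALID_PCR(99)); /* 1 on LP64 */
        return 0;
}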
+
+int ima_policy_flag;
+static int temp_ima_appraise;
+static int build_ima_appraise __ro_after_init;
+
+atomic_t ima_setxattr_allowed_hash_algorithms;
+
+#define MAX_LSM_RULES 6
+enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
+ LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
+};
+
+enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
+enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
+
+struct ima_rule_opt_list {
+ size_t count;
+ char *items[];
+};
+
+struct ima_rule_entry {
+ struct list_head list;
+ int action;
+ unsigned int flags;
+ enum ima_hooks func;
+ int mask;
+ unsigned long fsmagic;
+ uuid_t fsuuid;
+ kuid_t uid;
+ kgid_t gid;
+ kuid_t fowner;
+ kgid_t fgroup;
+ bool (*uid_op)(kuid_t cred_uid, kuid_t rule_uid); /* Handlers for operators */
+ bool (*gid_op)(kgid_t cred_gid, kgid_t rule_gid);
+ bool (*fowner_op)(kuid_t cred_uid, kuid_t rule_uid); /* uid_eq(), uid_gt(), uid_lt() */
+ bool (*fgroup_op)(kgid_t cred_gid, kgid_t rule_gid); /* gid_eq(), gid_gt(), gid_lt() */
+ int pcr;
+ unsigned int allowed_algos; /* bitfield of allowed hash algorithms */
+ struct {
+ void *rule; /* LSM file metadata specific */
+ char *args_p; /* audit value */
+ int type; /* audit type */
+ } lsm[MAX_LSM_RULES];
+ char *fsname;
+ struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
+ struct ima_rule_opt_list *label; /* Measure data grouped under this label */
+ struct ima_template_desc *template;
+};
+
+/*
+ * sanity check in case the kernel gains more hash algorithms than can
+ * fit in an unsigned int
+ */
+static_assert(
+ 8 * sizeof(unsigned int) >= HASH_ALGO__LAST,
+ "The bitfield allowed_algos in ima_rule_entry is too small to contain all the supported hash algorithms, consider using a bigger type");
+
+/*
+ * Without LSM specific knowledge, the default policy can only be
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, .gid,
+ * .fowner, and .fgroup
+ */
+
+/*
+ * The minimum rule set to allow for full TCB coverage. Measures all files
+ * opened or mmapped for exec and everything read by root. Dangerous because
+ * normal users can easily run the machine out of memory simply by building
+ * and running executables.
+ */
+static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
+ {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC}
+};
+
+static struct ima_rule_entry original_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_MASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = POLICY_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
+ {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+#ifdef CONFIG_IMA_WRITE_POLICY
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq,
+ .flags = IMA_FOWNER},
+#else
+ /* force signature */
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq,
+ .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
+static struct ima_rule_entry build_appraise_rules[] __ro_after_init = {
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
+static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+};
+
+static struct ima_rule_entry critical_data_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = CRITICAL_DATA, .flags = IMA_FUNC},
+};
+
+/* An array of architecture specific rules */
+static struct ima_rule_entry *arch_policy_entry __ro_after_init;
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static LIST_HEAD(ima_temp_rules);
+static struct list_head __rcu *ima_rules = (struct list_head __rcu *)(&ima_default_rules);
+
+static int ima_policy __initdata;
+
+static int __init default_measure_policy_setup(char *str)
+{
+ if (ima_policy)
+ return 1;
+
+ ima_policy = ORIGINAL_TCB;
+ return 1;
+}
+__setup("ima_tcb", default_measure_policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static bool ima_use_secure_boot __initdata;
+static bool ima_use_critical_data __initdata;
+static bool ima_fail_unverifiable_sigs __ro_after_init;
+static int __init policy_setup(char *str)
+{
+ char *p;
+
+ while ((p = strsep(&str, " |\n")) != NULL) {
+ if (*p == ' ')
+ continue;
+ if ((strcmp(p, "tcb") == 0) && !ima_policy)
+ ima_policy = DEFAULT_TCB;
+ else if (strcmp(p, "appraise_tcb") == 0)
+ ima_use_appraise_tcb = true;
+ else if (strcmp(p, "secure_boot") == 0)
+ ima_use_secure_boot = true;
+ else if (strcmp(p, "critical_data") == 0)
+ ima_use_critical_data = true;
+ else if (strcmp(p, "fail_securely") == 0)
+ ima_fail_unverifiable_sigs = true;
+ else
+ pr_err("policy \"%s\" not found", p);
+ }
+
+ return 1;
+}
+__setup("ima_policy=", policy_setup);
+
+static int __init default_appraise_policy_setup(char *str)
+{
+ ima_use_appraise_tcb = true;
+ return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
+
+static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
+{
+ struct ima_rule_opt_list *opt_list;
+ size_t count = 0;
+ char *src_copy;
+ char *cur, *next;
+ size_t i;
+
+ src_copy = match_strdup(src);
+ if (!src_copy)
+ return ERR_PTR(-ENOMEM);
+
+ next = src_copy;
+ while ((cur = strsep(&next, "|"))) {
+ /* Don't accept an empty list item */
+ if (!(*cur)) {
+ kfree(src_copy);
+ return ERR_PTR(-EINVAL);
+ }
+ count++;
+ }
+
+ /* Don't accept an empty list */
+ if (!count) {
+ kfree(src_copy);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
+ if (!opt_list) {
+ kfree(src_copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * strsep() has already replaced all instances of '|' with '\0',
+ * leaving a byte sequence of NUL-terminated strings. Reference each
+ * string with the array of items.
+ *
+ * IMPORTANT: Ownership of the allocated buffer is transferred from
+ * src_copy to the first element in the items array. To free the
+ * buffer, kfree() must only be called on the first element of the
+ * array.
+ */
+ for (i = 0, cur = src_copy; i < count; i++) {
+ opt_list->items[i] = cur;
+ cur = strchr(cur, '\0') + 1;
+ }
+ opt_list->count = count;
+
+ return opt_list;
+}
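
The single-allocation scheme documented in the comment above, where strsep() NUL-splits one buffer, every items[] pointer aliases into it, and only items[0] is ever freed, can be shown in a few lines of self-contained userspace C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = strdup(".builtin_trusted_keys|.ima|.evm");
        char *items[8], *cur, *next = buf;
        size_t count = 0;

        if (!buf)
                return 1;

        while ((cur = strsep(&next, "|")) && count < 8)
                items[count++] = cur;           /* aliases into buf, no copy */

        for (size_t i = 0; i < count; i++)
                printf("item[%zu] = %s\n", i, items[i]);

        free(items[0]);                         /* items[0] == buf: one free */
        return 0;
}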
+
+static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
+{
+ if (!opt_list)
+ return;
+
+ if (opt_list->count) {
+ kfree(opt_list->items[0]);
+ opt_list->count = 0;
+ }
+
+ kfree(opt_list);
+}
+
+static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+}
+
+static void ima_free_rule(struct ima_rule_entry *entry)
+{
+ if (!entry)
+ return;
+
+ /*
+ * entry->template->fields may be allocated in ima_parse_rule() but that
+ * reference is owned by the corresponding ima_template_desc element in
+ * the defined_templates list and cannot be freed here
+ */
+ kfree(entry->fsname);
+ ima_free_rule_opt_list(entry->keyrings);
+ ima_lsm_free_rule(entry);
+ kfree(entry);
+}
+
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+{
+ struct ima_rule_entry *nentry;
+ int i;
+
+ /*
+ * Immutable elements are copied over as pointers and data; only
+ * lsm rules can change
+ */
+ nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
+ if (!nentry)
+ return NULL;
+
+ memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].args_p)
+ continue;
+
+ nentry->lsm[i].type = entry->lsm[i].type;
+ nentry->lsm[i].args_p = entry->lsm[i].args_p;
+
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule);
+ if (!nentry->lsm[i].rule)
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ nentry->lsm[i].args_p);
+ }
+ return nentry;
+}
+
+static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+{
+ int i;
+ struct ima_rule_entry *nentry;
+
+ nentry = ima_lsm_copy_rule(entry);
+ if (!nentry)
+ return -ENOMEM;
+
+ list_replace_rcu(&entry->list, &nentry->list);
+ synchronize_rcu();
+ /*
+ * ima_lsm_copy_rule() shallow copied all references, except for the
+ * LSM references, from entry to nentry so we only want to free the LSM
+ * references and the entry itself. All other memory references will now
+ * be owned by nentry.
+ */
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry);
+
+ return 0;
+}
+
+static bool ima_rule_contains_lsm_cond(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ if (entry->lsm[i].args_p)
+ return true;
+
+ return false;
+}
+
+/*
+ * The LSM policy can be reloaded, leaving the IMA LSM based rules referring
+ * to the old, stale LSM policy. Update the IMA LSM based rules to reflect
+ * the reloaded LSM policy.
+ */
+static void ima_lsm_update_rules(void)
+{
+ struct ima_rule_entry *entry, *e;
+ int result;
+
+ list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
+ if (!ima_rule_contains_lsm_cond(entry))
+ continue;
+
+ result = ima_lsm_update_rule(entry);
+ if (result) {
+ pr_err("lsm rule update error %d\n", result);
+ return;
+ }
+ }
+}
+
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data)
+{
+ if (event != LSM_POLICY_CHANGE)
+ return NOTIFY_DONE;
+
+ ima_lsm_update_rules();
+ return NOTIFY_OK;
+}
+
+/**
+ * ima_match_rule_data - determine whether func_data matches the policy rule
+ * @rule: a pointer to a rule
+ * @func_data: data to match against the measure rule data
+ * @cred: a pointer to a credentials structure for user validation
+ *
+ * Returns true if func_data matches an entry in the rule, false otherwise.
+ */
+static bool ima_match_rule_data(struct ima_rule_entry *rule,
+ const char *func_data,
+ const struct cred *cred)
+{
+ const struct ima_rule_opt_list *opt_list = NULL;
+ bool matched = false;
+ size_t i;
+
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+
+ switch (rule->func) {
+ case KEY_CHECK:
+ if (!rule->keyrings)
+ return true;
+
+ opt_list = rule->keyrings;
+ break;
+ case CRITICAL_DATA:
+ if (!rule->label)
+ return true;
+
+ opt_list = rule->label;
+ break;
+ default:
+ return false;
+ }
+
+ if (!func_data)
+ return false;
+
+ for (i = 0; i < opt_list->count; i++) {
+ if (!strcmp(opt_list->items[i], func_data)) {
+ matched = true;
+ break;
+ }
+ }
+
+ return matched;
+}
+
+/**
+ * ima_match_rules - determine whether an inode matches the policy rule.
+ * @rule: a pointer to a rule
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: a pointer to an inode
+ * @cred: a pointer to a credentials structure for user validation
+ * @secid: the secid of the task to be validated
+ * @func: LIM hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @func_data: func specific data, may be NULL
+ *
+ * Returns true on rule match, false on failure.
+ */
+static bool ima_match_rules(struct ima_rule_entry *rule,
+ struct user_namespace *mnt_userns,
+ struct inode *inode, const struct cred *cred,
+ u32 secid, enum ima_hooks func, int mask,
+ const char *func_data)
+{
+ int i;
+ bool result = false;
+ struct ima_rule_entry *lsm_rule = rule;
+ bool rule_reinitialized = false;
+
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
+ return false;
+
+ switch (func) {
+ case KEY_CHECK:
+ case CRITICAL_DATA:
+ return ((rule->func == func) &&
+ ima_match_rule_data(rule, func_data, cred));
+ default:
+ break;
+ }
+
+ if ((rule->flags & IMA_MASK) &&
+ (rule->mask != mask && func != POST_SETATTR))
+ return false;
+ if ((rule->flags & IMA_INMASK) &&
+ (!(rule->mask & mask) && func != POST_SETATTR))
+ return false;
+ if ((rule->flags & IMA_FSMAGIC)
+ && rule->fsmagic != inode->i_sb->s_magic)
+ return false;
+ if ((rule->flags & IMA_FSNAME)
+ && strcmp(rule->fsname, inode->i_sb->s_type->name))
+ return false;
+ if ((rule->flags & IMA_FSUUID) &&
+ !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
+ return false;
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ if (rule->flags & IMA_EUID) {
+ if (has_capability_noaudit(current, CAP_SETUID)) {
+ if (!rule->uid_op(cred->euid, rule->uid)
+ && !rule->uid_op(cred->suid, rule->uid)
+ && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ } else if (!rule->uid_op(cred->euid, rule->uid))
+ return false;
+ }
+ if ((rule->flags & IMA_GID) && !rule->gid_op(cred->gid, rule->gid))
+ return false;
+ if (rule->flags & IMA_EGID) {
+ if (has_capability_noaudit(current, CAP_SETGID)) {
+ if (!rule->gid_op(cred->egid, rule->gid)
+ && !rule->gid_op(cred->sgid, rule->gid)
+ && !rule->gid_op(cred->gid, rule->gid))
+ return false;
+ } else if (!rule->gid_op(cred->egid, rule->gid))
+ return false;
+ }
+ if ((rule->flags & IMA_FOWNER) &&
+ !rule->fowner_op(i_uid_into_mnt(mnt_userns, inode), rule->fowner))
+ return false;
+ if ((rule->flags & IMA_FGROUP) &&
+ !rule->fgroup_op(i_gid_into_mnt(mnt_userns, inode), rule->fgroup))
+ return false;
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ int rc = 0;
+ u32 osid;
+
+ if (!lsm_rule->lsm[i].rule) {
+ if (!lsm_rule->lsm[i].args_p)
+ continue;
+ else
+ return false;
+ }
+
+retry:
+ switch (i) {
+ case LSM_OBJ_USER:
+ case LSM_OBJ_ROLE:
+ case LSM_OBJ_TYPE:
+ security_inode_getsecid(inode, &osid);
+ rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
+ break;
+ case LSM_SUBJ_USER:
+ case LSM_SUBJ_ROLE:
+ case LSM_SUBJ_TYPE:
+ rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
+ break;
+ default:
+ break;
+ }
+
+ if (rc == -ESTALE && !rule_reinitialized) {
+ lsm_rule = ima_lsm_copy_rule(rule);
+ if (lsm_rule) {
+ rule_reinitialized = true;
+ goto retry;
+ }
+ }
+ if (!rc) {
+ result = false;
+ goto out;
+ }
+ }
+ result = true;
+
+out:
+ if (rule_reinitialized) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ ima_filter_rule_free(lsm_rule->lsm[i].rule);
+ kfree(lsm_rule);
+ }
+ return result;
+}
+
+/*
+ * In addition to knowing that we need to appraise the file in general,
+ * we need to differentiate between the calling hooks, for hook-specific rules.
+ */
+static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
+{
+ if (!(rule->flags & IMA_FUNC))
+ return IMA_FILE_APPRAISE;
+
+ switch (func) {
+ case MMAP_CHECK:
+ return IMA_MMAP_APPRAISE;
+ case BPRM_CHECK:
+ return IMA_BPRM_APPRAISE;
+ case CREDS_CHECK:
+ return IMA_CREDS_APPRAISE;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ return IMA_FILE_APPRAISE;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return IMA_READ_APPRAISE;
+ }
+}
+
+/**
+ * ima_match_policy - decision based on LSM and other conditions
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: pointer to an inode for which the policy decision is being made
+ * @cred: pointer to a credentials structure for which the policy decision is
+ * being made
+ * @secid: LSM secid of the task to be validated
+ * @func: IMA hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
+ * @pcr: set the pcr to extend
+ * @template_desc: the template that should be used for this rule
+ * @func_data: func specific data, may be NULL
+ * @allowed_algos: allowlist of hash algorithms for the IMA xattr
+ *
+ * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
+ * conditions.
+ *
+ * Since the IMA policy may be updated multiple times we need to lock the
+ * list when walking it. Reads are many orders of magnitude more numerous
+ * than writes, so ima_match_policy() is a classic RCU candidate.
+ */
+int ima_match_policy(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct cred *cred, u32 secid, enum ima_hooks func,
+ int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos)
+{
+ struct ima_rule_entry *entry;
+ int action = 0, actmask = flags | (flags << 1);
+ struct list_head *ima_rules_tmp;
+
+ if (template_desc && !*template_desc)
+ *template_desc = ima_template_desc_current();
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+
+ if (!(entry->action & actmask))
+ continue;
+
+ if (!ima_match_rules(entry, mnt_userns, inode, cred, secid,
+ func, mask, func_data))
+ continue;
+
+ action |= entry->flags & IMA_NONACTION_FLAGS;
+
+ action |= entry->action & IMA_DO_MASK;
+ if (entry->action & IMA_APPRAISE) {
+ action |= get_subaction(entry, func);
+ action &= ~IMA_HASH;
+ if (ima_fail_unverifiable_sigs)
+ action |= IMA_FAIL_UNVERIFIABLE_SIGS;
+
+ if (allowed_algos &&
+ entry->flags & IMA_VALIDATE_ALGOS)
+ *allowed_algos = entry->allowed_algos;
+ }
+
+ if (entry->action & IMA_DO_MASK)
+ actmask &= ~(entry->action | entry->action << 1);
+ else
+ actmask &= ~(entry->action | entry->action >> 1);
+
+ if ((pcr) && (entry->flags & IMA_PCR))
+ *pcr = entry->pcr;
+
+ if (template_desc && entry->template)
+ *template_desc = entry->template;
+
+ if (!actmask)
+ break;
+ }
+ rcu_read_unlock();
+
+ return action;
+}
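
The actmask bookkeeping above relies on each DONT_* action being defined one bit above its DO counterpart (MEASURE 0x0001 / DONT_MEASURE 0x0002, and so on, per the defines at the top of this file), so a single matching rule retires both bits of its pair. A compact userspace illustration:

#include <stdio.h>

#define MEASURE         0x1
#define DONT_MEASURE    (MEASURE << 1)
#define APPRAISE        0x4
#define DONT_APPRAISE   (APPRAISE << 1)
#define DO_MASK         (MEASURE | APPRAISE)

int main(void)
{
        int flags = MEASURE | APPRAISE;         /* decisions still wanted */
        int actmask = flags | (flags << 1);     /* seed the DONT bits too */
        int rule_action = DONT_MEASURE;         /* first matching rule */

        if (rule_action & DO_MASK)
                actmask &= ~(rule_action | rule_action << 1);
        else
                actmask &= ~(rule_action | rule_action >> 1);

        /* measure pair retired; appraise pair (0xc) still undecided */
        printf("actmask = 0x%x\n", actmask);
        return 0;
}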
+
+/**
+ * ima_update_policy_flags() - Update global IMA variables
+ *
+ * Update ima_policy_flag and ima_setxattr_allowed_hash_algorithms
+ * based on the currently loaded policy.
+ *
+ * With ima_policy_flag, the decision to short circuit out of a function
+ * or not call the function in the first place can be made earlier.
+ *
+ * With ima_setxattr_allowed_hash_algorithms, the policy can restrict the
+ * set of hash algorithms accepted when updating the security.ima xattr of
+ * a file.
+ *
+ * Context: called after a policy update and at system initialization.
+ */
+void ima_update_policy_flags(void)
+{
+ struct ima_rule_entry *entry;
+ int new_policy_flag = 0;
+ struct list_head *ima_rules_tmp;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ /*
+ * SETXATTR_CHECK rules do not implement a full policy check
+ * because rule checking would probably have a significant
+ * performance impact on setxattr(). As a consequence, only one
+ * SETXATTR_CHECK can be active at a given time.
+ * Because we want to preserve that property, we set out to use
+ * atomic_cmpxchg. Either:
+ * - the atomic was non-zero: a setxattr hash policy is
+ * already enforced, we do nothing
+ * - the atomic was zero: no setxattr policy was set, enable
+ * the setxattr hash policy
+ */
+ if (entry->func == SETXATTR_CHECK) {
+ atomic_cmpxchg(&ima_setxattr_allowed_hash_algorithms,
+ 0, entry->allowed_algos);
+ /* SETXATTR_CHECK doesn't impact ima_policy_flag */
+ continue;
+ }
+
+ if (entry->action & IMA_DO_MASK)
+ new_policy_flag |= entry->action;
+ }
+ rcu_read_unlock();
+
+ ima_appraise |= (build_ima_appraise | temp_ima_appraise);
+ if (!ima_appraise)
+ new_policy_flag &= ~IMA_APPRAISE;
+
+ ima_policy_flag = new_policy_flag;
+}
+
+static int ima_appraise_flag(enum ima_hooks func)
+{
+ if (func == MODULE_CHECK)
+ return IMA_APPRAISE_MODULES;
+ else if (func == FIRMWARE_CHECK)
+ return IMA_APPRAISE_FIRMWARE;
+ else if (func == POLICY_CHECK)
+ return IMA_APPRAISE_POLICY;
+ else if (func == KEXEC_KERNEL_CHECK)
+ return IMA_APPRAISE_KEXEC;
+ return 0;
+}
+
+static void add_rules(struct ima_rule_entry *entries, int count,
+ enum policy_rule_list policy_rule)
+{
+ int i = 0;
+
+ for (i = 0; i < count; i++) {
+ struct ima_rule_entry *entry;
+
+ if (policy_rule & IMA_DEFAULT_POLICY)
+ list_add_tail(&entries[i].list, &ima_default_rules);
+
+ if (policy_rule & IMA_CUSTOM_POLICY) {
+ entry = kmemdup(&entries[i], sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ continue;
+
+ list_add_tail(&entry->list, &ima_policy_rules);
+ }
+ if (entries[i].action == APPRAISE) {
+ if (entries != build_appraise_rules)
+ temp_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
+ else
+ build_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
+ }
+ }
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry);
+
+static int __init ima_init_arch_policy(void)
+{
+ const char * const *arch_rules;
+ const char * const *rules;
+ int arch_entries = 0;
+ int i = 0;
+
+ arch_rules = arch_get_ima_policy();
+ if (!arch_rules)
+ return arch_entries;
+
+ /* Get number of rules */
+ for (rules = arch_rules; *rules != NULL; rules++)
+ arch_entries++;
+
+ arch_policy_entry = kcalloc(arch_entries + 1,
+ sizeof(*arch_policy_entry), GFP_KERNEL);
+ if (!arch_policy_entry)
+ return 0;
+
+ /* Convert each policy rule string to struct ima_rule_entry format */
+ for (rules = arch_rules, i = 0; *rules != NULL; rules++) {
+ char rule[255];
+ int result;
+
+ result = strscpy(rule, *rules, sizeof(rule));
+
+ INIT_LIST_HEAD(&arch_policy_entry[i].list);
+ result = ima_parse_rule(rule, &arch_policy_entry[i]);
+ if (result) {
+ pr_warn("Skipping unknown architecture policy rule: %s\n",
+ rule);
+ memset(&arch_policy_entry[i], 0,
+ sizeof(*arch_policy_entry));
+ continue;
+ }
+ i++;
+ }
+ return i;
+}
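+
+/*
+ * For reference: on an x86 system booted with secure boot enabled,
+ * arch_get_ima_policy() typically hands back string rules along the
+ * lines of "measure func=KEXEC_KERNEL_CHECK" and, depending on the
+ * kernel configuration, "appraise func=KEXEC_KERNEL_CHECK
+ * appraise_type=imasig"; with secure boot disabled it returns NULL
+ * and no arch rules are added.
+ */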
+
+/**
+ * ima_init_policy - initialize the default measure rules.
+ *
+ * ima_rules points to either the ima_default_rules or the new ima_policy_rules.
+ */
+void __init ima_init_policy(void)
+{
+ int build_appraise_entries, arch_entries;
+
+ /* if !ima_policy, we load NO default rules */
+ if (ima_policy)
+ add_rules(dont_measure_rules, ARRAY_SIZE(dont_measure_rules),
+ IMA_DEFAULT_POLICY);
+
+ switch (ima_policy) {
+ case ORIGINAL_TCB:
+ add_rules(original_measurement_rules,
+ ARRAY_SIZE(original_measurement_rules),
+ IMA_DEFAULT_POLICY);
+ break;
+ case DEFAULT_TCB:
+ add_rules(default_measurement_rules,
+ ARRAY_SIZE(default_measurement_rules),
+ IMA_DEFAULT_POLICY);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Based on runtime secure boot flags, insert arch specific measurement
+ * and appraise rules requiring file signatures for both the initial
+ * and custom policies, prior to other appraise rules.
+ * (Highest priority)
+ */
+ arch_entries = ima_init_arch_policy();
+ if (!arch_entries)
+ pr_info("No architecture policies found\n");
+ else
+ add_rules(arch_policy_entry, arch_entries,
+ IMA_DEFAULT_POLICY | IMA_CUSTOM_POLICY);
+
+ /*
+ * Insert the builtin "secure_boot" policy rules requiring file
+ * signatures, prior to other appraise rules.
+ */
+ if (ima_use_secure_boot)
+ add_rules(secure_boot_rules, ARRAY_SIZE(secure_boot_rules),
+ IMA_DEFAULT_POLICY);
+
+ /*
+ * Insert the build time appraise rules requiring file signatures
+ * for both the initial and custom policies, prior to other appraise
+ * rules. As the secure boot rules include all of the build time
+ * rules, add either one set of rules or the other, but not both.
+ */
+ build_appraise_entries = ARRAY_SIZE(build_appraise_rules);
+ if (build_appraise_entries) {
+ if (ima_use_secure_boot)
+ add_rules(build_appraise_rules, build_appraise_entries,
+ IMA_CUSTOM_POLICY);
+ else
+ add_rules(build_appraise_rules, build_appraise_entries,
+ IMA_DEFAULT_POLICY | IMA_CUSTOM_POLICY);
+ }
+
+ if (ima_use_appraise_tcb)
+ add_rules(default_appraise_rules,
+ ARRAY_SIZE(default_appraise_rules),
+ IMA_DEFAULT_POLICY);
+
+ if (ima_use_critical_data)
+ add_rules(critical_data_rules,
+ ARRAY_SIZE(critical_data_rules),
+ IMA_DEFAULT_POLICY);
+
+ atomic_set(&ima_setxattr_allowed_hash_algorithms, 0);
+
+ ima_update_policy_flags();
+}
+
+/* Make sure we have a valid policy, at least containing some rules. */
+int ima_check_policy(void)
+{
+ if (list_empty(&ima_temp_rules))
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * ima_update_policy - update default_rules with new measure rules
+ *
+ * Called on file .release to update the default rules with a complete new
+ * policy. What we do here is to splice ima_policy_rules and ima_temp_rules so
+ * they make a queue. The policy may be updated multiple times and this is the
+ * RCU updater.
+ *
+ * Policy rules are never deleted so ima_policy_flag gets zeroed only once when
+ * we switch from the default policy to user defined.
+ */
+void ima_update_policy(void)
+{
+ struct list_head *policy = &ima_policy_rules;
+
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
+
+ if (ima_rules != (struct list_head __rcu *)policy) {
+ ima_policy_flag = 0;
+
+ rcu_assign_pointer(ima_rules, policy);
+ /*
+ * IMA architecture specific policy rules are specified
+ * as strings and converted to an array of ima_entry_rules
+ * on boot. After loading a custom policy, free the
+ * architecture specific rules stored as an array.
+ */
+ kfree(arch_policy_entry);
+ }
+ ima_update_policy_flags();
+
+ /* Custom IMA policy has been loaded */
+ ima_process_queued_keys();
+}
+
+/* Keep the enumeration in sync with the policy_tokens! */
+enum policy_opt {
+ Opt_measure, Opt_dont_measure,
+ Opt_appraise, Opt_dont_appraise,
+ Opt_audit, Opt_hash, Opt_dont_hash,
+ Opt_obj_user, Opt_obj_role, Opt_obj_type,
+ Opt_subj_user, Opt_subj_role, Opt_subj_type,
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname, Opt_fsuuid,
+ Opt_uid_eq, Opt_euid_eq, Opt_gid_eq, Opt_egid_eq,
+ Opt_fowner_eq, Opt_fgroup_eq,
+ Opt_uid_gt, Opt_euid_gt, Opt_gid_gt, Opt_egid_gt,
+ Opt_fowner_gt, Opt_fgroup_gt,
+ Opt_uid_lt, Opt_euid_lt, Opt_gid_lt, Opt_egid_lt,
+ Opt_fowner_lt, Opt_fgroup_lt,
+ Opt_digest_type,
+ Opt_appraise_type, Opt_appraise_flag, Opt_appraise_algos,
+ Opt_permit_directio, Opt_pcr, Opt_template, Opt_keyrings,
+ Opt_label, Opt_err
+};
+
+static const match_table_t policy_tokens = {
+ {Opt_measure, "measure"},
+ {Opt_dont_measure, "dont_measure"},
+ {Opt_appraise, "appraise"},
+ {Opt_dont_appraise, "dont_appraise"},
+ {Opt_audit, "audit"},
+ {Opt_hash, "hash"},
+ {Opt_dont_hash, "dont_hash"},
+ {Opt_obj_user, "obj_user=%s"},
+ {Opt_obj_role, "obj_role=%s"},
+ {Opt_obj_type, "obj_type=%s"},
+ {Opt_subj_user, "subj_user=%s"},
+ {Opt_subj_role, "subj_role=%s"},
+ {Opt_subj_type, "subj_type=%s"},
+ {Opt_func, "func=%s"},
+ {Opt_mask, "mask=%s"},
+ {Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsname, "fsname=%s"},
+ {Opt_fsuuid, "fsuuid=%s"},
+ {Opt_uid_eq, "uid=%s"},
+ {Opt_euid_eq, "euid=%s"},
+ {Opt_gid_eq, "gid=%s"},
+ {Opt_egid_eq, "egid=%s"},
+ {Opt_fowner_eq, "fowner=%s"},
+ {Opt_fgroup_eq, "fgroup=%s"},
+ {Opt_uid_gt, "uid>%s"},
+ {Opt_euid_gt, "euid>%s"},
+ {Opt_gid_gt, "gid>%s"},
+ {Opt_egid_gt, "egid>%s"},
+ {Opt_fowner_gt, "fowner>%s"},
+ {Opt_fgroup_gt, "fgroup>%s"},
+ {Opt_uid_lt, "uid<%s"},
+ {Opt_euid_lt, "euid<%s"},
+ {Opt_gid_lt, "gid<%s"},
+ {Opt_egid_lt, "egid<%s"},
+ {Opt_fowner_lt, "fowner<%s"},
+ {Opt_fgroup_lt, "fgroup<%s"},
+ {Opt_digest_type, "digest_type=%s"},
+ {Opt_appraise_type, "appraise_type=%s"},
+ {Opt_appraise_flag, "appraise_flag=%s"},
+ {Opt_appraise_algos, "appraise_algos=%s"},
+ {Opt_permit_directio, "permit_directio"},
+ {Opt_pcr, "pcr=%s"},
+ {Opt_template, "template=%s"},
+ {Opt_keyrings, "keyrings=%s"},
+ {Opt_label, "label=%s"},
+ {Opt_err, NULL}
+};
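+
+/*
+ * Illustrative examples of rules this grammar accepts (the authoritative
+ * description lives in Documentation/ABI/testing/ima_policy):
+ *
+ * measure func=BPRM_CHECK
+ * dont_measure fsmagic=0x9fa0
+ * appraise func=FILE_CHECK fowner=0 appraise_type=imasig
+ * measure func=KEY_CHECK keyrings=.ima|.builtin_trusted_keys
+ */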
+
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ substring_t *args, int lsm_rule, int audit_type)
+{
+ int result;
+
+ if (entry->lsm[lsm_rule].rule)
+ return -EINVAL;
+
+ entry->lsm[lsm_rule].args_p = match_strdup(args);
+ if (!entry->lsm[lsm_rule].args_p)
+ return -ENOMEM;
+
+ entry->lsm[lsm_rule].type = audit_type;
+ result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ entry->lsm[lsm_rule].args_p,
+ &entry->lsm[lsm_rule].rule);
+ if (!entry->lsm[lsm_rule].rule) {
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ entry->lsm[lsm_rule].args_p);
+
+ if (ima_rules == (struct list_head __rcu *)(&ima_default_rules)) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ entry->lsm[lsm_rule].args_p = NULL;
+ result = -EINVAL;
+ } else
+ result = 0;
+ }
+
+ return result;
+}
+
+static void ima_log_string_op(struct audit_buffer *ab, char *key, char *value,
+ enum policy_opt rule_operator)
+{
+ if (!ab)
+ return;
+
+ switch (rule_operator) {
+ case Opt_uid_gt:
+ case Opt_euid_gt:
+ case Opt_gid_gt:
+ case Opt_egid_gt:
+ case Opt_fowner_gt:
+ case Opt_fgroup_gt:
+ audit_log_format(ab, "%s>", key);
+ break;
+ case Opt_uid_lt:
+ case Opt_euid_lt:
+ case Opt_gid_lt:
+ case Opt_egid_lt:
+ case Opt_fowner_lt:
+ case Opt_fgroup_lt:
+ audit_log_format(ab, "%s<", key);
+ break;
+ default:
+ audit_log_format(ab, "%s=", key);
+ }
+ audit_log_format(ab, "%s ", value);
+}
+
+static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+{
+ ima_log_string_op(ab, key, value, Opt_err);
+}
+
+/*
+ * Validating the appended signature included in the measurement list requires
+ * the file hash calculated without the appended signature (i.e., the 'd-modsig'
+ * field). Therefore, notify the user if they have the 'modsig' field but not
+ * the 'd-modsig' field in the template.
+ */
+static void check_template_modsig(const struct ima_template_desc *template)
+{
+#define MSG "template with 'modsig' field also needs 'd-modsig' field\n"
+ bool has_modsig, has_dmodsig;
+ static bool checked;
+ int i;
+
+ /* We only need to notify the user once. */
+ if (checked)
+ return;
+
+ has_modsig = has_dmodsig = false;
+ for (i = 0; i < template->num_fields; i++) {
+ if (!strcmp(template->fields[i]->field_id, "modsig"))
+ has_modsig = true;
+ else if (!strcmp(template->fields[i]->field_id, "d-modsig"))
+ has_dmodsig = true;
+ }
+
+ if (has_modsig && !has_dmodsig)
+ pr_notice(MSG);
+
+ checked = true;
+#undef MSG
+}
+
+/*
+ * Warn if the template does not contain the given field.
+ */
+static void check_template_field(const struct ima_template_desc *template,
+ const char *field, const char *msg)
+{
+ int i;
+
+ for (i = 0; i < template->num_fields; i++)
+ if (!strcmp(template->fields[i]->field_id, field))
+ return;
+
+ pr_notice_once("%s", msg);
+}
+
+static bool ima_validate_rule(struct ima_rule_entry *entry)
+{
+ /* Ensure that the action is set and is compatible with the flags */
+ if (entry->action == UNKNOWN)
+ return false;
+
+ if (entry->action != MEASURE && entry->flags & IMA_PCR)
+ return false;
+
+ if (entry->action != APPRAISE &&
+ entry->flags & (IMA_DIGSIG_REQUIRED | IMA_MODSIG_ALLOWED |
+ IMA_CHECK_BLACKLIST | IMA_VALIDATE_ALGOS))
+ return false;
+
+ /*
+ * The IMA_FUNC bit must be set if and only if there's a valid hook
+ * function specified, and vice versa. Enforcing this property allows
+ * for the NONE case below to validate a rule without an explicit hook
+ * function.
+ */
+ if (((entry->flags & IMA_FUNC) && entry->func == NONE) ||
+ (!(entry->flags & IMA_FUNC) && entry->func != NONE))
+ return false;
+
+ /*
+ * Ensure that the hook function is compatible with the other
+ * components of the rule
+ */
+ switch (entry->func) {
+ case NONE:
+ case FILE_CHECK:
+ case MMAP_CHECK:
+ case BPRM_CHECK:
+ case CREDS_CHECK:
+ case POST_SETATTR:
+ case FIRMWARE_CHECK:
+ case POLICY_CHECK:
+ if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC |
+ IMA_UID | IMA_FOWNER | IMA_FSUUID |
+ IMA_INMASK | IMA_EUID | IMA_PCR |
+ IMA_FSNAME | IMA_GID | IMA_EGID |
+ IMA_FGROUP | IMA_DIGSIG_REQUIRED |
+ IMA_PERMIT_DIRECTIO | IMA_VALIDATE_ALGOS |
+ IMA_VERITY_REQUIRED))
+ return false;
+
+ break;
+ case MODULE_CHECK:
+ case KEXEC_KERNEL_CHECK:
+ case KEXEC_INITRAMFS_CHECK:
+ if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC |
+ IMA_UID | IMA_FOWNER | IMA_FSUUID |
+ IMA_INMASK | IMA_EUID | IMA_PCR |
+ IMA_FSNAME | IMA_GID | IMA_EGID |
+ IMA_FGROUP | IMA_DIGSIG_REQUIRED |
+ IMA_PERMIT_DIRECTIO | IMA_MODSIG_ALLOWED |
+ IMA_CHECK_BLACKLIST | IMA_VALIDATE_ALGOS))
+ return false;
+
+ break;
+ case KEXEC_CMDLINE:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_FSMAGIC | IMA_UID |
+ IMA_FOWNER | IMA_FSUUID | IMA_EUID |
+ IMA_PCR | IMA_FSNAME | IMA_GID | IMA_EGID |
+ IMA_FGROUP))
+ return false;
+
+ break;
+ case KEY_CHECK:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_UID | IMA_GID | IMA_PCR |
+ IMA_KEYRINGS))
+ return false;
+
+ if (ima_rule_contains_lsm_cond(entry))
+ return false;
+
+ break;
+ case CRITICAL_DATA:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_UID | IMA_GID | IMA_PCR |
+ IMA_LABEL))
+ return false;
+
+ if (ima_rule_contains_lsm_cond(entry))
+ return false;
+
+ break;
+ case SETXATTR_CHECK:
+ /* any action other than APPRAISE is unsupported */
+ if (entry->action != APPRAISE)
+ return false;
+
+ /* SETXATTR_CHECK requires an appraise_algos parameter */
+ if (!(entry->flags & IMA_VALIDATE_ALGOS))
+ return false;
+
+ /*
+ * full policies are not supported; they would have too
+ * much of a performance impact
+ */
+ if (entry->flags & ~(IMA_FUNC | IMA_VALIDATE_ALGOS))
+ return false;
+
+ break;
+ default:
+ return false;
+ }
+
+ /* Ensure that combinations of flags are compatible with each other */
+ if (entry->flags & IMA_CHECK_BLACKLIST &&
+ !(entry->flags & IMA_MODSIG_ALLOWED))
+ return false;
+
+ /*
+ * Unlike for regular IMA 'appraise' policy rules where security.ima
+ * xattr may contain either a file hash or signature, the security.ima
+ * xattr for fsverity must contain a file signature (sigv3). Ensure
+ * that 'appraise' rules for fsverity require file signatures by
+ * checking that the IMA_DIGSIG_REQUIRED flag is set.
+ */
+ if (entry->action == APPRAISE &&
+ (entry->flags & IMA_VERITY_REQUIRED) &&
+ !(entry->flags & IMA_DIGSIG_REQUIRED))
+ return false;
+
+ return true;
+}
+
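+/*
+ * Convert a comma-separated list of hash algorithm names into a bitfield
+ * indexed by enum hash_algo. For example, parsing "sha256,sha512" yields
+ * (1U << HASH_ALGO_SHA256) | (1U << HASH_ALGO_SHA512), assuming both
+ * algorithms are compiled in; any unknown or unavailable name invalidates
+ * the whole list and 0 is returned.
+ */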
+static unsigned int ima_parse_appraise_algos(char *arg)
+{
+ unsigned int res = 0;
+ int idx;
+ char *token;
+
+ while ((token = strsep(&arg, ",")) != NULL) {
+ idx = match_string(hash_algo_name, HASH_ALGO__LAST, token);
+
+ if (idx < 0) {
+ pr_err("unknown hash algorithm \"%s\"",
+ token);
+ return 0;
+ }
+
+ if (!crypto_has_alg(hash_algo_name[idx], 0, 0)) {
+ pr_err("unavailable hash algorithm \"%s\", check your kernel configuration",
+ token);
+ return 0;
+ }
+
+ /* Add the hash algorithm to the 'allowed' bitfield */
+ res |= (1U << idx);
+ }
+
+ return res;
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+{
+ struct audit_buffer *ab;
+ char *from;
+ char *p;
+ bool eid_token; /* either euid or egid */
+ struct ima_template_desc *template_desc;
+ int result = 0;
+
+ ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_POLICY_RULE);
+
+ entry->uid = INVALID_UID;
+ entry->gid = INVALID_GID;
+ entry->fowner = INVALID_UID;
+ entry->fgroup = INVALID_GID;
+ entry->uid_op = &uid_eq;
+ entry->gid_op = &gid_eq;
+ entry->fowner_op = &uid_eq;
+ entry->fgroup_op = &gid_eq;
+ entry->action = UNKNOWN;
+ while ((p = strsep(&rule, " \t")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ int token;
+ unsigned long lnum;
+
+ if (result < 0)
+ break;
+ if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
+ continue;
+ token = match_token(p, policy_tokens, args);
+ switch (token) {
+ case Opt_measure:
+ ima_log_string(ab, "action", "measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = MEASURE;
+ break;
+ case Opt_dont_measure:
+ ima_log_string(ab, "action", "dont_measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_MEASURE;
+ break;
+ case Opt_appraise:
+ ima_log_string(ab, "action", "appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = APPRAISE;
+ break;
+ case Opt_dont_appraise:
+ ima_log_string(ab, "action", "dont_appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_APPRAISE;
+ break;
+ case Opt_audit:
+ ima_log_string(ab, "action", "audit");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = AUDIT;
+ break;
+ case Opt_hash:
+ ima_log_string(ab, "action", "hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = HASH;
+ break;
+ case Opt_dont_hash:
+ ima_log_string(ab, "action", "dont_hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_HASH;
+ break;
+ case Opt_func:
+ ima_log_string(ab, "func", args[0].from);
+
+ if (entry->func)
+ result = -EINVAL;
+
+ if (strcmp(args[0].from, "FILE_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ /* PATH_CHECK is for backwards compat */
+ else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+ entry->func = MODULE_CHECK;
+ else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0)
+ entry->func = FIRMWARE_CHECK;
+ else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
+ || (strcmp(args[0].from, "MMAP_CHECK") == 0))
+ entry->func = MMAP_CHECK;
+ else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
+ entry->func = BPRM_CHECK;
+ else if (strcmp(args[0].from, "CREDS_CHECK") == 0)
+ entry->func = CREDS_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_KERNEL_CHECK") ==
+ 0)
+ entry->func = KEXEC_KERNEL_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_INITRAMFS_CHECK")
+ == 0)
+ entry->func = KEXEC_INITRAMFS_CHECK;
+ else if (strcmp(args[0].from, "POLICY_CHECK") == 0)
+ entry->func = POLICY_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ entry->func = KEXEC_CMDLINE;
+ else if (IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) &&
+ strcmp(args[0].from, "KEY_CHECK") == 0)
+ entry->func = KEY_CHECK;
+ else if (strcmp(args[0].from, "CRITICAL_DATA") == 0)
+ entry->func = CRITICAL_DATA;
+ else if (strcmp(args[0].from, "SETXATTR_CHECK") == 0)
+ entry->func = SETXATTR_CHECK;
+ else
+ result = -EINVAL;
+ if (!result)
+ entry->flags |= IMA_FUNC;
+ break;
+ case Opt_mask:
+ ima_log_string(ab, "mask", args[0].from);
+
+ if (entry->mask)
+ result = -EINVAL;
+
+ from = args[0].from;
+ if (*from == '^')
+ from++;
+
+ if ((strcmp(from, "MAY_EXEC")) == 0)
+ entry->mask = MAY_EXEC;
+ else if (strcmp(from, "MAY_WRITE") == 0)
+ entry->mask = MAY_WRITE;
+ else if (strcmp(from, "MAY_READ") == 0)
+ entry->mask = MAY_READ;
+ else if (strcmp(from, "MAY_APPEND") == 0)
+ entry->mask = MAY_APPEND;
+ else
+ result = -EINVAL;
+ if (!result)
+ entry->flags |= (*args[0].from == '^')
+ ? IMA_INMASK : IMA_MASK;
+ break;
+ case Opt_fsmagic:
+ ima_log_string(ab, "fsmagic", args[0].from);
+
+ if (entry->fsmagic) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 16, &entry->fsmagic);
+ if (!result)
+ entry->flags |= IMA_FSMAGIC;
+ break;
+ case Opt_fsname:
+ ima_log_string(ab, "fsname", args[0].from);
+
+ entry->fsname = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fsname) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FSNAME;
+ break;
+ case Opt_keyrings:
+ ima_log_string(ab, "keyrings", args[0].from);
+
+ if (!IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) ||
+ entry->keyrings) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->keyrings = ima_alloc_rule_opt_list(args);
+ if (IS_ERR(entry->keyrings)) {
+ result = PTR_ERR(entry->keyrings);
+ entry->keyrings = NULL;
+ break;
+ }
+
+ entry->flags |= IMA_KEYRINGS;
+ break;
+ case Opt_label:
+ ima_log_string(ab, "label", args[0].from);
+
+ if (entry->label) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->label = ima_alloc_rule_opt_list(args);
+ if (IS_ERR(entry->label)) {
+ result = PTR_ERR(entry->label);
+ entry->label = NULL;
+ break;
+ }
+
+ entry->flags |= IMA_LABEL;
+ break;
+ case Opt_fsuuid:
+ ima_log_string(ab, "fsuuid", args[0].from);
+
+ if (!uuid_is_null(&entry->fsuuid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = uuid_parse(args[0].from, &entry->fsuuid);
+ if (!result)
+ entry->flags |= IMA_FSUUID;
+ break;
+ case Opt_uid_gt:
+ case Opt_euid_gt:
+ entry->uid_op = &uid_gt;
+ fallthrough;
+ case Opt_uid_lt:
+ case Opt_euid_lt:
+ if ((token == Opt_uid_lt) || (token == Opt_euid_lt))
+ entry->uid_op = &uid_lt;
+ fallthrough;
+ case Opt_uid_eq:
+ case Opt_euid_eq:
+ eid_token = (token == Opt_euid_eq) ||
+ (token == Opt_euid_gt) ||
+ (token == Opt_euid_lt);
+
+ ima_log_string_op(ab, eid_token ? "euid" : "uid",
+ args[0].from, token);
+
+ if (uid_valid(entry->uid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->uid = make_kuid(current_user_ns(),
+ (uid_t) lnum);
+ if (!uid_valid(entry->uid) ||
+ (uid_t)lnum != lnum)
+ result = -EINVAL;
+ else
+ entry->flags |= eid_token
+ ? IMA_EUID : IMA_UID;
+ }
+ break;
+ case Opt_gid_gt:
+ case Opt_egid_gt:
+ entry->gid_op = &gid_gt;
+ fallthrough;
+ case Opt_gid_lt:
+ case Opt_egid_lt:
+ if ((token == Opt_gid_lt) || (token == Opt_egid_lt))
+ entry->gid_op = &gid_lt;
+ fallthrough;
+ case Opt_gid_eq:
+ case Opt_egid_eq:
+ eid_token = (token == Opt_egid_eq) ||
+ (token == Opt_egid_gt) ||
+ (token == Opt_egid_lt);
+
+ ima_log_string_op(ab, eid_token ? "egid" : "gid",
+ args[0].from, token);
+
+ if (gid_valid(entry->gid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->gid = make_kgid(current_user_ns(),
+ (gid_t)lnum);
+ if (!gid_valid(entry->gid) ||
+ (((gid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= eid_token
+ ? IMA_EGID : IMA_GID;
+ }
+ break;
+ case Opt_fowner_gt:
+ entry->fowner_op = &uid_gt;
+ fallthrough;
+ case Opt_fowner_lt:
+ if (token == Opt_fowner_lt)
+ entry->fowner_op = &uid_lt;
+ fallthrough;
+ case Opt_fowner_eq:
+ ima_log_string_op(ab, "fowner", args[0].from, token);
+
+ if (uid_valid(entry->fowner)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fowner = make_kuid(current_user_ns(),
+ (uid_t)lnum);
+ if (!uid_valid(entry->fowner) ||
+ (((uid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FOWNER;
+ }
+ break;
+ case Opt_fgroup_gt:
+ entry->fgroup_op = &gid_gt;
+ fallthrough;
+ case Opt_fgroup_lt:
+ if (token == Opt_fgroup_lt)
+ entry->fgroup_op = &gid_lt;
+ fallthrough;
+ case Opt_fgroup_eq:
+ ima_log_string_op(ab, "fgroup", args[0].from, token);
+
+ if (gid_valid(entry->fgroup)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fgroup = make_kgid(current_user_ns(),
+ (gid_t)lnum);
+ if (!gid_valid(entry->fgroup) ||
+ (((gid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FGROUP;
+ }
+ break;
+ case Opt_obj_user:
+ ima_log_string(ab, "obj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_USER,
+ AUDIT_OBJ_USER);
+ break;
+ case Opt_obj_role:
+ ima_log_string(ab, "obj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_ROLE,
+ AUDIT_OBJ_ROLE);
+ break;
+ case Opt_obj_type:
+ ima_log_string(ab, "obj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_TYPE,
+ AUDIT_OBJ_TYPE);
+ break;
+ case Opt_subj_user:
+ ima_log_string(ab, "subj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_USER,
+ AUDIT_SUBJ_USER);
+ break;
+ case Opt_subj_role:
+ ima_log_string(ab, "subj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_ROLE,
+ AUDIT_SUBJ_ROLE);
+ break;
+ case Opt_subj_type:
+ ima_log_string(ab, "subj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_TYPE,
+ AUDIT_SUBJ_TYPE);
+ break;
+ case Opt_digest_type:
+ ima_log_string(ab, "digest_type", args[0].from);
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ result = -EINVAL;
+ else if ((strcmp(args[0].from, "verity")) == 0)
+ entry->flags |= IMA_VERITY_REQUIRED;
+ else
+ result = -EINVAL;
+ break;
+ case Opt_appraise_type:
+ ima_log_string(ab, "appraise_type", args[0].from);
+
+ if ((strcmp(args[0].from, "imasig")) == 0) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_DIGSIG_REQUIRED;
+ } else if (strcmp(args[0].from, "sigv3") == 0) {
+ /* Only fsverity supports sigv3 for now */
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ entry->flags |= IMA_DIGSIG_REQUIRED;
+ else
+ result = -EINVAL;
+ } else if (IS_ENABLED(CONFIG_IMA_APPRAISE_MODSIG) &&
+ strcmp(args[0].from, "imasig|modsig") == 0) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_DIGSIG_REQUIRED |
+ IMA_MODSIG_ALLOWED;
+ } else {
+ result = -EINVAL;
+ }
+ break;
+ case Opt_appraise_flag:
+ ima_log_string(ab, "appraise_flag", args[0].from);
+ if (IS_ENABLED(CONFIG_IMA_APPRAISE_MODSIG) &&
+ strstr(args[0].from, "blacklist"))
+ entry->flags |= IMA_CHECK_BLACKLIST;
+ else
+ result = -EINVAL;
+ break;
+ case Opt_appraise_algos:
+ ima_log_string(ab, "appraise_algos", args[0].from);
+
+ if (entry->allowed_algos) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->allowed_algos =
+ ima_parse_appraise_algos(args[0].from);
+ /* invalid or empty list of algorithms */
+ if (!entry->allowed_algos) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->flags |= IMA_VALIDATE_ALGOS;
+
+ break;
+ case Opt_permit_directio:
+ entry->flags |= IMA_PERMIT_DIRECTIO;
+ break;
+ case Opt_pcr:
+ ima_log_string(ab, "pcr", args[0].from);
+
+ result = kstrtoint(args[0].from, 10, &entry->pcr);
+ if (result || INVALID_PCR(entry->pcr))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_PCR;
+
+ break;
+ case Opt_template:
+ ima_log_string(ab, "template", args[0].from);
+ if (entry->action != MEASURE) {
+ result = -EINVAL;
+ break;
+ }
+ template_desc = lookup_template_desc(args[0].from);
+ if (!template_desc || entry->template) {
+ result = -EINVAL;
+ break;
+ }
+
+ /*
+ * template_desc_init_fields() does nothing if
+ * the template is already initialised, so
+ * it's safe to do this unconditionally
+ */
+ template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ entry->template = template_desc;
+ break;
+ case Opt_err:
+ ima_log_string(ab, "UNKNOWN", p);
+ result = -EINVAL;
+ break;
+ }
+ }
+ if (!result && !ima_validate_rule(entry))
+ result = -EINVAL;
+ else if (entry->action == APPRAISE)
+ temp_ima_appraise |= ima_appraise_flag(entry->func);
+
+ if (!result && entry->flags & IMA_MODSIG_ALLOWED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_modsig(template_desc);
+ }
+
+ /* d-ngv2 template field recommended for unsigned fs-verity digests */
+ if (!result && entry->action == MEASURE &&
+ entry->flags & IMA_VERITY_REQUIRED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_field(template_desc, "d-ngv2",
+ "verity rules should include d-ngv2");
+ }
+
+ audit_log_format(ab, "res=%d", !result);
+ audit_log_end(ab);
+ return result;
+}
+
+/**
+ * ima_parse_add_rule - add a rule to ima_policy_rules
+ * @rule: ima measurement policy rule
+ *
+ * Avoid locking by allowing just one writer at a time in ima_write_policy().
+ *
+ * Returns the length of the rule parsed, or an error code on failure.
+ */
+ssize_t ima_parse_add_rule(char *rule)
+{
+ static const char op[] = "update_policy";
+ char *p;
+ struct ima_rule_entry *entry;
+ ssize_t result, len;
+ int audit_info = 0;
+
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+ p += strspn(p, " \t");
+
+ if (*p == '#' || *p == '\0')
+ return len;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+ NULL, op, "-ENOMEM", -ENOMEM, audit_info);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+
+ result = ima_parse_rule(p, entry);
+ if (result) {
+ ima_free_rule(entry);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+ NULL, op, "invalid-policy", result,
+ audit_info);
+ return result;
+ }
+
+ list_add_tail(&entry->list, &ima_temp_rules);
+
+ return len;
+}
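+
+/*
+ * For illustration, rules normally reach ima_parse_add_rule() one line at
+ * a time via ima_write_policy(), e.g. (assuming securityfs is mounted at
+ * /sys/kernel/security and CONFIG_IMA_WRITE_POLICY is enabled):
+ *
+ * echo "measure func=BPRM_CHECK" > /sys/kernel/security/ima/policy
+ */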
+
+/**
+ * ima_delete_rules() - called to clean up an invalid in-flight policy.
+ * We don't need locking as we operate on the temp list, which is
+ * different from the active one. There is also only one user of
+ * ima_delete_rules() at a time.
+ */
+void ima_delete_rules(void)
+{
+ struct ima_rule_entry *entry, *tmp;
+
+ temp_ima_appraise = 0;
+ list_for_each_entry_safe(entry, tmp, &ima_temp_rules, list) {
+ list_del(&entry->list);
+ ima_free_rule(entry);
+ }
+}
+
+#define __ima_hook_stringify(func, str) (#func),
+
+const char *const func_tokens[] = {
+ __ima_hooks(__ima_hook_stringify)
+};
+
+#ifdef CONFIG_IMA_READ_POLICY
+enum {
+ mask_exec = 0, mask_write, mask_read, mask_append
+};
+
+static const char *const mask_tokens[] = {
+ "^MAY_EXEC",
+ "^MAY_WRITE",
+ "^MAY_READ",
+ "^MAY_APPEND"
+};
+
+void *ima_policy_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct ima_rule_entry *entry;
+ struct list_head *ima_rules_tmp;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (!l--) {
+ rcu_read_unlock();
+ return entry;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ima_rule_entry *entry = v;
+
+ rcu_read_lock();
+ entry = list_entry_rcu(entry->list.next, struct ima_rule_entry, list);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&entry->list == &ima_default_rules ||
+ &entry->list == &ima_policy_rules) ? NULL : entry;
+}
+
+void ima_policy_stop(struct seq_file *m, void *v)
+{
+}
+
+#define pt(token) policy_tokens[token].pattern
+#define mt(token) mask_tokens[token]
+
+/*
+ * policy_func_show - display the ima_hooks policy rule
+ */
+static void policy_func_show(struct seq_file *m, enum ima_hooks func)
+{
+ if (func > 0 && func < MAX_CHECK)
+ seq_printf(m, "func=%s ", func_tokens[func]);
+ else
+ seq_printf(m, "func=%d ", func);
+}
+
+static void ima_show_rule_opt_list(struct seq_file *m,
+ const struct ima_rule_opt_list *opt_list)
+{
+ size_t i;
+
+ for (i = 0; i < opt_list->count; i++)
+ seq_printf(m, "%s%s", i ? "|" : "", opt_list->items[i]);
+}
+
+static void ima_policy_show_appraise_algos(struct seq_file *m,
+ unsigned int allowed_hashes)
+{
+ int idx, list_size = 0;
+
+ for (idx = 0; idx < HASH_ALGO__LAST; idx++) {
+ if (!(allowed_hashes & (1U << idx)))
+ continue;
+
+ /* only add commas if the list contains multiple entries */
+ if (list_size++)
+ seq_puts(m, ",");
+
+ seq_puts(m, hash_algo_name[idx]);
+ }
+}
+
+int ima_policy_show(struct seq_file *m, void *v)
+{
+ struct ima_rule_entry *entry = v;
+ int i;
+ char tbuf[64] = {0,};
+ int offset = 0;
+
+ rcu_read_lock();
+
+ /* Do not print rules with inactive LSM labels */
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].args_p && !entry->lsm[i].rule) {
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+
+ if (entry->action & MEASURE)
+ seq_puts(m, pt(Opt_measure));
+ if (entry->action & DONT_MEASURE)
+ seq_puts(m, pt(Opt_dont_measure));
+ if (entry->action & APPRAISE)
+ seq_puts(m, pt(Opt_appraise));
+ if (entry->action & DONT_APPRAISE)
+ seq_puts(m, pt(Opt_dont_appraise));
+ if (entry->action & AUDIT)
+ seq_puts(m, pt(Opt_audit));
+ if (entry->action & HASH)
+ seq_puts(m, pt(Opt_hash));
+ if (entry->action & DONT_HASH)
+ seq_puts(m, pt(Opt_dont_hash));
+
+ seq_puts(m, " ");
+
+ if (entry->flags & IMA_FUNC)
+ policy_func_show(m, entry->func);
+
+ if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) {
+ if (entry->flags & IMA_MASK)
+ offset = 1;
+ if (entry->mask & MAY_EXEC)
+ seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset);
+ if (entry->mask & MAY_WRITE)
+ seq_printf(m, pt(Opt_mask), mt(mask_write) + offset);
+ if (entry->mask & MAY_READ)
+ seq_printf(m, pt(Opt_mask), mt(mask_read) + offset);
+ if (entry->mask & MAY_APPEND)
+ seq_printf(m, pt(Opt_mask), mt(mask_append) + offset);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSMAGIC) {
+ snprintf(tbuf, sizeof(tbuf), "0x%lx", entry->fsmagic);
+ seq_printf(m, pt(Opt_fsmagic), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSNAME) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname);
+ seq_printf(m, pt(Opt_fsname), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_KEYRINGS) {
+ seq_puts(m, "keyrings=");
+ ima_show_rule_opt_list(m, entry->keyrings);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_LABEL) {
+ seq_puts(m, "label=");
+ ima_show_rule_opt_list(m, entry->label);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_PCR) {
+ snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
+ seq_printf(m, pt(Opt_pcr), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSUUID) {
+ seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_UID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_uid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_uid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_uid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_EUID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_euid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_euid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_euid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_GID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->gid));
+ if (entry->gid_op == &gid_gt)
+ seq_printf(m, pt(Opt_gid_gt), tbuf);
+ else if (entry->gid_op == &gid_lt)
+ seq_printf(m, pt(Opt_gid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_gid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_EGID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->gid));
+ if (entry->gid_op == &gid_gt)
+ seq_printf(m, pt(Opt_egid_gt), tbuf);
+ else if (entry->gid_op == &gid_lt)
+ seq_printf(m, pt(Opt_egid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_egid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FOWNER) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->fowner));
+ if (entry->fowner_op == &uid_gt)
+ seq_printf(m, pt(Opt_fowner_gt), tbuf);
+ else if (entry->fowner_op == &uid_lt)
+ seq_printf(m, pt(Opt_fowner_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_fowner_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FGROUP) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->fgroup));
+ if (entry->fgroup_op == &gid_gt)
+ seq_printf(m, pt(Opt_fgroup_gt), tbuf);
+ else if (entry->fgroup_op == &gid_lt)
+ seq_printf(m, pt(Opt_fgroup_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_fgroup_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_VALIDATE_ALGOS) {
+ seq_puts(m, "appraise_algos=");
+ ima_policy_show_appraise_algos(m, entry->allowed_algos);
+ seq_puts(m, " ");
+ }
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].rule) {
+ switch (i) {
+ case LSM_OBJ_USER:
+ seq_printf(m, pt(Opt_obj_user),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_ROLE:
+ seq_printf(m, pt(Opt_obj_role),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_TYPE:
+ seq_printf(m, pt(Opt_obj_type),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_USER:
+ seq_printf(m, pt(Opt_subj_user),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_ROLE:
+ seq_printf(m, pt(Opt_subj_role),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_TYPE:
+ seq_printf(m, pt(Opt_subj_type),
+ entry->lsm[i].args_p);
+ break;
+ }
+ seq_puts(m, " ");
+ }
+ }
+ if (entry->template)
+ seq_printf(m, "template=%s ", entry->template->name);
+ if (entry->flags & IMA_DIGSIG_REQUIRED) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ seq_puts(m, "appraise_type=sigv3 ");
+ else if (entry->flags & IMA_MODSIG_ALLOWED)
+ seq_puts(m, "appraise_type=imasig|modsig ");
+ else
+ seq_puts(m, "appraise_type=imasig ");
+ }
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ seq_puts(m, "digest_type=verity ");
+ if (entry->flags & IMA_CHECK_BLACKLIST)
+ seq_puts(m, "appraise_flag=check_blacklist ");
+ if (entry->flags & IMA_PERMIT_DIRECTIO)
+ seq_puts(m, "permit_directio ");
+ rcu_read_unlock();
+ seq_puts(m, "\n");
+ return 0;
+}
+#endif /* CONFIG_IMA_READ_POLICY */
+
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+/*
+ * ima_appraise_signature: whether IMA will appraise a given function using
+ * an IMA digital signature. This is restricted to cases where the kernel
+ * has a set of built-in trusted keys in order to avoid an attacker simply
+ * loading additional keys.
+ */
+bool ima_appraise_signature(enum kernel_read_file_id id)
+{
+ struct ima_rule_entry *entry;
+ bool found = false;
+ enum ima_hooks func;
+ struct list_head *ima_rules_tmp;
+
+ if (id >= READING_MAX_ID)
+ return false;
+
+ if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
+ && security_locked_down(LOCKDOWN_KEXEC))
+ return false;
+
+ func = read_idmap[id] ?: FILE_CHECK;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (entry->action != APPRAISE)
+ continue;
+
+ /*
+ * A generic entry will match; otherwise, require that the
+ * entry match the func we're looking for.
+ */
+ if (entry->func && entry->func != func)
+ continue;
+
+ /*
+ * We require this to be a digital signature, not a raw IMA
+ * hash.
+ */
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ found = true;
+
+ /*
+ * We've found a rule that matches, so break now even if it
+ * didn't require a digital signature - a later rule that does
+ * won't override it, and so would be a false positive.
+ */
+ break;
+ }
+
+ rcu_read_unlock();
+ return found;
+}
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
new file mode 100644
index 000000000..532da87ce
--- /dev/null
+++ b/security/integrity/ima/ima_queue.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Serge Hallyn <serue@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_queue.c
+ * Implements queues that store template measurements and
+ * maintains an aggregate over the stored measurements
+ * in the pre-configured TPM PCR (if available).
+ * The measurement list is append-only. No entry is
+ * ever removed or changed during the boot cycle.
+ */
+
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include "ima.h"
+
+#define AUDIT_CAUSE_LEN_MAX 32
+
+/* pre-allocated array of tpm_digest structures to extend a PCR */
+static struct tpm_digest *digests;
+
+LIST_HEAD(ima_measurements); /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
+
+/* key: inode (before secure-hashing a file) */
+struct ima_h_table ima_htable = {
+ .len = ATOMIC_LONG_INIT(0),
+ .violations = ATOMIC_LONG_INIT(0),
+ .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
+};
+
+/* This mutex protects the atomicity of extending the measurement list
+ * and extending the TPM PCR aggregate. Since tpm_pcr_extend() can take
+ * a long time (and the TPM driver uses a mutex), we can't use a spinlock.
+ */
+static DEFINE_MUTEX(ima_extend_list_mutex);
+
+/* look up the digest value in the hash table, and return the entry */
+static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
+ int pcr)
+{
+ struct ima_queue_entry *qe, *ret = NULL;
+ unsigned int key;
+ int rc;
+
+ key = ima_hash_key(digest_value);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+ rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
+ digest_value, hash_digest_size[ima_hash_algo]);
+ if ((rc == 0) && (qe->entry->pcr == pcr)) {
+ ret = qe;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable-length fields (e.g., the template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+ int size = 0;
+
+ size += sizeof(u32); /* pcr */
+ size += TPM_DIGEST_SIZE;
+ size += sizeof(int); /* template name size field */
+ size += strlen(entry->template_desc->name);
+ size += sizeof(entry->template_data_len);
+ size += entry->template_data_len;
+ return size;
+}
+
+/* ima_add_template_entry helper function:
+ * - Add template entry to the measurement list and hash table, for
+ * all entries except those carried across kexec.
+ *
+ * (Called with ima_extend_list_mutex held.)
+ */
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+ bool update_htable)
+{
+ struct ima_queue_entry *qe;
+ unsigned int key;
+
+ qe = kmalloc(sizeof(*qe), GFP_KERNEL);
+ if (qe == NULL) {
+ pr_err("OUT OF MEMORY ERROR creating queue entry\n");
+ return -ENOMEM;
+ }
+ qe->entry = entry;
+
+ INIT_LIST_HEAD(&qe->later);
+ list_add_tail_rcu(&qe->later, &ima_measurements);
+
+ atomic_long_inc(&ima_htable.len);
+ if (update_htable) {
+ key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ }
+
+ if (binary_runtime_size != ULONG_MAX) {
+ int size;
+
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+ binary_runtime_size + size : ULONG_MAX;
+ }
+ return 0;
+}
+
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+ if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ return ULONG_MAX;
+ else
+ return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+}
+
+static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
+{
+ int result = 0;
+
+ if (!ima_tpm_chip)
+ return result;
+
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
+ if (result != 0)
+ pr_err("Error Communicating to TPM chip, result: %d\n", result);
+ return result;
+}
+
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
+ */
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
+{
+ u8 *digest = entry->digests[ima_hash_algo_idx].digest;
+ struct tpm_digest *digests_arg = entry->digests;
+ const char *audit_cause = "hash_added";
+ char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+ int audit_info = 1;
+ int result = 0, tpmresult = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
+ if (ima_lookup_digest_entry(digest, entry->pcr)) {
+ audit_cause = "hash_exists";
+ result = -EEXIST;
+ goto out;
+ }
+ }
+
+ result = ima_add_digest_entry(entry,
+ !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
+ if (result < 0) {
+ audit_cause = "ENOMEM";
+ audit_info = 0;
+ goto out;
+ }
+
+ if (violation) /* invalidate the PCR by extending with 0xff digests */
+ digests_arg = digests;
+
+ tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
+ if (tpmresult != 0) {
+ snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+ tpmresult);
+ audit_cause = tpm_audit_cause;
+ audit_info = 0;
+ }
+out:
+ mutex_unlock(&ima_extend_list_mutex);
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, audit_cause, result, audit_info);
+ return result;
+}
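+
+/*
+ * Note that a TPM extend failure above is audited but does not remove the
+ * already-queued list entry, so the measurement list and the PCR aggregate
+ * can diverge; remote attestation of the list against the PCR would then
+ * fail.
+ */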
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+ int result = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ result = ima_add_digest_entry(entry, 0);
+ mutex_unlock(&ima_extend_list_mutex);
+ return result;
+}
+
+int __init ima_init_digests(void)
+{
+ u16 digest_size;
+ u16 crypto_id;
+ int i;
+
+ if (!ima_tpm_chip)
+ return 0;
+
+ digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
+ GFP_NOFS);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+
+ /* for unmapped TPM algorithms, the digest is still a padded SHA1 */
+ if (crypto_id == HASH_ALGO__LAST)
+ digest_size = SHA1_DIGEST_SIZE;
+
+ memset(digests[i].digest, 0xff, digest_size);
+ }
+
+ return 0;
+}
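+
+/*
+ * The all-0xff digests prepared here are the values with which
+ * ima_add_template_entry() extends (and thereby invalidates) a PCR
+ * when a measurement violation occurs.
+ */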
diff --git a/security/integrity/ima/ima_queue_keys.c b/security/integrity/ima/ima_queue_keys.c
new file mode 100644
index 000000000..93056c03b
--- /dev/null
+++ b/security/integrity/ima/ima_queue_keys.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_queue_keys.c
+ * Enables deferred processing of keys
+ */
+
+#include <linux/user_namespace.h>
+#include <linux/workqueue.h>
+#include <keys/asymmetric-type.h>
+#include "ima.h"
+
+/*
+ * Flag to indicate whether a key can be processed
+ * right away or should be queued for processing later.
+ */
+static bool ima_process_keys;
+
+/*
+ * To synchronize access to the list of keys that need to be measured
+ */
+static DEFINE_MUTEX(ima_keys_lock);
+static LIST_HEAD(ima_keys);
+
+/*
+ * If a custom IMA policy is never loaded, the keys queued up
+ * for measurement should be freed. This worker handles that
+ * scenario.
+ */
+static long ima_key_queue_timeout = 300000; /* 5 Minutes */
+static void ima_keys_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ima_keys_delayed_work, ima_keys_handler);
+static bool timer_expired;
+
+/*
+ * This worker function frees keys that may still be
+ * queued up in case a custom IMA policy was not loaded.
+ */
+static void ima_keys_handler(struct work_struct *work)
+{
+ timer_expired = true;
+ ima_process_queued_keys();
+}
+
+/*
+ * This function sets up a worker to free queued keys in case
+ * a custom IMA policy is never loaded.
+ */
+void ima_init_key_queue(void)
+{
+ schedule_delayed_work(&ima_keys_delayed_work,
+ msecs_to_jiffies(ima_key_queue_timeout));
+}
+
+static void ima_free_key_entry(struct ima_key_entry *entry)
+{
+ if (entry) {
+ kfree(entry->payload);
+ kfree(entry->keyring_name);
+ kfree(entry);
+ }
+}
+
+static struct ima_key_entry *ima_alloc_key_entry(struct key *keyring,
+ const void *payload,
+ size_t payload_len)
+{
+ int rc = 0;
+ const char *audit_cause = "ENOMEM";
+ struct ima_key_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->payload = kmemdup(payload, payload_len, GFP_KERNEL);
+ entry->keyring_name = kstrdup(keyring->description,
+ GFP_KERNEL);
+ entry->payload_len = payload_len;
+ }
+
+ if ((entry == NULL) || (entry->payload == NULL) ||
+ (entry->keyring_name == NULL)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+
+out:
+ if (rc) {
+ integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL,
+ keyring->description,
+ func_measure_str(KEY_CHECK),
+ audit_cause, rc, 0, rc);
+ ima_free_key_entry(entry);
+ entry = NULL;
+ }
+
+ return entry;
+}
+
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len)
+{
+ bool queued = false;
+ struct ima_key_entry *entry;
+
+ entry = ima_alloc_key_entry(keyring, payload, payload_len);
+ if (!entry)
+ return false;
+
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ list_add_tail(&entry->list, &ima_keys);
+ queued = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!queued)
+ ima_free_key_entry(entry);
+
+ return queued;
+}
+
+/*
+ * ima_process_queued_keys() - process keys queued for measurement
+ *
+ * This function sets ima_process_keys to true and processes queued keys.
+ * From here on keys will be processed right away (not queued).
+ */
+void ima_process_queued_keys(void)
+{
+ struct ima_key_entry *entry, *tmp;
+ bool process = false;
+
+ if (ima_process_keys)
+ return;
+
+ /*
+ * Since ima_process_keys is set to true, any new key will be
+ * processed immediately and not be queued on the ima_keys list.
+ * The first caller to set the ima_process_keys flag to true
+ * processes the queued keys.
+ */
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ ima_process_keys = true;
+ process = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!process)
+ return;
+
+ /*
+ * If the timeout already fired, the queued keys are freed below
+ * without being measured; otherwise cancel the pending worker
+ * and measure them.
+ */
+ if (!timer_expired)
+ cancel_delayed_work_sync(&ima_keys_delayed_work);
+
+ list_for_each_entry_safe(entry, tmp, &ima_keys, list) {
+ if (!timer_expired)
+ process_buffer_measurement(&init_user_ns, NULL,
+ entry->payload,
+ entry->payload_len,
+ entry->keyring_name,
+ KEY_CHECK, 0,
+ entry->keyring_name,
+ false, NULL, 0);
+ list_del(&entry->list);
+ ima_free_key_entry(entry);
+ }
+}
+
+inline bool ima_should_queue_key(void)
+{
+ return !ima_process_keys;
+}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000..04c49f05c
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+
+#include <linux/rculist.h>
+#include "ima.h"
+#include "ima_template_lib.h"
+
+enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME,
+ HDR_TEMPLATE_DATA, HDR__LAST };
+
+static struct ima_template_desc builtin_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng", .fmt = "d-ng|n-ng"},
+ {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "ima-ngv2", .fmt = "d-ngv2|n-ng"},
+ {.name = "ima-sigv2", .fmt = "d-ngv2|n-ng|sig"},
+ {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
+ {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"},
+ {.name = "evm-sig",
+ .fmt = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode"},
+ {.name = "", .fmt = ""}, /* placeholder for a custom format */
+};
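+
+/*
+ * As an illustration, an "ima-ng" entry renders in the ASCII measurement
+ * list roughly as (values abbreviated):
+ *
+ * 10 <template-digest> ima-ng sha256:<file-digest> /usr/bin/bash
+ */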
+
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
+
+static const struct ima_template_field supported_fields[] = {
+ {.field_id = "d", .field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n", .field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "d-ngv2", .field_init = ima_eventdigest_ngv2_init,
+ .field_show = ima_show_template_digest_ngv2},
+ {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig", .field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "buf", .field_init = ima_eventbuf_init,
+ .field_show = ima_show_template_buf},
+ {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "modsig", .field_init = ima_eventmodsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "evmsig", .field_init = ima_eventevmsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "iuid", .field_init = ima_eventinodeuid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "igid", .field_init = ima_eventinodegid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "imode", .field_init = ima_eventinodemode_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "xattrnames",
+ .field_init = ima_eventinodexattrnames_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "xattrlengths",
+ .field_init = ima_eventinodexattrlengths_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "xattrvalues",
+ .field_init = ima_eventinodexattrvalues_init,
+ .field_show = ima_show_template_sig},
+};
+
+/*
+ * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't
+ * need to be accounted for since they shouldn't be defined in the same template
+ * description as 'd-ng' and 'n-ng' respectively.
+ */
+#define MAX_TEMPLATE_NAME_LEN \
+ sizeof("d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode")
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *ima_buf_template;
+
+/**
+ * ima_template_has_modsig - Check whether template has modsig-related fields.
+ * @ima_template: IMA template to check.
+ *
+ * Tells whether the given template has fields referencing a file's appended
+ * signature.
+ */
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template)
+{
+ int i;
+
+ for (i = 0; i < ima_template->num_fields; i++)
+ if (!strcmp(ima_template->fields[i]->field_id, "modsig") ||
+ !strcmp(ima_template->fields[i]->field_id, "d-modsig"))
+ return true;
+
+ return false;
+}
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ if (template_setup_done)
+ return 1;
+
+ if (!ima_template)
+ ima_init_template_list();
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc) {
+ pr_err("template %s not found, using %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ template_setup_done = 1;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static int __init ima_template_fmt_setup(char *str)
+{
+ int num_templates = ARRAY_SIZE(builtin_templates);
+
+ if (template_setup_done)
+ return 1;
+
+ if (template_desc_init_fields(str, NULL, NULL) < 0) {
+ pr_err("format string '%s' not valid, using template %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ builtin_templates[num_templates - 1].fmt = str;
+ ima_template = builtin_templates + num_templates - 1;
+ template_setup_done = 1;
+
+ return 1;
+}
+__setup("ima_template_fmt=", ima_template_fmt_setup);
+
+struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ struct ima_template_desc *template_desc;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+ if ((strcmp(template_desc->name, name) == 0) ||
+ (strcmp(template_desc->fmt, name) == 0)) {
+ found = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? template_desc : NULL;
+}
+
+static const struct ima_template_field *
+lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
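+/*
+ * Count the '|'-separated fields in a template format string; e.g.
+ * "d-ng|n-ng|sig" has three fields.
+ */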
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields)
+{
+ const char *template_fmt_ptr;
+ const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+ int template_num_fields;
+ int i, len;
+
+ if (num_fields && *num_fields > 0) /* already initialized? */
+ return 0;
+
+ template_num_fields = template_fmt_size(template_fmt);
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
+ pr_err("format string '%s' contains too many fields\n",
+ template_fmt);
+ return -EINVAL;
+ }
+
+ for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
+ i++, template_fmt_ptr += len + 1) {
+ char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
+
+ len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
+ if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
+ pr_err("Invalid field with length %d\n", len);
+ return -EINVAL;
+ }
+
+ memcpy(tmp_field_id, template_fmt_ptr, len);
+ tmp_field_id[len] = '\0';
+ found_fields[i] = lookup_template_field(tmp_field_id);
+ if (!found_fields[i]) {
+ pr_err("field '%s' not found\n", tmp_field_id);
+ return -ENOENT;
+ }
+ }
+
+ if (fields && num_fields) {
+ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+ memcpy(*fields, found_fields, i * sizeof(**fields));
+ *num_fields = i;
+ }
+
+ return 0;
+}
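+
+/*
+ * E.g. template_desc_init_fields("d-ng|n-ng", &fields, &num_fields)
+ * resolves the two field descriptors and sets num_fields to 2; an
+ * unknown field id makes the whole format fail with -ENOENT.
+ */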
+
+void ima_init_template_list(void)
+{
+ int i;
+
+ if (!list_empty(&defined_templates))
+ return;
+
+ spin_lock(&template_list);
+ for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+ list_add_tail_rcu(&builtin_templates[i].list,
+ &defined_templates);
+ }
+ spin_unlock(&template_list);
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template) {
+ ima_init_template_list();
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ }
+ return ima_template;
+}
+
+struct ima_template_desc *ima_template_desc_buf(void)
+{
+ if (!ima_buf_template) {
+ ima_init_template_list();
+ ima_buf_template = lookup_template_desc("ima-buf");
+ }
+ return ima_buf_template;
+}
+
+int __init ima_init_template(void)
+{
+ struct ima_template_desc *template = ima_template_desc_current();
+ int result;
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0) {
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
+ return result;
+ }
+
+ template = ima_template_desc_buf();
+ if (!template) {
+ pr_err("Failed to get ima-buf template\n");
+ return -EINVAL;
+ }
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
+
+ return result;
+}
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+ struct ima_template_desc *template_desc = NULL;
+ int ret;
+
+ ret = template_desc_init_fields(template_name, NULL, NULL);
+ if (ret < 0) {
+ pr_err("attempting to initialize the template \"%s\" failed\n",
+ template_name);
+ goto out;
+ }
+
+ template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+ if (!template_desc)
+ goto out;
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+ if (!template_desc->fmt) {
+ kfree(template_desc);
+ template_desc = NULL;
+ goto out;
+ }
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+ spin_unlock(&template_list);
+out:
+ return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+ void *template_data,
+ int template_data_size,
+ struct ima_template_entry **entry)
+{
+ struct tpm_digest *digests;
+ int ret = 0;
+ int i;
+
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
+
+ ret = ima_parse_buf(template_data, template_data + template_data_size,
+ NULL, template_desc->num_fields,
+ (*entry)->template_data, NULL, NULL,
+ ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
+ if (ret < 0) {
+ kfree((*entry)->digests);
+ kfree(*entry);
+ return ret;
+ }
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_field_data *field_data = &(*entry)->template_data[i];
+ u8 *data = field_data->data;
+
+ (*entry)->template_data[i].data =
+ kzalloc(field_data->len + 1, GFP_KERNEL);
+ if (!(*entry)->template_data[i].data) {
+ ret = -ENOMEM;
+ break;
+ }
+ memcpy((*entry)->template_data[i].data, data, field_data->len);
+ (*entry)->template_data_len += sizeof(field_data->len);
+ (*entry)->template_data_len += field_data->len;
+ }
+
+ if (ret < 0) {
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ }
+
+ return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+ char template_name[MAX_TEMPLATE_NAME_LEN];
+ unsigned char zero[TPM_DIGEST_SIZE] = { 0 };
+
+ struct ima_kexec_hdr *khdr = buf;
+ struct ima_field_data hdr[HDR__LAST] = {
+ [HDR_PCR] = {.len = sizeof(u32)},
+ [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE},
+ };
+
+ void *bufp = buf + sizeof(*khdr);
+ void *bufendp;
+ struct ima_template_entry *entry;
+ struct ima_template_desc *template_desc;
+ DECLARE_BITMAP(hdr_mask, HDR__LAST);
+ unsigned long count = 0;
+ int ret = 0;
+
+ if (!buf || size < sizeof(*khdr))
+ return 0;
+
+ if (ima_canonical_fmt) {
+ khdr->version = le16_to_cpu((__force __le16)khdr->version);
+ khdr->count = le64_to_cpu((__force __le64)khdr->count);
+ khdr->buffer_size = le64_to_cpu((__force __le64)khdr->buffer_size);
+ }
+
+ if (khdr->version != 1) {
+ pr_err("attempting to restore a incompatible measurement list");
+ return -EINVAL;
+ }
+
+ if (khdr->count > ULONG_MAX - 1) {
+ pr_err("attempting to restore too many measurements");
+ return -EINVAL;
+ }
+
+ bitmap_zero(hdr_mask, HDR__LAST);
+ bitmap_set(hdr_mask, HDR_PCR, 1);
+ bitmap_set(hdr_mask, HDR_DIGEST, 1);
+
+ /*
+ * ima kexec buffer prefix: version, buffer size, count
+ * v1 format: pcr, digest, template-name-len, template-name,
+ * template-data-size, template-data
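+ *
+ * Illustrative v1 record layout (an assumption drawn from the parsing
+ * below, not from the original comment):
+ * <u32 pcr><20-byte digest><u32 name-len><name><u32 data-size><data>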
+ */
+ bufendp = buf + khdr->buffer_size;
+ while ((bufp < bufendp) && (count++ < khdr->count)) {
+ int enforce_mask = ENFORCE_FIELDS;
+
+ enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0;
+ ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL,
+ hdr_mask, enforce_mask, "entry header");
+ if (ret < 0)
+ break;
+
+ if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
+ pr_err("attempting to restore a template name that is too long\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* template name is not null terminated */
+ memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data,
+ hdr[HDR_TEMPLATE_NAME].len);
+ template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
+
+ if (strcmp(template_name, "ima") == 0) {
+ pr_err("attempting to restore an unsupported template \"%s\" failed\n",
+ template_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ template_desc = lookup_template_desc(template_name);
+ if (!template_desc) {
+ template_desc = restore_template_fmt(template_name);
+ if (!template_desc)
+ break;
+ }
+
+ /*
+ * Only the running system's template format is initialized
+ * on boot. As needed, initialize the other template formats.
+ */
+ ret = template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ if (ret < 0) {
+ pr_err("attempting to restore the template fmt \"%s\" failed\n",
+ template_desc->fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ima_restore_template_data(template_desc,
+ hdr[HDR_TEMPLATE_DATA].data,
+ hdr[HDR_TEMPLATE_DATA].len,
+ &entry);
+ if (ret < 0)
+ break;
+
+ if (memcmp(hdr[HDR_DIGEST].data, zero, sizeof(zero))) {
+ ret = ima_calc_field_array_hash(
+ &entry->template_data[0],
+ entry);
+ if (ret < 0) {
+ pr_err("cannot calculate template digest\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
+ le32_to_cpu(*(__le32 *)(hdr[HDR_PCR].data));
+ ret = ima_restore_measurement_entry(entry);
+ if (ret < 0)
+ break;
+
+ }
+ return ret;
+}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000..7bf9b1507
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+
+#include "ima_template_lib.h"
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX,
+ DATA_FMT_UINT
+};
+
+enum digest_type {
+ DIGEST_TYPE_IMA,
+ DIGEST_TYPE_VERITY,
+ DIGEST_TYPE__LAST
+};
+
+#define DIGEST_TYPE_NAME_LEN_MAX 7 /* including NUL */
+static const char * const digest_type_name[DIGEST_TYPE__LAST] = {
+ [DIGEST_TYPE_IMA] = "ima",
+ [DIGEST_TYPE_VERITY] = "verity"
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen = datalen;
+
+ if (datafmt == DATA_FMT_STRING)
+ buflen = datalen + 1;
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscores for event names and
+ * strings. This avoids filenames that contain spaces, or that end
+ * with the suffix ' (deleted)', being split into multiple template
+ * fields during the parsing of a measurement list (the space is the
+ * delimiter character for measurement lists in ASCII format).
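+ * For example, a (hypothetical) filename "my file (deleted)" is
+ * recorded as "my_file_(deleted)".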
+ */
+ if (datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data;
+ u32 buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO:
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strrchr(field_data->data, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ fallthrough;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ case DATA_FMT_UINT:
+ switch (field_data->len) {
+ case sizeof(u8):
+ seq_printf(m, "%u", *(u8 *)buf_ptr);
+ break;
+ case sizeof(u16):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le16_to_cpu(*(__le16 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u16 *)buf_ptr);
+ break;
+ case sizeof(u32):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le32_to_cpu(*(__le32 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u32 *)buf_ptr);
+ break;
+ case sizeof(u64):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%llu",
+ le64_to_cpu(*(__le64 *)buf_ptr));
+ else
+ seq_printf(m, "%llu", *(u64 *)buf_ptr);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+ strlen(field_data->data) : field_data->len;
+
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+ u32 field_len = !ima_canonical_fmt ?
+ len : (__force u32)cpu_to_le32(len);
+
+ ima_putc(m, &field_len, sizeof(field_len));
+ }
+
+ if (!len)
+ return;
+
+ ima_putc(m, field_data->data, len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ case IMA_SHOW_BINARY_OLD_STRING_FMT:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_digest_ngv2(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show,
+ DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_UINT, field_data);
+}
+
+/**
+ * ima_parse_buf() - Parses lengths and data from an input buffer
+ * @bufstartp: Buffer start address.
+ * @bufendp: Buffer end address.
+ * @bufcurp: Pointer to remaining (non-parsed) data.
+ * @maxfields: Length of fields array.
+ * @fields: Array containing lengths and pointers of parsed data.
+ * @curfields: Number of array items containing parsed data.
+ * @len_mask: Bitmap (if bit is set, data length should not be parsed).
+ * @enforce_mask: Check if curfields == maxfields and/or bufcurp == bufendp.
+ * @bufname: String identifier of the input buffer.
+ *
+ * Return: 0 on success, -EINVAL on error.
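+ *
+ * Illustrative example (not part of the original documentation): with
+ * len_mask == NULL, a buffer laid out as <u32 len0><len0 bytes>
+ * <u32 len1><len1 bytes> fills fields[0] and fields[1]; passing
+ * ENFORCE_BUFEND additionally requires the parser to consume the
+ * buffer exactly.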
+ */
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname)
+{
+ void *bufp = bufstartp;
+ int i;
+
+ for (i = 0; i < maxfields; i++) {
+ if (len_mask == NULL || !test_bit(i, len_mask)) {
+ if (bufp > (bufendp - sizeof(u32)))
+ break;
+
+ if (ima_canonical_fmt)
+ fields[i].len = le32_to_cpu(*(__le32 *)bufp);
+ else
+ fields[i].len = *(u32 *)bufp;
+
+ bufp += sizeof(u32);
+ }
+
+ if (bufp > (bufendp - fields[i].len))
+ break;
+
+ fields[i].data = bufp;
+ bufp += fields[i].len;
+ }
+
+ if ((enforce_mask & ENFORCE_FIELDS) && i != maxfields) {
+ pr_err("%s: nr of fields mismatch: expected: %d, current: %d\n",
+ bufname, maxfields, i);
+ return -EINVAL;
+ }
+
+ if ((enforce_mask & ENFORCE_BUFEND) && bufp != bufendp) {
+ pr_err("%s: buf end mismatch: expected: %p, current: %p\n",
+ bufname, bufendp, bufp);
+ return -EINVAL;
+ }
+
+ if (curfields)
+ *curfields = i;
+
+ if (bufcurp)
+ *bufcurp = bufp;
+
+ return 0;
+}
+
+static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
+ u8 digest_type, u8 hash_algo,
+ struct ima_field_data *field_data)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: <hash algo> + ':' + '\0' + digest,
+ * - DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO:
+ * <digest type> + ':' + <hash algo> + ':' + '\0' + digest,
+ *
+ * where 'DATA_FMT_DIGEST' is the original digest format ('d')
+ * with a hash size limitation of 20 bytes,
+ * where <digest type> is either "ima" or "verity",
+ * where <hash algo> is the hash_algo_name[] string.
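+ *
+ * Illustrative example (an assumption, not in the original comment):
+ * for DIGEST_TYPE_IMA and HASH_ALGO_SHA256 the buffer starts with
+ * "ima:sha256:", followed by '\0' and the 32 raw digest bytes.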
+ */
+ u8 buffer[DIGEST_TYPE_NAME_LEN_MAX + CRYPTO_MAX_ALG_NAME + 2 +
+ IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (digest_type < DIGEST_TYPE__LAST && hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO;
+ offset += 1 + sprintf(buffer, "%s:%s:",
+ digest_type_name[digest_type],
+ hash_algo_name[hash_algo]);
+ } else if (hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ offset += 1 + sprintf(buffer, "%s:",
+ hash_algo_name[hash_algo]);
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by the
+ * hash algorithm digest size.
+ */
+ offset += hash_digest_size[hash_algo];
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct ima_max_digest_data hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) {
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+ goto out;
+ }
+
+ if ((const char *)event_data->filename == boot_aggregate_name) {
+ if (ima_tpm_chip) {
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_boot_aggregate(&hash.hdr);
+
+ /* algo can change depending on available PCR banks */
+ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
+ result = -EINVAL;
+
+ if (result < 0)
+ memset(&hash, 0, sizeof(hash));
+ }
+
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
+ goto out;
+ }
+
+ if (!event_data->file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(event_data->file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(event_data->file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ event_data->filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, HASH_ALGO__LAST,
+ field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = ima_hash_algo;
+ u32 cur_digestsize = 0;
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+
+ hash_algo = event_data->iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, hash_algo,
+ field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit),
+ * prefixed with both the digest type and hash algorithm.
+ */
+int ima_eventdigest_ngv2_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = ima_hash_algo;
+ u32 cur_digestsize = 0;
+ u8 digest_type = DIGEST_TYPE_IMA;
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+
+ hash_algo = event_data->iint->ima_hash->algo;
+ if (event_data->iint->flags & IMA_VERITY_REQUIRED)
+ digest_type = DIGEST_TYPE_VERITY;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ digest_type, hash_algo,
+ field_data);
+}
+
+/*
+ * This function writes the digest of the file which is expected to match the
+ * digest contained in the file's appended signature.
+ */
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ enum hash_algo hash_algo;
+ const u8 *cur_digest;
+ u32 cur_digestsize;
+
+ if (!event_data->modsig)
+ return 0;
+
+ if (event_data->violation) {
+ /* Recording a violation. */
+ hash_algo = HASH_ALGO_SHA1;
+ cur_digest = NULL;
+ cur_digestsize = 0;
+ } else {
+ int rc;
+
+ rc = ima_get_modsig_digest(event_data->modsig, &hash_algo,
+ &cur_digest, &cur_digestsize);
+ if (rc)
+ return rc;
+ else if (hash_algo == HASH_ALGO__LAST || cur_digestsize == 0)
+ /* There was some error collecting the digest. */
+ return -EINVAL;
+ }
+
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, hash_algo,
+ field_data);
+}
+
+static int ima_eventname_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+
+ BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+
+ if (event_data->filename) {
+ cur_filename = event_data->filename;
+ cur_filename_len = strlen(event_data->filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (event_data->file) {
+ cur_filename = event_data->file->f_path.dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename when it is too long and no file
+ * descriptor is available to supply the dentry name instead.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
+
+ if (!xattr_value ||
+ (xattr_value->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_value->type != IMA_VERITY_DIGSIG))
+ return ima_eventevmsig_init(event_data, field_data);
+
+ return ima_write_template_field_data(xattr_value, event_data->xattr_len,
+ DATA_FMT_HEX, field_data);
+}
+
+/*
+ * ima_eventbuf_init - include the buffer (e.g. the kexec cmdline) as
+ * part of the template data.
+ */
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ if ((!event_data->buf) || (event_data->buf_len == 0))
+ return 0;
+
+ return ima_write_template_field_data(event_data->buf,
+ event_data->buf_len, DATA_FMT_HEX,
+ field_data);
+}
+
+/*
+ * ima_eventmodsig_init - include the appended file signature as part of the
+ * template data
+ */
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ const void *data;
+ u32 data_len;
+ int rc;
+
+ if (!event_data->modsig)
+ return 0;
+
+ /*
+ * modsig is a runtime structure containing pointers. Get its raw data
+ * instead.
+ */
+ rc = ima_get_raw_modsig(event_data->modsig, &data, &data_len);
+ if (rc)
+ return rc;
+
+ return ima_write_template_field_data(data, data_len, DATA_FMT_HEX,
+ field_data);
+}
+
+/*
+ * ima_eventevmsig_init - include the EVM portable signature as part of the
+ * template data
+ */
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ int rc = 0;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = vfs_getxattr_alloc(&init_user_ns, file_dentry(event_data->file),
+ XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0)
+ return 0;
+
+ if (xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG) {
+ kfree(xattr_data);
+ return 0;
+ }
+
+ rc = ima_write_template_field_data((char *)xattr_data, rc, DATA_FMT_HEX,
+ field_data);
+ kfree(xattr_data);
+ return rc;
+}
+
+static int ima_eventinodedac_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool get_uid)
+{
+ unsigned int id;
+
+ if (!event_data->file)
+ return 0;
+
+ if (get_uid)
+ id = i_uid_read(file_inode(event_data->file));
+ else
+ id = i_gid_read(file_inode(event_data->file));
+
+ if (ima_canonical_fmt) {
+ if (sizeof(id) == sizeof(u16))
+ id = (__force u16)cpu_to_le16(id);
+ else
+ id = (__force u32)cpu_to_le32(id);
+ }
+
+ return ima_write_template_field_data((void *)&id, sizeof(id),
+ DATA_FMT_UINT, field_data);
+}
+
+/*
+ * ima_eventinodeuid_init - include the inode UID as part of the template
+ * data
+ */
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, true);
+}
+
+/*
+ * ima_eventinodegid_init - include the inode GID as part of the template
+ * data
+ */
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventinodemode_init - include the inode mode as part of the template
+ * data
+ */
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct inode *inode;
+ u16 mode;
+
+ if (!event_data->file)
+ return 0;
+
+ inode = file_inode(event_data->file);
+ mode = inode->i_mode;
+ if (ima_canonical_fmt)
+ mode = (__force u16)cpu_to_le16(mode);
+
+ return ima_write_template_field_data((char *)&mode, sizeof(mode),
+ DATA_FMT_UINT, field_data);
+}
+
+static int ima_eventinodexattrs_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ char type)
+{
+ u8 *buffer = NULL;
+ int rc;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), NULL, 0,
+ type, ima_canonical_fmt);
+ if (rc < 0)
+ return 0;
+
+ buffer = kmalloc(rc, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), buffer,
+ rc, type, ima_canonical_fmt);
+ if (rc < 0) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = ima_write_template_field_data((char *)buffer, rc, DATA_FMT_HEX,
+ field_data);
+out:
+ kfree(buffer);
+ return rc;
+}
+
+/*
+ * ima_eventinodexattrnames_init - include a list of xattr names as part of the
+ * template data
+ */
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'n');
+}
+
+/*
+ * ima_eventinodexattrlengths_init - include a list of xattr lengths as part of
+ * the template data
+ */
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'l');
+}
+
+/*
+ * ima_eventinodexattrvalues_init - include a list of xattr values as part of
+ * the template data
+ */
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'v');
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000..9f7c335f3
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+#define ENFORCE_FIELDS 0x00000001
+#define ENFORCE_BUFEND 0x00000002
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ngv2(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname);
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ngv2_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 000000000..52c3c806b
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha1.h>
+#include <crypto/hash.h>
+#include <linux/key.h>
+#include <linux/audit.h>
+
+/* iint action cache flags */
+#define IMA_MEASURE 0x00000001
+#define IMA_MEASURED 0x00000002
+#define IMA_APPRAISE 0x00000004
+#define IMA_APPRAISED 0x00000008
+/*#define IMA_COLLECT 0x00000010 do not use this flag */
+#define IMA_COLLECTED 0x00000020
+#define IMA_AUDIT 0x00000040
+#define IMA_AUDITED 0x00000080
+#define IMA_HASH 0x00000100
+#define IMA_HASHED 0x00000200
+
+/* iint policy rule cache flags */
+#define IMA_NONACTION_FLAGS 0xff000000
+#define IMA_DIGSIG_REQUIRED 0x01000000
+#define IMA_PERMIT_DIRECTIO 0x02000000
+#define IMA_NEW_FILE 0x04000000
+#define EVM_IMMUTABLE_DIGSIG 0x08000000
+#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
+#define IMA_MODSIG_ALLOWED 0x20000000
+#define IMA_CHECK_BLACKLIST 0x40000000
+#define IMA_VERITY_REQUIRED 0x80000000
+
+#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_HASH | IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+ IMA_HASHED | IMA_COLLECTED | \
+ IMA_APPRAISED_SUBMASK)
+
+/* iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE 0x00001000
+#define IMA_FILE_APPRAISED 0x00002000
+#define IMA_MMAP_APPRAISE 0x00004000
+#define IMA_MMAP_APPRAISED 0x00008000
+#define IMA_BPRM_APPRAISE 0x00010000
+#define IMA_BPRM_APPRAISED 0x00020000
+#define IMA_READ_APPRAISE 0x00040000
+#define IMA_READ_APPRAISED 0x00080000
+#define IMA_CREDS_APPRAISE 0x00100000
+#define IMA_CREDS_APPRAISED 0x00200000
+#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+ IMA_BPRM_APPRAISE | IMA_READ_APPRAISE | \
+ IMA_CREDS_APPRAISE)
+#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_READ_APPRAISED | \
+ IMA_CREDS_APPRAISED)
+
+/* iint cache atomic_flags */
+#define IMA_CHANGE_XATTR 0
+#define IMA_UPDATE_XATTR 1
+#define IMA_CHANGE_ATTR 2
+#define IMA_DIGSIG 3
+#define IMA_MUST_MEASURE 4
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
+ EVM_XATTR_PORTABLE_DIGSIG,
+ IMA_VERITY_DIGSIG,
+ IMA_XATTR_LAST
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 data[];
+} __packed;
+
+/* Only used in the EVM HMAC code. */
+struct evm_xattr {
+ struct evm_ima_xattr_data data;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE HASH_MAX_DIGESTSIZE
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[];
+} __packed;
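+
+/*
+ * Illustrative layout (an assumption drawn from the enum above): for
+ * the IMA_XATTR_DIGEST_NG case, the security.ima xattr carries one
+ * type byte, one hash-algorithm byte and then the raw digest, which
+ * the xattr.ng member of struct ima_digest_data mirrors.
+ */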
+
+/*
+ * Instead of having each caller wrap struct ima_digest_data in a local
+ * structure sized for the maximum hash, define ima_max_digest_data once.
+ */
+struct ima_max_digest_data {
+ struct ima_digest_data hdr;
+ u8 digest[HASH_MAX_DIGESTSIZE];
+} __packed;
+
+/*
+ * signature header format v2 - for using with asymmetric keys
+ *
+ * The signature_v2_hdr struct includes a signature format version
+ * to simplify defining new signature formats.
+ *
+ * signature format:
+ * version 2: regular file data hash based signature
+ * version 3: struct ima_file_id data based signature
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum hash_algo] */
+ __be32 keyid; /* IMA key identifier - not X509/PGP specific */
+ __be16 sig_size; /* signature size */
+ uint8_t sig[]; /* signature payload */
+} __packed;
+
+/*
+ * IMA signature version 3 disambiguates the data that is signed, by
+ * indirectly signing the hash of the ima_file_id structure data,
+ * containing either the fsverity_descriptor struct digest or, in the
+ * future, the regular IMA file hash.
+ *
+ * (Only the used portion of the ima_file_id structure is hashed.)
+ */
+struct ima_file_id {
+ __u8 hash_type; /* xattr type [enum evm_ima_xattr_type] */
+ __u8 hash_algorithm; /* Digest algorithm [enum hash_algo] */
+ __u8 hash[HASH_MAX_DIGESTSIZE];
+} __packed;
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct mutex mutex; /* protects: version, flags, digest */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
+ unsigned long real_ino;
+ dev_t real_dev;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+ enum integrity_status ima_read_status:4;
+ enum integrity_status ima_creds_status:4;
+ enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
+};
+
+/* rbtree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+int integrity_kernel_read(struct file *file, loff_t offset,
+ void *addr, unsigned long count);
+
+#define INTEGRITY_KEYRING_EVM 0
+#define INTEGRITY_KEYRING_IMA 1
+#define INTEGRITY_KEYRING_PLATFORM 2
+#define INTEGRITY_KEYRING_MACHINE 3
+#define INTEGRITY_KEYRING_MAX 4
+
+extern struct dentry *integrity_dir;
+
+struct modsig;
+
+#ifdef CONFIG_INTEGRITY_SIGNATURE
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen);
+int integrity_modsig_verify(unsigned int id, const struct modsig *modsig);
+
+int __init integrity_init_keyring(const unsigned int id);
+int __init integrity_load_x509(const unsigned int id, const char *path);
+int __init integrity_load_cert(const unsigned int id, const char *source,
+ const void *data, size_t len, key_perm_t perm);
+#else
+
+static inline int integrity_digsig_verify(const unsigned int id,
+ const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int integrity_modsig_verify(unsigned int id,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int integrity_init_keyring(const unsigned int id)
+{
+ return 0;
+}
+
+static inline int __init integrity_load_cert(const unsigned int id,
+ const char *source,
+ const void *data, size_t len,
+ key_perm_t perm)
+{
+ return 0;
+}
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
+
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen);
+#else
+static inline int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig);
+#else
+static inline int ima_modsig_verify(struct key *keyring,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void);
+#else
+static inline void ima_load_x509(void)
+{
+}
+#endif
+
+#ifdef CONFIG_EVM_LOAD_X509
+void __init evm_load_x509(void);
+#else
+static inline void evm_load_x509(void)
+{
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_AUDIT
+/* declarations */
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int info);
+
+void integrity_audit_message(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int info,
+ int errno);
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return audit_log_start(ctx, gfp_mask, type);
+}
+
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info)
+{
+}
+
+static inline void integrity_audit_message(int audit_msgno,
+ struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info, int errno)
+{
+}
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return NULL;
+}
+
+#endif
+
+#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
+void __init add_to_platform_keyring(const char *source, const void *data,
+ size_t len);
+#else
+static inline void __init add_to_platform_keyring(const char *source,
+ const void *data, size_t len)
+{
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
+void __init add_to_machine_keyring(const char *source, const void *data, size_t len);
+bool __init trust_moklist(void);
+#else
+static inline void __init add_to_machine_keyring(const char *source,
+ const void *data, size_t len)
+{
+}
+static inline bool __init trust_moklist(void)
+{
+ return false;
+}
+#endif
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
new file mode 100644
index 000000000..0ec5e4c22
--- /dev/null
+++ b/security/integrity/integrity_audit.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: integrity_audit.c
+ * Audit calls for the integrity subsystem
+ */
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/audit.h>
+#include "integrity.h"
+
+static int integrity_audit_info;
+
+/* integrity_audit_setup - enable informational auditing messages */
+static int __init integrity_audit_setup(char *str)
+{
+ unsigned long audit;
+
+ if (!kstrtoul(str, 0, &audit))
+ integrity_audit_info = audit ? 1 : 0;
+ return 1;
+}
+__setup("integrity_audit=", integrity_audit_setup);
+
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int audit_info)
+{
+ integrity_audit_message(audit_msgno, inode, fname, op, cause,
+ result, audit_info, 0);
+}
+
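+/*
+ * Illustrative record (hypothetical values; format reconstructed from
+ * the audit_log_format() calls below):
+ *   pid=1234 uid=0 auid=1000 ses=2 op=appraise_data
+ *   cause=invalid-signature comm="bash" name="/usr/bin/foo"
+ *   dev="sda1" ino=42 res=0 errno=-74
+ */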
+void integrity_audit_message(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int audit_info,
+ int errno)
+{
+ struct audit_buffer *ab;
+ char name[TASK_COMM_LEN];
+
+ if (!integrity_audit_info && audit_info == 1) /* Skip info messages */
+ return;
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno);
+ if (!ab)
+ return;
+ audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
+ task_pid_nr(current),
+ from_kuid(&init_user_ns, current_uid()),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+ audit_log_task_context(ab);
+ audit_log_format(ab, " op=%s cause=%s comm=", op, cause);
+ audit_log_untrustedstring(ab, get_task_comm(name, current));
+ if (fname) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, fname);
+ }
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ audit_log_format(ab, " res=%d errno=%d", !result, errno);
+ audit_log_end(ab);
+}
diff --git a/security/integrity/platform_certs/efi_parser.c b/security/integrity/platform_certs/efi_parser.c
new file mode 100644
index 000000000..d98260f84
--- /dev/null
+++ b/security/integrity/platform_certs/efi_parser.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* EFI signature/key/certificate list parser
+ *
+ * Copyright (C) 2012, 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) "EFI: "fmt
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+
+/**
+ * parse_efi_signature_list - Parse an EFI signature list for certificates
+ * @source: The source of the key
+ * @data: The data blob to parse
+ * @size: The size of the data blob
+ * @get_handler_for_guid: Get the handler func for the sig type (or NULL)
+ *
+ * Parse an EFI signature list looking for elements of interest. A list is
+ * made up of a series of sublists, where all the elements in a sublist are of
+ * the same type, but sublists can be of different types.
+ *
+ * For each sublist encountered, the @get_handler_for_guid function is called
+ * with the type specifier GUID and returns either a pointer to a function to
+ * handle elements of that type or NULL if the type is not of interest.
+ *
+ * If the sublist is of interest, each element is passed to the handler
+ * function in turn.
+ *
+ * Error EBADMSG is returned if the list doesn't parse correctly and 0 is
+ * returned if the list was parsed correctly. No error can be returned from
+ * the @get_handler_for_guid function or the element handler function it
+ * returns.
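+ *
+ * Layout sketch (illustrative, inferred from the parsing below): each
+ * sublist is an EFI_SIGNATURE_LIST header (type GUID and the list,
+ * header and signature sizes) followed by an optional vendor header
+ * and then a whole number of fixed-size elements, each an owner GUID
+ * plus the signature data passed to the handler.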
+ */
+int __init parse_efi_signature_list(
+ const char *source,
+ const void *data, size_t size,
+ efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *))
+{
+ efi_element_handler_t handler;
+ unsigned int offs = 0;
+
+ pr_devel("-->%s(,%zu)\n", __func__, size);
+
+ while (size > 0) {
+ const efi_signature_data_t *elem;
+ efi_signature_list_t list;
+ size_t lsize, esize, hsize, elsize;
+
+ if (size < sizeof(list))
+ return -EBADMSG;
+
+ memcpy(&list, data, sizeof(list));
+ pr_devel("LIST[%04x] guid=%pUl ls=%x hs=%x ss=%x\n",
+ offs,
+ &list.signature_type, list.signature_list_size,
+ list.signature_header_size, list.signature_size);
+
+ lsize = list.signature_list_size;
+ hsize = list.signature_header_size;
+ esize = list.signature_size;
+ elsize = lsize - sizeof(list) - hsize;
+
+ if (lsize > size) {
+ pr_devel("<--%s() = -EBADMSG [overrun @%x]\n",
+ __func__, offs);
+ return -EBADMSG;
+ }
+
+ if (lsize < sizeof(list) ||
+ lsize - sizeof(list) < hsize ||
+ esize < sizeof(*elem) ||
+ elsize < esize ||
+ elsize % esize != 0) {
+ pr_devel("- bad size combo @%x\n", offs);
+ return -EBADMSG;
+ }
+
+ handler = get_handler_for_guid(&list.signature_type);
+ if (!handler) {
+ data += lsize;
+ size -= lsize;
+ offs += lsize;
+ continue;
+ }
+
+ data += sizeof(list) + hsize;
+ size -= sizeof(list) + hsize;
+ offs += sizeof(list) + hsize;
+
+ for (; elsize > 0; elsize -= esize) {
+ elem = data;
+
+ pr_devel("ELEM[%04x]\n", offs);
+ handler(source,
+ &elem->signature_data,
+ esize - sizeof(*elem));
+
+ data += esize;
+ size -= esize;
+ offs += esize;
+ }
+ }
+
+ return 0;
+}
diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
new file mode 100644
index 000000000..8a1124e4d
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "../integrity.h"
+#include "keyring_handler.h"
+
+static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
+static efi_guid_t efi_cert_x509_sha256_guid __initdata =
+ EFI_CERT_X509_SHA256_GUID;
+static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+static __init void uefi_blacklist_x509_tbs(const char *source,
+ const void *data, size_t len)
+{
+ mark_hash_blacklisted(data, len, BLACKLIST_HASH_X509_TBS);
+}
+
+/*
+ * Blacklist the hash of an executable.
+ */
+static __init void uefi_blacklist_binary(const char *source,
+ const void *data, size_t len)
+{
+ mark_hash_blacklisted(data, len, BLACKLIST_HASH_BINARY);
+}
+
+/*
+ * Add an X509 cert to the revocation list.
+ */
+static __init void uefi_revocation_list_x509(const char *source,
+ const void *data, size_t len)
+{
+ add_key_to_revocation_list(data, len);
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI db tables.
+ */
+__init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return add_to_platform_keyring;
+ return NULL;
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the MokListRT tables.
+ */
+__init efi_element_handler_t get_handler_for_mok(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0) {
+ if (IS_ENABLED(CONFIG_INTEGRITY_MACHINE_KEYRING) && trust_moklist())
+ return add_to_machine_keyring;
+ else
+ return add_to_platform_keyring;
+ }
+ return NULL;
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI dbx and MokListXRT tables.
+ */
+__init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
+ return uefi_blacklist_x509_tbs;
+ if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
+ return uefi_blacklist_binary;
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return uefi_revocation_list_x509;
+ return NULL;
+}
diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
new file mode 100644
index 000000000..212d894a8
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef PLATFORM_CERTS_INTERNAL_H
+#define PLATFORM_CERTS_INTERNAL_H
+
+#include <linux/efi.h>
+
+void blacklist_hash(const char *source, const void *data,
+ size_t len, const char *type,
+ size_t type_len);
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+void blacklist_x509_tbs(const char *source, const void *data, size_t len);
+
+/*
+ * Blacklist the hash of an executable.
+ */
+void blacklist_binary(const char *source, const void *data, size_t len);
+
+/*
+ * Return the handler for particular signature list types found in the db.
+ */
+efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
+
+/*
+ * Return the handler for particular signature list types found in the mok.
+ */
+efi_element_handler_t get_handler_for_mok(const efi_guid_t *sig_type);
+
+/*
+ * Return the handler for particular signature list types found in the dbx.
+ */
+efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
+
+#endif
+
+#ifndef UEFI_QUIRK_SKIP_CERT
+#define UEFI_QUIRK_SKIP_CERT(vendor, product) \
+ .matches = { \
+ DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
+ DMI_MATCH(DMI_PRODUCT_NAME, product), \
+ },
+#endif
diff --git a/security/integrity/platform_certs/load_ipl_s390.c b/security/integrity/platform_certs/load_ipl_s390.c
new file mode 100644
index 000000000..e769dcb7e
--- /dev/null
+++ b/security/integrity/platform_certs/load_ipl_s390.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include <asm/boot_data.h>
+#include "../integrity.h"
+
+/*
+ * Load the certs contained in the IPL report created by the machine loader
+ * into the platform trusted keyring.
+ */
+static int __init load_ipl_certs(void)
+{
+ void *ptr, *end;
+ unsigned int len;
+
+ if (!ipl_cert_list_addr)
+ return 0;
+ /* Copy the certificates to the system keyring */
+ ptr = (void *) ipl_cert_list_addr;
+ end = ptr + ipl_cert_list_size;
+ while ((void *) ptr < end) {
+ len = *(unsigned int *) ptr;
+ ptr += sizeof(unsigned int);
+ add_to_platform_keyring("IPL:db", ptr, len);
+ ptr += len;
+ }
+ return 0;
+}
+late_initcall(load_ipl_certs);
diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c
new file mode 100644
index 000000000..a2900cb85
--- /dev/null
+++ b/security/integrity/platform_certs/load_powerpc.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * - loads keys and hashes stored and controlled by the firmware.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <asm/secure_boot.h>
+#include <asm/secvar.h>
+#include "keyring_handler.h"
+
+/*
+ * Get a certificate list blob from the named secure variable.
+ */
+static __init void *get_cert_list(u8 *key, unsigned long keylen, uint64_t *size)
+{
+ int rc;
+ void *db;
+
+ rc = secvar_ops->get(key, keylen, NULL, size);
+ if (rc) {
+ pr_err("Couldn't get size: %d\n", rc);
+ return NULL;
+ }
+
+ db = kmalloc(*size, GFP_KERNEL);
+ if (!db)
+ return NULL;
+
+ rc = secvar_ops->get(key, keylen, db, size);
+ if (rc) {
+ kfree(db);
+ pr_err("Error reading %s var: %d\n", key, rc);
+ return NULL;
+ }
+
+ return db;
+}
+
+/*
+ * Load the certs contained in the keys databases into the platform trusted
+ * keyring and the blacklisted X.509 cert SHA256 hashes into the blacklist
+ * keyring.
+ */
+static int __init load_powerpc_certs(void)
+{
+ void *db = NULL, *dbx = NULL;
+ uint64_t dbsize = 0, dbxsize = 0;
+ int rc = 0;
+ struct device_node *node;
+
+ if (!secvar_ops)
+ return -ENODEV;
+
+ /* The following only applies for the edk2-compat backend. */
+ node = of_find_compatible_node(NULL, NULL, "ibm,edk2-compat-v1");
+ if (!node)
+ return -ENODEV;
+
+ /*
+ * Get db and dbx. They might not exist, so it isn't an error if we
+ * can't get them.
+ */
+ db = get_cert_list("db", 3, &dbsize);
+ if (!db) {
+ pr_err("Couldn't get db list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:db", db, dbsize,
+ get_handler_for_db);
+ if (rc)
+ pr_err("Couldn't parse db signatures: %d\n", rc);
+ kfree(db);
+ }
+
+ dbx = get_cert_list("dbx", 4, &dbxsize);
+ if (!dbx) {
+ pr_info("Couldn't get dbx list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:dbx", dbx, dbxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse dbx signatures: %d\n", rc);
+ kfree(dbx);
+ }
+
+ of_node_put(node);
+
+ return rc;
+}
+late_initcall(load_powerpc_certs);
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
new file mode 100644
index 000000000..d1fdd1134
--- /dev/null
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <linux/ima.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "../integrity.h"
+#include "keyring_handler.h"
+
+/*
+ * On T2 Macs, reading the db and dbx EFI variables to load the UEFI
+ * Secure Boot certificates causes a page fault in Apple's firmware and
+ * a crash that disables the EFI runtime services. The following quirk
+ * skips reading these variables.
+ */
+static const struct dmi_system_id uefi_skip_cert[] = {
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,2") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,3") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,4") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,2") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,3") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,4") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "Macmini8,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") },
+ { }
+};
+
+/*
+ * Look to see if a UEFI variable called MokIgnoreDB exists and return true if
+ * it does.
+ *
+ * This UEFI variable is set by the shim if a user tells the shim to not use
+ * the certs/hashes in the UEFI db variable for verification purposes. If it
+ * is set, we should ignore the db variable too, and the true return
+ * indicates this.
+ */
+static __init bool uefi_check_ignore_db(void)
+{
+ efi_status_t status;
+ unsigned int db = 0;
+ unsigned long size = sizeof(db);
+ efi_guid_t guid = EFI_SHIM_LOCK_GUID;
+
+ status = efi.get_variable(L"MokIgnoreDB", &guid, NULL, &size, &db);
+ return status == EFI_SUCCESS;
+}
+
+/*
+ * Get a certificate list blob from the named EFI variable.
+ */
+static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
+ unsigned long *size, efi_status_t *status)
+{
+ unsigned long lsize = 4;
+ unsigned long tmpdb[4];
+ void *db;
+
+ *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
+ if (*status == EFI_NOT_FOUND)
+ return NULL;
+
+ if (*status != EFI_BUFFER_TOO_SMALL) {
+ pr_err("Couldn't get size: 0x%lx\n", *status);
+ return NULL;
+ }
+
+ db = kmalloc(lsize, GFP_KERNEL);
+ if (!db)
+ return NULL;
+
+ *status = efi.get_variable(name, guid, NULL, &lsize, db);
+ if (*status != EFI_SUCCESS) {
+ kfree(db);
+ pr_err("Error reading db var: 0x%lx\n", *status);
+ return NULL;
+ }
+
+ *size = lsize;
+ return db;
+}
+
+/*
+ * load_moklist_certs() - Load MokList certs
+ *
+ * Load the certs contained in the UEFI MokListRT database into the
+ * platform trusted keyring.
+ *
+ * This routine checks the EFI MOK config table first. If and only if
+ * that fails, this routine uses the MokListRT ordinary UEFI variable.
+ *
+ * Return: Status
+ */
+static int __init load_moklist_certs(void)
+{
+ struct efi_mokvar_table_entry *mokvar_entry;
+ efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
+ void *mok;
+ unsigned long moksize;
+ efi_status_t status;
+ int rc;
+
+ /* First try to load certs from the EFI MOKvar config table.
+ * It's not an error if the MOKvar config table doesn't exist
+ * or the MokListRT entry is not found in it.
+ */
+ mokvar_entry = efi_mokvar_entry_find("MokListRT");
+ if (mokvar_entry) {
+ rc = parse_efi_signature_list("UEFI:MokListRT (MOKvar table)",
+ mokvar_entry->data,
+ mokvar_entry->data_size,
+ get_handler_for_mok);
+ /* All done if that worked. */
+ if (!rc)
+ return rc;
+
+ pr_err("Couldn't parse MokListRT signatures from EFI MOKvar config table: %d\n",
+ rc);
+ }
+
+ /* Get MokListRT. It might not exist, so it isn't an error
+ * if we can't get it.
+ */
+ mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status);
+ if (mok) {
+ rc = parse_efi_signature_list("UEFI:MokListRT",
+ mok, moksize, get_handler_for_mok);
+ kfree(mok);
+ if (rc)
+ pr_err("Couldn't parse MokListRT signatures: %d\n", rc);
+ return rc;
+ }
+ if (status == EFI_NOT_FOUND)
+ pr_debug("MokListRT variable wasn't found\n");
+ else
+ pr_info("Couldn't get UEFI MokListRT\n");
+ return 0;
+}
+
+/*
+ * load_uefi_certs() - Load certs from UEFI sources
+ *
+ * Load the certs contained in the UEFI databases into the platform trusted
+ * keyring and the UEFI blacklisted X.509 cert SHA256 hashes into the blacklist
+ * keyring.
+ */
+static int __init load_uefi_certs(void)
+{
+ efi_guid_t secure_var = EFI_IMAGE_SECURITY_DATABASE_GUID;
+ efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
+ void *db = NULL, *dbx = NULL, *mokx = NULL;
+ unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0;
+ efi_status_t status;
+ int rc = 0;
+ const struct dmi_system_id *dmi_id;
+
+ dmi_id = dmi_first_match(uefi_skip_cert);
+ if (dmi_id) {
+ pr_err("Reading UEFI Secure Boot Certs is not supported on T2 Macs.\n");
+		return 0;
+ }
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+		return 0;
+
+ /* Get db and dbx. They might not exist, so it isn't an error
+ * if we can't get them.
+ */
+ if (!uefi_check_ignore_db()) {
+ db = get_cert_list(L"db", &secure_var, &dbsize, &status);
+ if (!db) {
+ if (status == EFI_NOT_FOUND)
+ pr_debug("MODSIGN: db variable wasn't found\n");
+ else
+ pr_err("MODSIGN: Couldn't get UEFI db list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:db",
+ db, dbsize, get_handler_for_db);
+ if (rc)
+ pr_err("Couldn't parse db signatures: %d\n",
+ rc);
+ kfree(db);
+ }
+ }
+
+ dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status);
+ if (!dbx) {
+ if (status == EFI_NOT_FOUND)
+ pr_debug("dbx variable wasn't found\n");
+ else
+ pr_info("Couldn't get UEFI dbx list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:dbx",
+ dbx, dbxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse dbx signatures: %d\n", rc);
+ kfree(dbx);
+ }
+
+	/* the MOK/MOKx cannot be trusted when secure boot is disabled */
+ if (!arch_ima_get_secureboot())
+ return 0;
+
+ mokx = get_cert_list(L"MokListXRT", &mok_var, &mokxsize, &status);
+ if (!mokx) {
+ if (status == EFI_NOT_FOUND)
+ pr_debug("mokx variable wasn't found\n");
+ else
+ pr_info("Couldn't get mokx list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:MokListXRT",
+ mokx, mokxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse mokx signatures %d\n", rc);
+ kfree(mokx);
+ }
+
+ /* Load the MokListRT certs */
+ rc = load_moklist_certs();
+
+ return rc;
+}
+late_initcall(load_uefi_certs);
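
As a quick way to see what uefi_check_ignore_db() observes from the other side, the sketch below checks for the same variable through efivarfs. This is an illustrative userspace aside, not part of the patch; the efivarfs mount point and the shim lock GUID (605dab50-e046-4300-abb6-3dd810dd8b23) embedded in the file name are assumptions about the running system.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* efivarfs exposes variables as Name-GUID; the kernel code above only
	 * tests for existence, never for the variable's contents. */
	const char *path =
		"/sys/firmware/efi/efivars/MokIgnoreDB-605dab50-e046-4300-abb6-3dd810dd8b23";

	if (access(path, F_OK) == 0)
		printf("MokIgnoreDB is set; the kernel will skip the UEFI db\n");
	else
		printf("MokIgnoreDB not set; UEFI db certs may be loaded\n");
	return 0;
}
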
diff --git a/security/integrity/platform_certs/machine_keyring.c b/security/integrity/platform_certs/machine_keyring.c
new file mode 100644
index 000000000..7aaed7950
--- /dev/null
+++ b/security/integrity/platform_certs/machine_keyring.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Machine keyring routines.
+ *
+ * Copyright (c) 2021, Oracle and/or its affiliates.
+ */
+
+#include <linux/efi.h>
+#include "../integrity.h"
+
+static bool trust_mok;
+
+static __init int machine_keyring_init(void)
+{
+ int rc;
+
+ rc = integrity_init_keyring(INTEGRITY_KEYRING_MACHINE);
+ if (rc)
+ return rc;
+
+ pr_notice("Machine keyring initialized\n");
+ return 0;
+}
+device_initcall(machine_keyring_init);
+
+void __init add_to_machine_keyring(const char *source, const void *data, size_t len)
+{
+ key_perm_t perm;
+ int rc;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW;
+ rc = integrity_load_cert(INTEGRITY_KEYRING_MACHINE, source, data, len, perm);
+
+ /*
+ * Some MOKList keys may not pass the machine keyring restrictions.
+ * If the restriction check does not pass and the platform keyring
+ * is configured, try to add it into that keyring instead.
+ */
+ if (rc && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
+ rc = integrity_load_cert(INTEGRITY_KEYRING_PLATFORM, source,
+ data, len, perm);
+
+ if (rc)
+ pr_info("Error adding keys to machine keyring %s\n", source);
+}
+
+/*
+ * Try to load the MokListTrustedRT MOK variable to see if we should trust
+ * the MOK keys within the kernel. It is not an error if this variable
+ * does not exist. If it does not exist, MOK keys should not be trusted
+ * within the machine keyring.
+ */
+static __init bool uefi_check_trust_mok_keys(void)
+{
+ struct efi_mokvar_table_entry *mokvar_entry;
+
+ mokvar_entry = efi_mokvar_entry_find("MokListTrustedRT");
+
+ if (mokvar_entry)
+ return true;
+
+ return false;
+}
+
+bool __init trust_moklist(void)
+{
+ static bool initialized;
+
+ if (!initialized) {
+ initialized = true;
+
+ if (uefi_check_trust_mok_keys())
+ trust_mok = true;
+ }
+
+ return trust_mok;
+}
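
The trust_moklist() helper above memoizes a one-time probe; skipping locking is safe only because __init code runs single-threaded during early boot. A minimal standalone sketch of the same caching pattern:

#include <stdbool.h>
#include <stdio.h>

static bool expensive_probe(void)
{
	puts("probe ran");	/* stands in for the MOK table lookup */
	return true;
}

static bool cached_probe(void)
{
	static bool initialized, result;

	if (!initialized) {
		initialized = true;
		result = expensive_probe();
	}
	return result;
}

int main(void)
{
	cached_probe();		/* probe runs once */
	cached_probe();		/* cached; probe does not run again */
	return 0;
}
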
diff --git a/security/integrity/platform_certs/platform_keyring.c b/security/integrity/platform_certs/platform_keyring.c
new file mode 100644
index 000000000..bcafd7387
--- /dev/null
+++ b/security/integrity/platform_certs/platform_keyring.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Platform keyring for firmware/platform keys
+ *
+ * Copyright IBM Corporation, 2018
+ * Author(s): Nayna Jain <nayna@linux.ibm.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "../integrity.h"
+
+/**
+ * add_to_platform_keyring - Add to platform keyring without validation.
+ * @source: Source of key
+ * @data: The blob holding the key
+ * @len: The length of the data blob
+ *
+ * Add a key to the platform keyring without checking its trust chain. This
+ * is available only during kernel initialisation.
+ */
+void __init add_to_platform_keyring(const char *source, const void *data,
+ size_t len)
+{
+ key_perm_t perm;
+ int rc;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW;
+
+ rc = integrity_load_cert(INTEGRITY_KEYRING_PLATFORM, source, data, len,
+ perm);
+ if (rc)
+ pr_info("Error adding keys to platform keyring %s\n", source);
+}
+
+/*
+ * Create the platform keyring.
+ */
+static __init int platform_keyring_init(void)
+{
+ int rc;
+
+ rc = integrity_init_keyring(INTEGRITY_KEYRING_PLATFORM);
+ if (rc)
+ return rc;
+
+ pr_notice("Platform Keyring initialized\n");
+ return 0;
+}
+
+/*
+ * Must be initialised before we try to load the keys into the keyring.
+ */
+device_initcall(platform_keyring_init);
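
Both the platform and machine keyring loaders grant the same permission mask to the keys they add. A small sketch shows what that mask expands to; the constant values are copied from include/linux/key.h at the time of writing and should be treated as assumptions if the header changes:

#include <stdio.h>

#define KEY_POS_SETATTR	0x20000000
#define KEY_POS_ALL	0x3f000000
#define KEY_USR_VIEW	0x00010000

int main(void)
{
	/* possessor gets everything except SETATTR; the owning user may
	 * only view the key */
	unsigned int perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW;

	printf("platform/machine keyring key perm = 0x%08x\n", perm);
	/* prints 0x1f010000 */
	return 0;
}
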
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
new file mode 100644
index 000000000..abb03a1b2
--- /dev/null
+++ b/security/keys/Kconfig
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Key management configuration
+#
+
+config KEYS
+ bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
+ help
+ This option provides support for retaining authentication tokens and
+ access keys in the kernel.
+
+ It also includes provision of methods by which such keys might be
+ associated with a process so that network filesystems, encryption
+ support and the like can find them.
+
+	  Furthermore, a special type of key is available that acts as a
+	  keyring: a searchable sequence of keys. Each process is equipped
+	  with access to five standard keyrings: UID-specific, GID-specific,
+	  session, process and thread.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEYS_REQUEST_CACHE
+ bool "Enable temporary caching of the last request_key() result"
+ depends on KEYS
+ help
+	  This option causes the result of the last successful request_key()
+	  call that didn't require an upcall to userspace to be cached
+	  temporarily in the task_struct. The cache is cleared by exit and
+	  just prior to the resumption of userspace.
+
+	  This allows multi-step processes, in which each step requests a key
+	  that is likely the same as the one requested by the previous step,
+	  to avoid repeating the search.
+
+ An example of such a process is a pathwalk through a network
+ filesystem in which each method needs to request an authentication
+ key. Pathwalk will call multiple methods for each dentry traversed
+ (permission, d_revalidate, lookup, getxattr, getacl, ...).
+
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed by either the user whose keyring
+ it is or by a process with administrative privileges. The active
+	  LSMs get to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ depends on CRYPTO_LIB_CHACHA20POLY1305 = y
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config TRUSTED_KEYS
+ tristate "TRUSTED KEYS"
+ depends on KEYS
+ help
+ This option provides support for creating, sealing, and unsealing
+ keys in the kernel. Trusted keys are random number symmetric keys,
+ generated and sealed by a trust source selected at kernel boot-time.
+ Userspace will only ever see encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+if TRUSTED_KEYS
+source "security/keys/trusted-keys/Kconfig"
+endif
+
+config ENCRYPTED_KEYS
+ tristate "ENCRYPTED KEYS"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_SHA256
+ select CRYPTO_RNG
+ help
+	  This option provides support for creating/encrypting/decrypting keys
+ in the kernel. Encrypted keys are instantiated using kernel
+ generated random numbers or provided decrypted data, and are
+ encrypted/decrypted with a 'master' symmetric key. The 'master'
+ key can be either a trusted-key or user-key type. Only encrypted
+	  blobs are ever output to userspace.
+
+ If you are unsure as to whether this is required, answer N.
+
+config USER_DECRYPTED_DATA
+ bool "Allow encrypted keys with user decrypted data"
+ depends on ENCRYPTED_KEYS
+ help
+ This option provides support for instantiating encrypted keys using
+ user-provided decrypted data. The decrypted data must be hex-ascii
+ encoded.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEY_DH_OPERATIONS
+ bool "Diffie-Hellman operations on retained keys"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_KDF800108_CTR
+ select CRYPTO_DH
+ help
+ This option provides support for calculating Diffie-Hellman
+ public keys and shared secrets using values stored as keys
+ in the kernel.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEY_NOTIFICATIONS
+ bool "Provide key/keyring change notifications"
+ depends on KEYS && WATCH_QUEUE
+ help
+ This option provides support for getting change notifications
+ on keys and keyrings on which the caller has View permission.
+ This makes use of pipes to handle the notification buffer and
+ provides KEYCTL_WATCH_KEY to enable/disable watches.
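
To make the options above concrete, here is a hedged userspace sketch of the basic retention API they enable: it adds a "user" key to the session keyring via the raw syscalls (so it needs no libkeyutils) and reads it back. The numeric values for KEYCTL_READ (11) and KEY_SPEC_SESSION_KEYRING (-3) mirror the UAPI headers.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KEY_SPEC_SESSION_KEYRING	-3
#define KEYCTL_READ			11

int main(void)
{
	const char payload[] = "hunter2";
	char buf[64];
	long key, n;

	/* retain a token in the session keyring */
	key = syscall(__NR_add_key, "user", "example:token",
		      payload, strlen(payload), KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* read it back through keyctl */
	n = syscall(__NR_keyctl, KEYCTL_READ, key, buf, sizeof(buf), 0);
	if (n < 0) {
		perror("keyctl read");
		return 1;
	}
	printf("read back %ld bytes: %.*s\n", n, (int)n, buf);
	return 0;
}
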
diff --git a/security/keys/Makefile b/security/keys/Makefile
new file mode 100644
index 000000000..5f40807f0
--- /dev/null
+++ b/security/keys/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for key management
+#
+
+#
+# Core
+#
+obj-y := \
+ gc.o \
+ key.o \
+ keyring.o \
+ keyctl.o \
+ permission.o \
+ process_keys.o \
+ request_key.o \
+ request_key_auth.o \
+ user_defined.o
+compat-obj-$(CONFIG_KEY_DH_OPERATIONS) += compat_dh.o
+obj-$(CONFIG_COMPAT) += compat.o $(compat-obj-y)
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
+obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
+
+#
+# Key types
+#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted-keys/
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 000000000..c3367622c
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Large capacity key type
+ *
+ * Copyright (C) 2017-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) "big_key: "fmt
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+#include <crypto/chacha20poly1305.h>
+
+/*
+ * Layout of key payload words.
+ */
+struct big_key_payload {
+ u8 *data;
+ struct path path;
+ size_t length;
+};
+#define to_big_key_payload(payload) \
+ (struct big_key_payload *)((payload).data)
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .preparse = big_key_preparse,
+ .free_preparse = big_key_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+ .update = big_key_update,
+};
+
+/*
+ * Preparse a big key
+ */
+int big_key_preparse(struct key_preparsed_payload *prep)
+{
+ struct big_key_payload *payload = to_big_key_payload(prep->payload);
+ struct file *file;
+ u8 *buf, *enckey;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*payload) != sizeof(prep->payload.data));
+
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ return -EINVAL;
+
+ /* Set an arbitrary quota */
+ prep->quotalen = 16;
+
+ payload->length = datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+		 * File content is stored encrypted with a randomly generated
+		 * key.
+ * Since the key is random for each file, we can set the nonce
+ * to zero, provided we never define a ->update() call.
+ */
+ loff_t pos = 0;
+
+ buf = kvmalloc(enclen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* generate random key */
+ enckey = kmalloc(CHACHA20POLY1305_KEY_SIZE, GFP_KERNEL);
+ if (!enckey) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = get_random_bytes_wait(enckey, CHACHA20POLY1305_KEY_SIZE);
+ if (unlikely(ret))
+ goto err_enckey;
+
+ /* encrypt data */
+ chacha20poly1305_encrypt(buf, prep->data, datalen, NULL, 0,
+ 0, enckey);
+
+ /* save aligned data to file */
+ file = shmem_kernel_file_setup("", enclen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_enckey;
+ }
+
+ written = kernel_write(file, buf, enclen, &pos);
+ if (written != enclen) {
+ ret = written;
+ if (written >= 0)
+ ret = -EIO;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ payload->data = enckey;
+ payload->path = file->f_path;
+ path_get(&payload->path);
+ fput(file);
+ kvfree_sensitive(buf, enclen);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ payload->data = data;
+ memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_enckey:
+ kfree_sensitive(enckey);
+error:
+ kvfree_sensitive(buf, enclen);
+ return ret;
+}
+
+/*
+ * Clear the preparse payload.
+ */
+void big_key_free_preparse(struct key_preparsed_payload *prep)
+{
+ struct big_key_payload *payload = to_big_key_payload(prep->payload);
+
+ if (prep->datalen > BIG_KEY_FILE_THRESHOLD)
+ path_put(&payload->path);
+ kfree_sensitive(payload->data);
+}
+
+/*
+ * dispose of the links from a revoked keyring
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_positive(key) && payload->length > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(&payload->path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
+
+ if (payload->length > BIG_KEY_FILE_THRESHOLD) {
+ path_put(&payload->path);
+ payload->path.mnt = NULL;
+ payload->path.dentry = NULL;
+ }
+ kfree_sensitive(payload->data);
+ payload->data = NULL;
+}
+
+/*
+ * Update a big key
+ */
+int big_key_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ int ret;
+
+ ret = key_payload_reserve(key, prep->datalen);
+ if (ret < 0)
+ return ret;
+
+ if (key_is_positive(key))
+ big_key_destroy(key);
+
+ return generic_key_instantiate(key, prep);
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
+
+ seq_puts(m, key->description);
+
+ if (key_is_positive(key))
+ seq_printf(m, ": %zu [%s]",
+ payload->length,
+ payload->length > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char *buffer, size_t buflen)
+{
+ struct big_key_payload *payload = to_big_key_payload(key->payload);
+ size_t datalen = payload->length;
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct file *file;
+ u8 *buf, *enckey = payload->data;
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
+ loff_t pos = 0;
+
+ buf = kvmalloc(enclen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ file = dentry_open(&payload->path, O_RDONLY, current_cred());
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto error;
+ }
+
+ /* read file to kernel and decrypt */
+ ret = kernel_read(file, buf, enclen, &pos);
+ if (ret != enclen) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_fput;
+ }
+
+ ret = chacha20poly1305_decrypt(buf, buf, enclen, NULL, 0, 0,
+ enckey) ? 0 : -EBADMSG;
+ if (unlikely(ret))
+ goto err_fput;
+
+ ret = datalen;
+
+ /* copy out decrypted data */
+ memcpy(buffer, buf, datalen);
+
+err_fput:
+ fput(file);
+error:
+ kvfree_sensitive(buf, enclen);
+ } else {
+ ret = datalen;
+ memcpy(buffer, payload->data, datalen);
+ }
+
+ return ret;
+}
+
+/*
+ * Register key type
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+late_initcall(big_key_init);
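
A hedged usage sketch: from userspace a big_key looks like any other key type, and the kernel picks in-memory or encrypted-shmem backing based on the threshold above. This assumes a kernel with CONFIG_BIG_KEYS enabled; the description string is arbitrary.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KEY_SPEC_SESSION_KEYRING	-3

int main(void)
{
	size_t len = 8192;	/* comfortably above BIG_KEY_FILE_THRESHOLD */
	char *blob = malloc(len);
	long key;

	if (!blob)
		return 1;
	memset(blob, 0x5a, len);

	/* the payload lands in an encrypted shmem file, swappable if needed */
	key = syscall(__NR_add_key, "big_key", "example:bulk",
		      blob, len, KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key(big_key)");
		return 1;
	}
	printf("big_key serial %ld created\n", key);
	return 0;
}
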
diff --git a/security/keys/compat.c b/security/keys/compat.c
new file mode 100644
index 000000000..1545efdca
--- /dev/null
+++ b/security/keys/compat.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/syscalls.h>
+#include <linux/keyctl.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ */
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+ u32, arg2, u32, arg3, u32, arg4, u32, arg5)
+{
+ switch (option) {
+ case KEYCTL_GET_KEYRING_ID:
+ return keyctl_get_keyring_ID(arg2, arg3);
+
+ case KEYCTL_JOIN_SESSION_KEYRING:
+ return keyctl_join_session_keyring(compat_ptr(arg2));
+
+ case KEYCTL_UPDATE:
+ return keyctl_update_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_REVOKE:
+ return keyctl_revoke_key(arg2);
+
+ case KEYCTL_DESCRIBE:
+ return keyctl_describe_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_CLEAR:
+ return keyctl_keyring_clear(arg2);
+
+ case KEYCTL_LINK:
+ return keyctl_keyring_link(arg2, arg3);
+
+ case KEYCTL_UNLINK:
+ return keyctl_keyring_unlink(arg2, arg3);
+
+ case KEYCTL_SEARCH:
+ return keyctl_keyring_search(arg2, compat_ptr(arg3),
+ compat_ptr(arg4), arg5);
+
+ case KEYCTL_READ:
+ return keyctl_read_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_CHOWN:
+ return keyctl_chown_key(arg2, arg3, arg4);
+
+ case KEYCTL_SETPERM:
+ return keyctl_setperm_key(arg2, arg3);
+
+ case KEYCTL_INSTANTIATE:
+ return keyctl_instantiate_key(arg2, compat_ptr(arg3), arg4,
+ arg5);
+
+ case KEYCTL_NEGATE:
+ return keyctl_negate_key(arg2, arg3, arg4);
+
+ case KEYCTL_SET_REQKEY_KEYRING:
+ return keyctl_set_reqkey_keyring(arg2);
+
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout(arg2, arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority(arg2);
+
+ case KEYCTL_GET_SECURITY:
+ return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
+ case KEYCTL_REJECT:
+ return keyctl_reject_key(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return keyctl_instantiate_key_iov(arg2, compat_ptr(arg3), arg4,
+ arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key(arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
+ case KEYCTL_DH_COMPUTE:
+ return compat_keyctl_dh_compute(compat_ptr(arg2),
+ compat_ptr(arg3),
+ arg4, compat_ptr(arg5));
+
+ case KEYCTL_RESTRICT_KEYRING:
+ return keyctl_restrict_keyring(arg2, compat_ptr(arg3),
+ compat_ptr(arg4));
+
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query(arg2,
+ compat_ptr(arg4),
+ compat_ptr(arg5));
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(option,
+ compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities(compat_ptr(arg2), arg3);
+
+ case KEYCTL_WATCH_KEY:
+ return keyctl_watch_key(arg2, arg3, arg4);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
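
A toy illustration (not kernel code) of why the compat path wraps every u32 pointer argument in compat_ptr() rather than casting directly: a 32-bit user pointer must be zero-extended, never sign-extended, into the 64-bit address space.

#include <stdint.h>
#include <stdio.h>

static void *toy_compat_ptr(uint32_t uptr)
{
	/* zero-extend the 32-bit user pointer */
	return (void *)(uint64_t)uptr;
}

int main(void)
{
	int32_t raw = (int32_t)0xfffe0000;	/* high 32-bit user address */

	/* a naive signed cast sign-extends into a bogus high address */
	printf("naive cast:   %p\n", (void *)(int64_t)raw);
	printf("compat_ptr(): %p\n", toy_compat_ptr((uint32_t)raw));
	return 0;
}
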
diff --git a/security/keys/compat_dh.c b/security/keys/compat_dh.c
new file mode 100644
index 000000000..19384e7e9
--- /dev/null
+++ b/security/keys/compat_dh.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* 32-bit compatibility syscall for 64-bit systems for DH operations
+ *
+ * Copyright (C) 2016 Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+/*
+ * Perform the DH computation or DH based key derivation.
+ *
+ * On success, the number of bytes copied to the buffer is returned.
+ */
+long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct compat_keyctl_kdf_params __user *kdf)
+{
+ struct keyctl_kdf_params kdfcopy;
+ struct compat_keyctl_kdf_params compat_kdfcopy;
+
+ if (!kdf)
+ return __keyctl_dh_compute(params, buffer, buflen, NULL);
+
+ if (copy_from_user(&compat_kdfcopy, kdf, sizeof(compat_kdfcopy)) != 0)
+ return -EFAULT;
+
+ kdfcopy.hashname = compat_ptr(compat_kdfcopy.hashname);
+ kdfcopy.otherinfo = compat_ptr(compat_kdfcopy.otherinfo);
+ kdfcopy.otherinfolen = compat_kdfcopy.otherinfolen;
+ memcpy(kdfcopy.__spare, compat_kdfcopy.__spare,
+ sizeof(kdfcopy.__spare));
+
+ return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy);
+}
diff --git a/security/keys/dh.c b/security/keys/dh.c
new file mode 100644
index 000000000..b339760a3
--- /dev/null
+++ b/security/keys/dh.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Crypto operations using stored keys
+ *
+ * Copyright (c) 2016, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
+#include <crypto/kdf_sp800108.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static ssize_t dh_data_from_key(key_serial_t keyid, const void **data)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long status;
+ ssize_t ret;
+
+ key_ref = lookup_user_key(keyid, 0, KEY_NEED_READ);
+ if (IS_ERR(key_ref)) {
+ ret = -ENOKEY;
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ ret = -EOPNOTSUPP;
+ if (key->type == &key_type_user) {
+ down_read(&key->sem);
+ status = key_validate(key);
+ if (status == 0) {
+ const struct user_key_payload *payload;
+ uint8_t *duplicate;
+
+ payload = user_key_payload_locked(key);
+
+ duplicate = kmemdup(payload->data, payload->datalen,
+ GFP_KERNEL);
+ if (duplicate) {
+ *data = duplicate;
+ ret = payload->datalen;
+ } else {
+ ret = -ENOMEM;
+ }
+ }
+ up_read(&key->sem);
+ }
+
+ key_put(key);
+error:
+ return ret;
+}
+
+static void dh_free_data(struct dh *dh)
+{
+ kfree_sensitive(dh->key);
+ kfree_sensitive(dh->p);
+ kfree_sensitive(dh->g);
+}
+
+struct dh_completion {
+ struct completion completion;
+ int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+ struct dh_completion *compl = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ compl->err = err;
+ complete(&compl->completion);
+}
+
+static int kdf_alloc(struct crypto_shash **hash, char *hashname)
+{
+ struct crypto_shash *tfm;
+
+ /* allocate synchronous hash */
+ tfm = crypto_alloc_shash(hashname, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_info("could not allocate digest TFM handle %s\n", hashname);
+ return PTR_ERR(tfm);
+ }
+
+ if (crypto_shash_digestsize(tfm) == 0) {
+ crypto_free_shash(tfm);
+ return -EINVAL;
+ }
+
+ *hash = tfm;
+
+ return 0;
+}
+
+static void kdf_dealloc(struct crypto_shash *hash)
+{
+ if (hash)
+ crypto_free_shash(hash);
+}
+
+static int keyctl_dh_compute_kdf(struct crypto_shash *hash,
+ char __user *buffer, size_t buflen,
+ uint8_t *kbuf, size_t kbuflen)
+{
+ struct kvec kbuf_iov = { .iov_base = kbuf, .iov_len = kbuflen };
+ uint8_t *outbuf = NULL;
+ int ret;
+ size_t outbuf_len = roundup(buflen, crypto_shash_digestsize(hash));
+
+ outbuf = kmalloc(outbuf_len, GFP_KERNEL);
+ if (!outbuf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = crypto_kdf108_ctr_generate(hash, &kbuf_iov, 1, outbuf, outbuf_len);
+ if (ret)
+ goto err;
+
+ ret = buflen;
+ if (copy_to_user(buffer, outbuf, buflen) != 0)
+ ret = -EFAULT;
+
+err:
+ kfree_sensitive(outbuf);
+ return ret;
+}
+
+long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params *kdfcopy)
+{
+ long ret;
+ ssize_t dlen;
+ int secretlen;
+ int outlen;
+ struct keyctl_dh_params pcopy;
+ struct dh dh_inputs;
+ struct scatterlist outsg;
+ struct dh_completion compl;
+ struct crypto_kpp *tfm;
+ struct kpp_request *req;
+ uint8_t *secret;
+ uint8_t *outbuf;
+ struct crypto_shash *hash = NULL;
+
+ if (!params || (!buffer && buflen)) {
+ ret = -EINVAL;
+ goto out1;
+ }
+ if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
+ ret = -EFAULT;
+ goto out1;
+ }
+
+ if (kdfcopy) {
+ char *hashname;
+
+ if (memchr_inv(kdfcopy->__spare, 0, sizeof(kdfcopy->__spare))) {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
+ kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
+ ret = -EMSGSIZE;
+ goto out1;
+ }
+
+ /* get KDF name string */
+ hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
+ if (IS_ERR(hashname)) {
+ ret = PTR_ERR(hashname);
+ goto out1;
+ }
+
+ /* allocate KDF from the kernel crypto API */
+ ret = kdf_alloc(&hash, hashname);
+ kfree(hashname);
+ if (ret)
+ goto out1;
+ }
+
+ memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+ dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out1;
+ }
+ dh_inputs.p_size = dlen;
+
+ dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
+ }
+ dh_inputs.g_size = dlen;
+
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
+ }
+ dh_inputs.key_size = dlen;
+
+ secretlen = crypto_dh_key_len(&dh_inputs);
+ secret = kmalloc(secretlen, GFP_KERNEL);
+ if (!secret) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
+ if (ret)
+ goto out3;
+
+ tfm = crypto_alloc_kpp("dh", 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out3;
+ }
+
+ ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+ if (ret)
+ goto out4;
+
+ outlen = crypto_kpp_maxsize(tfm);
+
+ if (!kdfcopy) {
+ /*
+ * When not using a KDF, buflen 0 is used to read the
+ * required buffer length
+ */
+ if (buflen == 0) {
+ ret = outlen;
+ goto out4;
+ } else if (outlen > buflen) {
+ ret = -EOVERFLOW;
+ goto out4;
+ }
+ }
+
+ outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+ GFP_KERNEL);
+ if (!outbuf) {
+ ret = -ENOMEM;
+ goto out4;
+ }
+
+ sg_init_one(&outsg, outbuf, outlen);
+
+ req = kpp_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out5;
+ }
+
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, &outsg, outlen);
+ init_completion(&compl.completion);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ dh_crypto_done, &compl);
+
+ /*
+ * For DH, generate_public_key and generate_shared_secret are
+ * the same calculation
+ */
+ ret = crypto_kpp_generate_public_key(req);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&compl.completion);
+ ret = compl.err;
+ if (ret)
+ goto out6;
+ }
+
+ if (kdfcopy) {
+ /*
+ * Concatenate SP800-56A otherinfo past DH shared secret -- the
+ * input to the KDF is (DH shared secret || otherinfo)
+ */
+ if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+ kdfcopy->otherinfolen) != 0) {
+ ret = -EFAULT;
+ goto out6;
+ }
+
+ ret = keyctl_dh_compute_kdf(hash, buffer, buflen, outbuf,
+ req->dst_len + kdfcopy->otherinfolen);
+ } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+ ret = req->dst_len;
+ } else {
+ ret = -EFAULT;
+ }
+
+out6:
+ kpp_request_free(req);
+out5:
+ kfree_sensitive(outbuf);
+out4:
+ crypto_free_kpp(tfm);
+out3:
+ kfree_sensitive(secret);
+out2:
+ dh_free_data(&dh_inputs);
+out1:
+ kdf_dealloc(hash);
+ return ret;
+}
+
+long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ struct keyctl_kdf_params kdfcopy;
+
+ if (!kdf)
+ return __keyctl_dh_compute(params, buffer, buflen, NULL);
+
+ if (copy_from_user(&kdfcopy, kdf, sizeof(kdfcopy)) != 0)
+ return -EFAULT;
+
+ return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy);
+}
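
A hedged userspace sketch of the two-step KEYCTL_DH_COMPUTE call pattern described above: with no KDF argument, a zero buflen asks the kernel for the required output size. The value 23 for KEYCTL_DH_COMPUTE mirrors the UAPI header, and the struct field is named 'private' there (renamed here to stay clear of the C++ keyword). The three key serials are assumed to be pre-loaded "user" keys holding the prime, base and private value.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KEYCTL_DH_COMPUTE	23

struct keyctl_dh_params {
	int32_t dh_private;	/* named 'private' in <linux/keyctl.h> */
	int32_t prime;
	int32_t base;
};

int main(int argc, char **argv)
{
	struct keyctl_dh_params p;
	long need, got;
	char *buf;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <private-id> <prime-id> <base-id>\n",
			argv[0]);
		return 1;
	}
	p.dh_private = atoi(argv[1]);
	p.prime = atoi(argv[2]);
	p.base = atoi(argv[3]);

	/* step 1: zero buflen queries the required buffer length */
	need = syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &p, NULL, 0, NULL);
	if (need < 0) {
		perror("KEYCTL_DH_COMPUTE (size query)");
		return 1;
	}

	buf = malloc(need);
	if (!buf)
		return 1;

	/* step 2: compute the shared secret into the buffer */
	got = syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &p, buf, need, NULL);
	if (got < 0) {
		perror("KEYCTL_DH_COMPUTE");
		return 1;
	}
	printf("shared secret: %ld bytes\n", got);
	return 0;
}
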
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 000000000..7a44dce6f
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
+
+encrypted-keys-y := encrypted.o ecryptfs_format.o
+masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
+masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
+encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
new file mode 100644
index 000000000..8fdd76105
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ecryptfs_format.c: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include "ecryptfs_format.h"
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
+{
+ return auth_tok->token.password.session_key_encryption_key;
+}
+EXPORT_SYMBOL(ecryptfs_get_auth_tok_key);
+
+/*
+ * ecryptfs_get_versions()
+ *
+ * Source code taken from the software 'ecryptfs-utils' version 83.
+ */
+void ecryptfs_get_versions(int *major, int *minor, int *file_version)
+{
+ *major = ECRYPTFS_VERSION_MAJOR;
+ *minor = ECRYPTFS_VERSION_MINOR;
+ if (file_version)
+ *file_version = ECRYPTFS_SUPPORTED_FILE_VERSION;
+}
+EXPORT_SYMBOL(ecryptfs_get_versions);
+
+/*
+ * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure
+ *
+ * Fill the ecryptfs_auth_tok structure with required ecryptfs data.
+ * The source code is inspired by the original function generate_payload()
+ * shipped with the software 'ecryptfs-utils' version 83.
+ */
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc)
+{
+ int major, minor;
+
+ ecryptfs_get_versions(&major, &minor, NULL);
+ auth_tok->version = (((uint16_t)(major << 8) & 0xFF00)
+ | ((uint16_t)minor & 0x00FF));
+ auth_tok->token_type = ECRYPTFS_PASSWORD;
+ strncpy((char *)auth_tok->token.password.signature, key_desc,
+ ECRYPTFS_PASSWORD_SIG_SIZE);
+ auth_tok->token.password.session_key_encryption_key_bytes =
+ ECRYPTFS_MAX_KEY_BYTES;
+ /*
+ * Removed auth_tok->token.password.salt and
+ * auth_tok->token.password.session_key_encryption_key
+ * initialization from the original code
+ */
+ /* TODO: Make the hash parameterizable via policy */
+ auth_tok->token.password.flags |=
+ ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET;
+ /* The kernel code will encrypt the session key. */
+ auth_tok->session_key.encrypted_key[0] = 0;
+ auth_tok->session_key.encrypted_key_size = 0;
+ /* Default; subject to change by kernel eCryptfs */
+ auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512;
+ auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD);
+ return 0;
+}
+EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
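
The version packing in ecryptfs_fill_auth_tok() puts the major number in the high byte and the minor in the low byte of the 16-bit version field. A tiny sketch of the arithmetic, with assumed example values (the real constants come from <linux/ecryptfs.h>):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int major = 0, minor = 4;	/* assumed example values */
	uint16_t version = (((uint16_t)(major << 8) & 0xFF00)
			    | ((uint16_t)minor & 0x00FF));

	/* major 0, minor 4 packs to 0x0004 */
	printf("packed auth_tok version = 0x%04x\n", version);
	return 0;
}
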
diff --git a/security/keys/encrypted-keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
new file mode 100644
index 000000000..ed8466578
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ecryptfs_format.h: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ */
+
+#ifndef __KEYS_ECRYPTFS_H
+#define __KEYS_ECRYPTFS_H
+
+#include <linux/ecryptfs.h>
+
+#define PGP_DIGEST_ALGO_SHA512 10
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok);
+void ecryptfs_get_versions(int *major, int *minor, int *file_version);
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc);
+
+#endif /* __KEYS_ECRYPTFS_H */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
new file mode 100644
index 000000000..1e313982a
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/key-type.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/scatterlist.h>
+#include <linux/ctype.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/skcipher.h>
+
+#include "encrypted.h"
+#include "ecryptfs_format.h"
+
+static const char KEY_TRUSTED_PREFIX[] = "trusted:";
+static const char KEY_USER_PREFIX[] = "user:";
+static const char hash_alg[] = "sha256";
+static const char hmac_alg[] = "hmac(sha256)";
+static const char blkcipher_alg[] = "cbc(aes)";
+static const char key_format_default[] = "default";
+static const char key_format_ecryptfs[] = "ecryptfs";
+static const char key_format_enc32[] = "enc32";
+static unsigned int ivsize;
+static int blksize;
+
+#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
+#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
+#define KEY_ECRYPTFS_DESC_LEN 16
+#define HASH_SIZE SHA256_DIGEST_SIZE
+#define MAX_DATA_SIZE 4096
+#define MIN_DATA_SIZE 20
+#define KEY_ENC32_PAYLOAD_LEN 32
+
+static struct crypto_shash *hash_tfm;
+
+enum {
+ Opt_new, Opt_load, Opt_update, Opt_err
+};
+
+enum {
+ Opt_default, Opt_ecryptfs, Opt_enc32, Opt_error
+};
+
+static const match_table_t key_format_tokens = {
+ {Opt_default, "default"},
+ {Opt_ecryptfs, "ecryptfs"},
+ {Opt_enc32, "enc32"},
+ {Opt_error, NULL}
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_err, NULL}
+};
+
+static bool user_decrypted_data = IS_ENABLED(CONFIG_USER_DECRYPTED_DATA);
+module_param(user_decrypted_data, bool, 0);
+MODULE_PARM_DESC(user_decrypted_data,
+ "Allow instantiation of encrypted keys using provided decrypted data");
+
+static int aes_get_sizes(void)
+{
+ struct crypto_skcipher *tfm;
+
+ tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ ivsize = crypto_skcipher_ivsize(tfm);
+ blksize = crypto_skcipher_blocksize(tfm);
+ crypto_free_skcipher(tfm);
+ return 0;
+}
+
+/*
+ * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key
+ *
+ * The description of an encrypted key with format 'ecryptfs' must contain
+ * exactly 16 hexadecimal characters.
+ */
+static int valid_ecryptfs_desc(const char *ecryptfs_desc)
+{
+ int i;
+
+ if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) {
+ pr_err("encrypted_key: key description must be %d hexadecimal "
+ "characters long\n", KEY_ECRYPTFS_DESC_LEN);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) {
+ if (!isxdigit(ecryptfs_desc[i])) {
+ pr_err("encrypted_key: key description must contain "
+ "only hexadecimal characters\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
+ *
+ * key-type:= "trusted:" | "user:"
+ * desc:= master-key description
+ *
+ * Verify that 'key-type' is valid and that 'desc' exists. On key update,
+ * only the master key description may change; the key-type must remain
+ * the same.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int valid_master_desc(const char *new_desc, const char *orig_desc)
+{
+ int prefix_len;
+
+ if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+ prefix_len = KEY_TRUSTED_PREFIX_LEN;
+ else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+ prefix_len = KEY_USER_PREFIX_LEN;
+ else
+ return -EINVAL;
+
+ if (!new_desc[prefix_len])
+ return -EINVAL;
+
+ if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data
+ *
+ * datablob format:
+ * new [<format>] <master-key name> <decrypted data length> [<decrypted data>]
+ * load [<format>] <master-key name> <decrypted data length>
+ * <encrypted iv + data>
+ * update <new-master-key name>
+ *
+ * Tokenizes a copy of the keyctl data, returning a pointer to each token,
+ * which is null terminated.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, const char **format,
+ char **master_desc, char **decrypted_datalen,
+ char **hex_encoded_iv, char **decrypted_data)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int ret = -EINVAL;
+ int key_cmd;
+ int key_format;
+ char *p, *keyword;
+
+ keyword = strsep(&datablob, " \t");
+ if (!keyword) {
+ pr_info("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+ key_cmd = match_token(keyword, key_tokens, args);
+
+	/* Get optional format: default | ecryptfs | enc32 */
+ p = strsep(&datablob, " \t");
+ if (!p) {
+ pr_err("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+
+ key_format = match_token(p, key_format_tokens, args);
+ switch (key_format) {
+ case Opt_ecryptfs:
+ case Opt_enc32:
+ case Opt_default:
+ *format = p;
+ *master_desc = strsep(&datablob, " \t");
+ break;
+ case Opt_error:
+ *master_desc = p;
+ break;
+ }
+
+ if (!*master_desc) {
+ pr_info("encrypted_key: master key parameter is missing\n");
+ goto out;
+ }
+
+ if (valid_master_desc(*master_desc, NULL) < 0) {
+ pr_info("encrypted_key: master key parameter \'%s\' "
+ "is invalid\n", *master_desc);
+ goto out;
+ }
+
+ if (decrypted_datalen) {
+ *decrypted_datalen = strsep(&datablob, " \t");
+ if (!*decrypted_datalen) {
+ pr_info("encrypted_key: keylen parameter is missing\n");
+ goto out;
+ }
+ }
+
+ switch (key_cmd) {
+ case Opt_new:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ *decrypted_data = strsep(&datablob, " \t");
+ ret = 0;
+ break;
+ case Opt_load:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ *hex_encoded_iv = strsep(&datablob, " \t");
+ if (!*hex_encoded_iv) {
+ pr_info("encrypted_key: hex blob is missing\n");
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_update:
+ if (decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .instantiate method\n",
+ keyword);
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_err:
+ pr_info("encrypted_key: keyword \'%s\' not recognized\n",
+ keyword);
+ break;
+ }
+out:
+ return ret;
+}
+
+/*
+ * datablob_format - format as an ascii string, before copying to userspace
+ */
+static char *datablob_format(struct encrypted_key_payload *epayload,
+ size_t asciiblob_len)
+{
+ char *ascii_buf, *bufp;
+ u8 *iv = epayload->iv;
+ int len;
+ int i;
+
+ ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
+ if (!ascii_buf)
+ goto out;
+
+ ascii_buf[asciiblob_len] = '\0';
+
+ /* copy datablob master_desc and datalen strings */
+ len = sprintf(ascii_buf, "%s %s %s ", epayload->format,
+ epayload->master_desc, epayload->datalen);
+
+ /* convert the hex encoded iv, encrypted-data and HMAC to ascii */
+ bufp = &ascii_buf[len];
+ for (i = 0; i < (asciiblob_len - len) / 2; i++)
+ bufp = hex_byte_pack(bufp, iv[i]);
+out:
+ return ascii_buf;
+}
+
+/*
+ * request_user_key - request the user key
+ *
+ * Use a user provided key to encrypt/decrypt an encrypted-key.
+ */
+static struct key *request_user_key(const char *master_desc, const u8 **master_key,
+ size_t *master_keylen)
+{
+ const struct user_key_payload *upayload;
+ struct key *ukey;
+
+ ukey = request_key(&key_type_user, master_desc, NULL);
+ if (IS_ERR(ukey))
+ goto error;
+
+ down_read(&ukey->sem);
+ upayload = user_key_payload_locked(ukey);
+ if (!upayload) {
+ /* key was revoked before we acquired its semaphore */
+ up_read(&ukey->sem);
+ key_put(ukey);
+ ukey = ERR_PTR(-EKEYREVOKED);
+ goto error;
+ }
+ *master_key = upayload->data;
+ *master_keylen = upayload->datalen;
+error:
+ return ukey;
+}
+
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+ const u8 *buf, unsigned int buflen)
+{
+ struct crypto_shash *tfm;
+ int err;
+
+ tfm = crypto_alloc_shash(hmac_alg, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+ hmac_alg, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ err = crypto_shash_setkey(tfm, key, keylen);
+ if (!err)
+ err = crypto_shash_tfm_digest(tfm, buf, buflen, digest);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+enum derived_key_type { ENC_KEY, AUTH_KEY };
+
+/* Derive authentication/encryption key from trusted key */
+static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 *derived_buf;
+ unsigned int derived_buf_len;
+ int ret;
+
+ derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
+ if (derived_buf_len < HASH_SIZE)
+ derived_buf_len = HASH_SIZE;
+
+ derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
+ if (!derived_buf)
+ return -ENOMEM;
+
+ if (key_type)
+ strcpy(derived_buf, "AUTH_KEY");
+ else
+ strcpy(derived_buf, "ENC_KEY");
+
+ memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
+ master_keylen);
+ ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len,
+ derived_key);
+ kfree_sensitive(derived_buf);
+ return ret;
+}
+
+static struct skcipher_request *init_skcipher_req(const u8 *key,
+ unsigned int key_len)
+{
+ struct skcipher_request *req;
+ struct crypto_skcipher *tfm;
+ int ret;
+
+ tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: failed to load %s transform (%ld)\n",
+ blkcipher_alg, PTR_ERR(tfm));
+ return ERR_CAST(tfm);
+ }
+
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0) {
+ pr_err("encrypted_key: failed to setkey (%d)\n", ret);
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(ret);
+ }
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("encrypted_key: failed to allocate request for %s\n",
+ blkcipher_alg);
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ return req;
+}
+
+static struct key *request_master_key(struct encrypted_key_payload *epayload,
+ const u8 **master_key, size_t *master_keylen)
+{
+ struct key *mkey = ERR_PTR(-EINVAL);
+
+ if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+ KEY_TRUSTED_PREFIX_LEN)) {
+ mkey = request_trusted_key(epayload->master_desc +
+ KEY_TRUSTED_PREFIX_LEN,
+ master_key, master_keylen);
+ } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
+ KEY_USER_PREFIX_LEN)) {
+ mkey = request_user_key(epayload->master_desc +
+ KEY_USER_PREFIX_LEN,
+ master_key, master_keylen);
+ } else
+ goto out;
+
+ if (IS_ERR(mkey)) {
+ int ret = PTR_ERR(mkey);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
+ goto out;
+ }
+
+ dump_master_key(*master_key, *master_keylen);
+out:
+ return mkey;
+}
+
+/* Before returning data to userspace, encrypt decrypted data. */
+static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[2];
+ struct scatterlist sg_out[1];
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+ dump_decrypted_data(epayload);
+
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
+
+ sg_init_table(sg_out, 1);
+ sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+
+ memcpy(iv, epayload->iv, sizeof(iv));
+ skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
+ ret = crypto_skcipher_encrypt(req);
+ tfm = crypto_skcipher_reqtfm(req);
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
+ else
+ dump_encrypted_data(epayload, encrypted_datalen);
+out:
+ return ret;
+}
+
+static int datablob_hmac_append(struct encrypted_key_payload *epayload,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 *digest;
+ int ret;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ digest = epayload->format + epayload->datablob_len;
+ ret = calc_hmac(digest, derived_key, sizeof derived_key,
+ epayload->format, epayload->datablob_len);
+ if (!ret)
+ dump_hmac(NULL, digest, HASH_SIZE);
+out:
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+/* verify HMAC before decrypting encrypted key */
+static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
+ const u8 *format, const u8 *master_key,
+ size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 digest[HASH_SIZE];
+ int ret;
+ char *p;
+ unsigned short len;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ len = epayload->datablob_len;
+ if (!format) {
+ p = epayload->master_desc;
+ len -= strlen(epayload->format) + 1;
+ } else
+ p = epayload->format;
+
+ ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
+ if (ret < 0)
+ goto out;
+ ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+ sizeof(digest));
+ if (ret) {
+ ret = -EINVAL;
+ dump_hmac("datablob",
+ epayload->format + epayload->datablob_len,
+ HASH_SIZE);
+ dump_hmac("calc", digest, HASH_SIZE);
+ }
+out:
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[1];
+ struct scatterlist sg_out[2];
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 *pad;
+ int ret;
+
+ /* Throwaway buffer to hold the unused zero padding at the end */
+ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!pad)
+ return -ENOMEM;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+ dump_encrypted_data(epayload, encrypted_datalen);
+
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+ sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+ sg_set_buf(&sg_out[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
+
+ memcpy(iv, epayload->iv, sizeof(iv));
+ skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
+ ret = crypto_skcipher_decrypt(req);
+ tfm = crypto_skcipher_reqtfm(req);
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ if (ret < 0)
+ goto out;
+ dump_decrypted_data(epayload);
+out:
+ kfree(pad);
+ return ret;
+}
+
+/* Allocate memory for decrypted key and datablob. */
+static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+ const char *format,
+ const char *master_desc,
+ const char *datalen,
+ const char *decrypted_data)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ unsigned short datablob_len;
+ unsigned short decrypted_datalen;
+ unsigned short payload_datalen;
+ unsigned int encrypted_datalen;
+ unsigned int format_len;
+ long dlen;
+ int i;
+ int ret;
+
+ ret = kstrtol(datalen, 10, &dlen);
+ if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ decrypted_datalen = dlen;
+ payload_datalen = decrypted_datalen;
+
+ if (decrypted_data) {
+ if (!user_decrypted_data) {
+ pr_err("encrypted key: instantiation of keys using provided decrypted data is disabled since CONFIG_USER_DECRYPTED_DATA is set to false\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (strlen(decrypted_data) != decrypted_datalen * 2) {
+ pr_err("encrypted key: decrypted data provided does not match decrypted data length provided\n");
+ return ERR_PTR(-EINVAL);
+ }
+ for (i = 0; i < strlen(decrypted_data); i++) {
+ if (!isxdigit(decrypted_data[i])) {
+ pr_err("encrypted key: decrypted data provided must contain only hexadecimal characters\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ }
+
+ if (format) {
+ if (!strcmp(format, key_format_ecryptfs)) {
+ if (dlen != ECRYPTFS_MAX_KEY_BYTES) {
+ pr_err("encrypted_key: keylen for the ecryptfs format must be equal to %d bytes\n",
+ ECRYPTFS_MAX_KEY_BYTES);
+ return ERR_PTR(-EINVAL);
+ }
+ decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES;
+ payload_datalen = sizeof(struct ecryptfs_auth_tok);
+ } else if (!strcmp(format, key_format_enc32)) {
+ if (decrypted_datalen != KEY_ENC32_PAYLOAD_LEN) {
+ pr_err("encrypted_key: enc32 key payload incorrect length: %d\n",
+ decrypted_datalen);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ }
+
+ encrypted_datalen = roundup(decrypted_datalen, blksize);
+
+ datablob_len = format_len + 1 + strlen(master_desc) + 1
+ + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen;
+
+ ret = key_payload_reserve(key, payload_datalen + datablob_len
+ + HASH_SIZE + 1);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ epayload = kzalloc(sizeof(*epayload) + payload_datalen +
+ datablob_len + HASH_SIZE + 1, GFP_KERNEL);
+ if (!epayload)
+ return ERR_PTR(-ENOMEM);
+
+ epayload->payload_datalen = payload_datalen;
+ epayload->decrypted_datalen = decrypted_datalen;
+ epayload->datablob_len = datablob_len;
+ return epayload;
+}
+
+static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
+ const char *format, const char *hex_encoded_iv)
+{
+ struct key *mkey;
+ u8 derived_key[HASH_SIZE];
+ const u8 *master_key;
+ u8 *hmac;
+ const char *hex_encoded_data;
+ unsigned int encrypted_datalen;
+ size_t master_keylen;
+ size_t asciilen;
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
+ if (strlen(hex_encoded_iv) != asciilen)
+ return -EINVAL;
+
+ hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
+
+ hmac = epayload->format + epayload->datablob_len;
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = datablob_hmac_verify(epayload, format, master_key, master_keylen);
+ if (ret < 0) {
+ pr_err("encrypted_key: bad hmac (%d)\n", ret);
+ goto out;
+ }
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+static void __ekey_init(struct encrypted_key_payload *epayload,
+ const char *format, const char *master_desc,
+ const char *datalen)
+{
+ unsigned int format_len;
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ epayload->format = epayload->payload_data + epayload->payload_datalen;
+ epayload->master_desc = epayload->format + format_len + 1;
+ epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
+ epayload->iv = epayload->datalen + strlen(datalen) + 1;
+ epayload->encrypted_data = epayload->iv + ivsize + 1;
+ epayload->decrypted_data = epayload->payload_data;
+
+ if (!format)
+ memcpy(epayload->format, key_format_default, format_len);
+ else {
+ if (!strcmp(format, key_format_ecryptfs))
+ epayload->decrypted_data =
+ ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data);
+
+ memcpy(epayload->format, format, format_len);
+ }
+
+ memcpy(epayload->master_desc, master_desc, strlen(master_desc));
+ memcpy(epayload->datalen, datalen, strlen(datalen));
+}
+
+/*
+ * encrypted_init - initialize an encrypted key
+ *
+ * For a new key, use either a kernel-generated random number or, if provided,
+ * user-supplied decrypted data. A random number is used for the iv in both
+ * cases. For an existing key, decrypt the hex encoded data.
+ */
+static int encrypted_init(struct encrypted_key_payload *epayload,
+ const char *key_desc, const char *format,
+ const char *master_desc, const char *datalen,
+ const char *hex_encoded_iv, const char *decrypted_data)
+{
+ int ret = 0;
+
+ if (format && !strcmp(format, key_format_ecryptfs)) {
+ ret = valid_ecryptfs_desc(key_desc);
+ if (ret < 0)
+ return ret;
+
+ ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data,
+ key_desc);
+ }
+
+ __ekey_init(epayload, format, master_desc, datalen);
+ if (hex_encoded_iv) {
+ ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+ } else if (decrypted_data) {
+ get_random_bytes(epayload->iv, ivsize);
+ ret = hex2bin(epayload->decrypted_data, decrypted_data,
+ epayload->decrypted_datalen);
+ } else {
+ get_random_bytes(epayload->iv, ivsize);
+ get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen);
+ }
+ return ret;
+}
+
+/*
+ * encrypted_instantiate - instantiate an encrypted key
+ *
+ * Instantiates the key:
+ * - by decrypting an existing encrypted datablob, or
+ * - by creating a new encrypted key based on a kernel random number, or
+ * - by using provided decrypted data.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ char *datablob = NULL;
+ const char *format = NULL;
+ char *master_desc = NULL;
+ char *decrypted_datalen = NULL;
+ char *hex_encoded_iv = NULL;
+ char *decrypted_data = NULL;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ datablob[datalen] = 0;
+ memcpy(datablob, prep->data, datalen);
+ ret = datablob_parse(datablob, &format, &master_desc,
+ &decrypted_datalen, &hex_encoded_iv, &decrypted_data);
+ if (ret < 0)
+ goto out;
+
+ epayload = encrypted_key_alloc(key, format, master_desc,
+ decrypted_datalen, decrypted_data);
+ if (IS_ERR(epayload)) {
+ ret = PTR_ERR(epayload);
+ goto out;
+ }
+ ret = encrypted_init(epayload, key->description, format, master_desc,
+ decrypted_datalen, hex_encoded_iv, decrypted_data);
+ if (ret < 0) {
+ kfree_sensitive(epayload);
+ goto out;
+ }
+
+ rcu_assign_keypointer(key, epayload);
+out:
+ kfree_sensitive(datablob);
+ return ret;
+}
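+
+/*
+ * For illustration, the userspace datablob forms accepted here are roughly
+ * (per Documentation/security/keys/trusted-encrypted.rst):
+ *
+ *   keyctl add encrypted name "new [<format>] <key-type>:<master-key> <keylen>" <ring>
+ *   keyctl add encrypted name "new [<format>] <key-type>:<master-key> <keylen> <hex data>" <ring>
+ *   keyctl add encrypted name "load <hex blob>" <ring>
+ */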
+
+static void encrypted_rcu_free(struct rcu_head *rcu)
+{
+ struct encrypted_key_payload *epayload;
+
+ epayload = container_of(rcu, struct encrypted_key_payload, rcu);
+ kfree_sensitive(epayload);
+}
+
+/*
+ * encrypted_update - update the master key description
+ *
+ * Change the master key description for an existing encrypted key.
+ * The next read will return an encrypted datablob using the new
+ * master key description.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = key->payload.data[0];
+ struct encrypted_key_payload *new_epayload;
+ char *buf;
+ char *new_master_desc = NULL;
+ const char *format = NULL;
+ size_t datalen = prep->datalen;
+ int ret = 0;
+
+ if (key_is_negative(key))
+ return -ENOKEY;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ buf = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[datalen] = 0;
+ memcpy(buf, prep->data, datalen);
+ ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = valid_master_desc(new_master_desc, epayload->master_desc);
+ if (ret < 0)
+ goto out;
+
+ new_epayload = encrypted_key_alloc(key, epayload->format,
+ new_master_desc, epayload->datalen, NULL);
+ if (IS_ERR(new_epayload)) {
+ ret = PTR_ERR(new_epayload);
+ goto out;
+ }
+
+ __ekey_init(new_epayload, epayload->format, new_master_desc,
+ epayload->datalen);
+
+ memcpy(new_epayload->iv, epayload->iv, ivsize);
+ memcpy(new_epayload->payload_data, epayload->payload_data,
+ epayload->payload_datalen);
+
+ rcu_assign_keypointer(key, new_epayload);
+ call_rcu(&epayload->rcu, encrypted_rcu_free);
+out:
+ kfree_sensitive(buf);
+ return ret;
+}
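+
+/*
+ * For example (a sketch), re-keying an existing encrypted key onto a
+ * different master key:
+ *
+ *   keyctl update <keyid> "update <key-type>:<new-master-key>"
+ */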
+
+/*
+ * encrypted_read - format and copy out the encrypted data
+ *
+ * The resulting datablob format is:
+ * <master-key name> <decrypted data length> <hex-encoded iv, encrypted data and HMAC>
+ *
+ * On success, return to userspace the encrypted key datablob size.
+ */
+static long encrypted_read(const struct key *key, char *buffer,
+ size_t buflen)
+{
+ struct encrypted_key_payload *epayload;
+ struct key *mkey;
+ const u8 *master_key;
+ size_t master_keylen;
+ char derived_key[HASH_SIZE];
+ char *ascii_buf;
+ size_t asciiblob_len;
+ int ret;
+
+ epayload = dereference_key_locked(key);
+
+ /* returns the hex encoded iv, encrypted-data, and hmac as ascii */
+ asciiblob_len = epayload->datablob_len + ivsize + 1
+ + roundup(epayload->decrypted_datalen, blksize)
+ + (HASH_SIZE * 2);
+
+ if (!buffer || buflen < asciiblob_len)
+ return asciiblob_len;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+	ret = derived_key_encrypt(epayload, derived_key, sizeof(derived_key));
+ if (ret < 0)
+ goto out;
+
+ ret = datablob_hmac_append(epayload, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ascii_buf = datablob_format(epayload, asciiblob_len);
+ if (!ascii_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+
+ memcpy(buffer, ascii_buf, asciiblob_len);
+ kfree_sensitive(ascii_buf);
+
+ return asciiblob_len;
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
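+
+/*
+ * For example, the blob emitted here can be saved from userspace and
+ * reloaded later (a sketch):
+ *
+ *   keyctl pipe <keyid> > ekey.blob
+ *   keyctl add encrypted name "load `cat ekey.blob`" @u
+ */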
+
+/*
+ * encrypted_destroy - clear and free the key's payload
+ */
+static void encrypted_destroy(struct key *key)
+{
+ kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_encrypted = {
+ .name = "encrypted",
+ .instantiate = encrypted_instantiate,
+ .update = encrypted_update,
+ .destroy = encrypted_destroy,
+ .describe = user_describe,
+ .read = encrypted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_encrypted);
+
+static int __init init_encrypted(void)
+{
+ int ret;
+
+ hash_tfm = crypto_alloc_shash(hash_alg, 0, 0);
+ if (IS_ERR(hash_tfm)) {
+ pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+ hash_alg, PTR_ERR(hash_tfm));
+ return PTR_ERR(hash_tfm);
+ }
+
+ ret = aes_get_sizes();
+ if (ret < 0)
+ goto out;
+ ret = register_key_type(&key_type_encrypted);
+ if (ret < 0)
+ goto out;
+ return 0;
+out:
+ crypto_free_shash(hash_tfm);
+ return ret;
+}
+
+static void __exit cleanup_encrypted(void)
+{
+ crypto_free_shash(hash_tfm);
+ unregister_key_type(&key_type_encrypted);
+}
+
+late_initcall(init_encrypted);
+module_exit(cleanup_encrypted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
new file mode 100644
index 000000000..1809995db
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ENCRYPTED_KEY_H
+#define __ENCRYPTED_KEY_H
+
+#define ENCRYPTED_DEBUG 0
+#if defined(CONFIG_TRUSTED_KEYS) || \
+ (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
+extern struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+#if ENCRYPTED_DEBUG
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+ print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1,
+ master_key, master_keylen, 0);
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+ print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->decrypted_data,
+ epayload->decrypted_datalen, 0);
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+ print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->encrypted_data, encrypted_datalen, 0);
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+ if (str)
+ pr_info("encrypted_key: %s", str);
+ print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest,
+ hmac_size, 0);
+}
+#else
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+}
+#endif
+#endif
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 000000000..e6d22ce77
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both the trusted and encrypted key types, trusted key data, like
+ * encrypted key data, is never visible in decrypted form to userspace.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = tkey->payload.data[0];
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
diff --git a/security/keys/gc.c b/security/keys/gc.c
new file mode 100644
index 000000000..eaddaceda
--- /dev/null
+++ b/security/keys/gc.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key garbage collector
+ *
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <keys/keyring-type.h>
+#include "internal.h"
+
+/*
+ * Delay, in seconds, between key revocation/expiry and garbage collection
+ */
+unsigned key_gc_delay = 5 * 60;
+
+/*
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
+ */
+static void key_gc_timer_func(struct timer_list *);
+static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
+
+static time64_t key_gc_next_run = TIME64_MAX;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = ".dead",
+};
+
+/*
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
+ */
+void key_schedule_gc(time64_t gc_at)
+{
+ unsigned long expires;
+ time64_t now = ktime_get_real_seconds();
+
+ kenter("%lld", gc_at - now);
+
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
+ schedule_work(&key_gc_work);
+ } else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
+ expires = jiffies + (gc_at - now) * HZ;
+ mod_timer(&key_gc_timer, expires);
+ }
+}
+
+/*
+ * Set the expiration time on a key.
+ */
+void key_set_expiry(struct key *key, time64_t expiry)
+{
+ key->expiry = expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ key_schedule_gc(expiry);
+ }
+}
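+
+/*
+ * As a worked example: with key_gc_delay at its default of five minutes, a
+ * key whose type lacks KEY_TYPE_INSTANT_REAP and which expires at time T is
+ * only scheduled for collection at T + 300 seconds; an instant-reap key is
+ * scheduled for T itself.
+ */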
+
+/*
+ * Schedule a dead links collection run.
+ */
+void key_schedule_gc_links(void)
+{
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ schedule_work(&key_gc_work);
+}
+
+/*
+ * A key's cleanup time has arrived since it expired, so we need to get the
+ * reaper to go through a cycle looking for expired keys.
+ */
+static void key_gc_timer_func(struct timer_list *unused)
+{
+ kenter("");
+ key_gc_next_run = TIME64_MAX;
+ key_schedule_gc_links();
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
+ schedule_work(&key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
+}
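+
+/*
+ * To sketch the sequencing driven by "gc_state <<= 1" in the collector
+ * below: pass one runs with KEY_GC_REAPING_DEAD_1 set and marks keys of the
+ * dead type; pass two sees KEY_GC_REAPING_DEAD_2 and strips keyring links
+ * to them; pass three sees KEY_GC_REAPING_DEAD_3 and destroys the keys
+ * themselves.
+ */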
+
+/*
+ * Garbage collect a list of unreferenced, detached keys
+ */
+static noinline void key_gc_unused_keys(struct list_head *keys)
+{
+ while (!list_empty(keys)) {
+ struct key *key =
+ list_entry(keys->next, struct key, graveyard_link);
+ short state = key->state;
+
+ list_del(&key->graveyard_link);
+
+ kdebug("- %u", key->serial);
+ key_check(key);
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ remove_watch_list(key->watchers, key->serial);
+ key->watchers = NULL;
+#endif
+
+ /* Throw away the key data if the key is instantiated */
+ if (state == KEY_IS_POSITIVE && key->type->destroy)
+ key->type->destroy(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
+
+ atomic_dec(&key->user->nkeys);
+ if (state != KEY_IS_UNINSTANTIATED)
+ atomic_dec(&key->user->nikeys);
+
+ key_user_put(key->user);
+ key_put_tag(key->domain_tag);
+ kfree(key->description);
+
+ memzero_explicit(key, sizeof(*key));
+ kmem_cache_free(key_jar, key);
+ }
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ static LIST_HEAD(graveyard);
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
+
+ struct rb_node *cursor;
+ struct key *key;
+ time64_t new_timer, limit, expiry;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = ktime_get_real_seconds();
+
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = TIME64_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, if cursor is non-NULL then it will always point to a
+ * valid node in the tree - even if lock got dropped.
+ */
+ spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
+
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
+
+ if (refcount_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ } else if (key->type == &key_type_keyring &&
+ key->restrict_link) {
+ goto found_restricted_keyring;
+ }
+ }
+
+ expiry = key->expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry > limit && expiry < new_timer) {
+ kdebug("will expire %x in %lld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
+
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
+ }
+
+contended:
+ spin_unlock(&key_serial_lock);
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (new_timer != TIME64_MAX) {
+ new_timer += key_gc_delay;
+ key_schedule_gc(new_timer);
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
+ !list_empty(&graveyard)) {
+ /* Make sure that all pending keyring payload destructions are
+ * fulfilled and that people aren't now looking at dead or
+ * dying keys that they don't have a reference upon or a link
+ * to.
+ */
+ kdebug("gc sync");
+ synchronize_rcu();
+ }
+
+ if (!list_empty(&graveyard)) {
+ kdebug("gc keys");
+ key_gc_unused_keys(&graveyard);
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ schedule_work(&key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ list_add_tail(&key->graveyard_link, &graveyard);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a restricted keyring and need to update the restriction if
+ * it is associated with the dead key type.
+ */
+found_restricted_keyring:
+ spin_unlock(&key_serial_lock);
+ keyring_restriction_gc(key, key_gc_dead_keytype);
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ keyring_gc(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
+}
diff --git a/security/keys/internal.h b/security/keys/internal.h
new file mode 100644
index 000000000..ec2ec335b
--- /dev/null
+++ b/security/keys/internal.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Authentication token and access key management internal defs
+ *
+ * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _INTERNAL_H
+#define _INTERNAL_H
+
+#include <linux/sched.h>
+#include <linux/wait_bit.h>
+#include <linux/cred.h>
+#include <linux/key-type.h>
+#include <linux/task_work.h>
+#include <linux/keyctl.h>
+#include <linux/refcount.h>
+#include <linux/watch_queue.h>
+#include <linux/compat.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+struct iovec;
+
+#ifdef __KDEBUG
+#define kenter(FMT, ...) \
+ printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ printk(KERN_DEBUG " "FMT"\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
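+
+/*
+ * For example, with __KDEBUG defined, kenter("%d", id) in foo() emits
+ * "==> foo(42)" and kleave(" = %d", ret) emits "<== foo() = 0" at
+ * KERN_DEBUG level; without it, no_printk() still type-checks the
+ * arguments but prints nothing.
+ */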
+
+extern struct key_type key_type_dead;
+extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
+
+/*****************************************************************************/
+/*
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate from user_struct to avoid a refcount loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
+ */
+struct key_user {
+ struct rb_node node;
+ struct mutex cons_lock; /* construction initiation lock */
+ spinlock_t lock;
+ refcount_t usage; /* for accessing qnkeys & qnbytes */
+ atomic_t nkeys; /* number of keys */
+ atomic_t nikeys; /* number of instantiated keys */
+ kuid_t uid;
+ int qnkeys; /* number of keys allocated to this user */
+ int qnbytes; /* number of bytes allocated to this user */
+};
+
+extern struct rb_root key_user_tree;
+extern spinlock_t key_user_lock;
+extern struct key_user root_key_user;
+
+extern struct key_user *key_user_lookup(kuid_t uid);
+extern void key_user_put(struct key_user *user);
+
+/*
+ * Key quota limits.
+ * - root has its own limits, separate from everyone else's
+ */
+extern unsigned key_quota_root_maxkeys;
+extern unsigned key_quota_root_maxbytes;
+extern unsigned key_quota_maxkeys;
+extern unsigned key_quota_maxbytes;
+
+#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+
+
+extern struct kmem_cache *key_jar;
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+extern struct mutex key_construction_mutex;
+extern wait_queue_head_t request_key_conswq;
+
+extern void key_set_index_key(struct keyring_index_key *index_key);
+extern struct key_type *key_type_lookup(const char *type);
+extern void key_type_put(struct key_type *ktype);
+
+extern int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
+extern int __key_link_check_live_key(struct key *keyring, struct key *key);
+extern void __key_link(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit);
+extern void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
+
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
+
+extern struct key *keyring_search_instkey(struct key *keyring,
+ key_serial_t target_id);
+
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ struct key_match_data match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0001 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0002 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
+#define KEYRING_SEARCH_RECURSE 0x0040 /* Search child keyrings also */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ time64_t now;
+};
+
+extern bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data);
+extern key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx);
+
+extern key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx);
+
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+
+extern int look_up_user_keyrings(struct key **, struct key **);
+extern struct key *get_user_session_keyring_rcu(const struct cred *);
+extern int install_thread_keyring_to_cred(struct cred *);
+extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
+
+extern struct key *request_key_and_link(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags);
+
+extern bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data);
+
+extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct callback_head *twork);
+
+extern struct work_struct key_gc_work;
+extern unsigned key_gc_delay;
+extern void keyring_gc(struct key *keyring, time64_t limit);
+extern void keyring_restriction_gc(struct key *keyring,
+ struct key_type *dead_type);
+void key_set_expiry(struct key *key, time64_t expiry);
+extern void key_schedule_gc(time64_t gc_at);
+extern void key_schedule_gc_links(void);
+extern void key_gc_keytype(struct key_type *ktype);
+
+extern int key_task_permission(const key_ref_t key_ref,
+ const struct cred *cred,
+ enum key_need_perm need_perm);
+
+static inline void notify_key(struct key *key,
+ enum key_notification_subtype subtype, u32 aux)
+{
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct key_notification n = {
+ .watch.type = WATCH_TYPE_KEY_NOTIFY,
+ .watch.subtype = subtype,
+ .watch.info = watch_sizeof(n),
+ .key_id = key_serial(key),
+ .aux = aux,
+ };
+
+ post_watch_notification(key->watchers, &n.watch, current_cred(),
+ n.key_id);
+#endif
+}
+
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
+static inline int key_permission(const key_ref_t key_ref,
+ enum key_need_perm need_perm)
+{
+ return key_task_permission(key_ref, current_cred(), need_perm);
+}
+
+extern struct key_type key_type_request_key_auth;
+extern struct key *request_key_auth_new(struct key *target,
+ const char *op,
+ const void *callout_info,
+ size_t callout_len,
+ struct key *dest_keyring);
+
+extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+
+/*
+ * Determine whether a key is dead.
+ */
+static inline bool key_is_dead(const struct key *key, time64_t limit)
+{
+ time64_t expiry = key->expiry;
+
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry <= limit)
+ return true;
+ }
+
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+ key->domain_tag->removed;
+}
+
+/*
+ * keyctl() functions
+ */
+extern long keyctl_get_keyring_ID(key_serial_t, int);
+extern long keyctl_join_session_keyring(const char __user *);
+extern long keyctl_update_key(key_serial_t, const void __user *, size_t);
+extern long keyctl_revoke_key(key_serial_t);
+extern long keyctl_keyring_clear(key_serial_t);
+extern long keyctl_keyring_link(key_serial_t, key_serial_t);
+extern long keyctl_keyring_move(key_serial_t, key_serial_t, key_serial_t, unsigned int);
+extern long keyctl_keyring_unlink(key_serial_t, key_serial_t);
+extern long keyctl_describe_key(key_serial_t, char __user *, size_t);
+extern long keyctl_keyring_search(key_serial_t, const char __user *,
+ const char __user *, key_serial_t);
+extern long keyctl_read_key(key_serial_t, char __user *, size_t);
+extern long keyctl_chown_key(key_serial_t, uid_t, gid_t);
+extern long keyctl_setperm_key(key_serial_t, key_perm_t);
+extern long keyctl_instantiate_key(key_serial_t, const void __user *,
+ size_t, key_serial_t);
+extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t);
+extern long keyctl_set_reqkey_keyring(int);
+extern long keyctl_set_timeout(key_serial_t, unsigned);
+extern long keyctl_assume_authority(key_serial_t);
+extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
+ size_t buflen);
+extern long keyctl_session_to_parent(void);
+extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t);
+extern long keyctl_instantiate_key_iov(key_serial_t,
+ const struct iovec __user *,
+ unsigned, key_serial_t);
+extern long keyctl_invalidate_key(key_serial_t);
+extern long keyctl_restrict_keyring(key_serial_t id,
+ const char __user *_type,
+ const char __user *_restriction);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_KEY_DH_OPERATIONS
+extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
+ size_t, struct keyctl_kdf_params __user *);
+extern long __keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
+ size_t, struct keyctl_kdf_params *);
+#ifdef CONFIG_COMPAT
+extern long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct compat_keyctl_kdf_params __user *kdf);
+#endif
+#define KEYCTL_KDF_MAX_OUTPUT_LEN 1024 /* max length of KDF output */
+#define KEYCTL_KDF_MAX_OI_LEN 64 /* max length of otherinfo */
+#else
+static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ return -EOPNOTSUPP;
+}
+
+#ifdef CONFIG_COMPAT
+static inline long compat_keyctl_dh_compute(
+ struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
+
+#ifdef CONFIG_ASYMMETRIC_KEY_TYPE
+extern long keyctl_pkey_query(key_serial_t,
+ const char __user *,
+ struct keyctl_pkey_query __user *);
+
+extern long keyctl_pkey_verify(const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, const void __user *);
+
+extern long keyctl_pkey_e_d_s(int,
+ const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, void __user *);
+#else
+static inline long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_verify(const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+extern long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen);
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+extern long keyctl_watch_key(key_serial_t, int, int);
+#else
+static inline long keyctl_watch_key(key_serial_t key_id, int watch_fd, int watch_id)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/*
+ * Debugging key validation
+ */
+#ifdef KEY_DEBUGGING
+extern void __key_check(const struct key *);
+
+static inline void key_check(const struct key *key)
+{
+ if (key && (IS_ERR(key) || key->magic != KEY_DEBUG_MAGIC))
+ __key_check(key);
+}
+
+#else
+
+#define key_check(key) do {} while (0)
+
+#endif
+#endif /* _INTERNAL_H */
diff --git a/security/keys/key.c b/security/keys/key.c
new file mode 100644
index 000000000..e65240641
--- /dev/null
+++ b/security/keys/key.c
@@ -0,0 +1,1215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Basic authentication token and access key management
+ *
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/workqueue.h>
+#include <linux/random.h>
+#include <linux/ima.h>
+#include <linux/err.h>
+#include "internal.h"
+
+struct kmem_cache *key_jar;
+struct rb_root key_serial_tree; /* tree of keys indexed by serial */
+DEFINE_SPINLOCK(key_serial_lock);
+
+struct rb_root key_user_tree; /* tree of quota records indexed by UID */
+DEFINE_SPINLOCK(key_user_lock);
+
+unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
+unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
+unsigned int key_quota_maxkeys = 200; /* general key count quota */
+unsigned int key_quota_maxbytes = 20000; /* general key space quota */
+
+static LIST_HEAD(key_types_list);
+static DECLARE_RWSEM(key_types_sem);
+
+/* We serialise key instantiation and link */
+DEFINE_MUTEX(key_construction_mutex);
+
+#ifdef KEY_DEBUGGING
+void __key_check(const struct key *key)
+{
+ printk("__key_check: key %p {%08x} should be {%08x}\n",
+ key, key->magic, KEY_DEBUG_MAGIC);
+ BUG();
+}
+#endif
+
+/*
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
+ */
+struct key_user *key_user_lookup(kuid_t uid)
+{
+ struct key_user *candidate = NULL, *user;
+ struct rb_node *parent, **p;
+
+try_again:
+ parent = NULL;
+ p = &key_user_tree.rb_node;
+ spin_lock(&key_user_lock);
+
+ /* search the tree for a user record with a matching UID */
+ while (*p) {
+ parent = *p;
+ user = rb_entry(parent, struct key_user, node);
+
+ if (uid_lt(uid, user->uid))
+ p = &(*p)->rb_left;
+ else if (uid_gt(uid, user->uid))
+ p = &(*p)->rb_right;
+ else
+ goto found;
+ }
+
+ /* if we get here, we failed to find a match in the tree */
+ if (!candidate) {
+ /* allocate a candidate user record if we don't already have
+ * one */
+ spin_unlock(&key_user_lock);
+
+ user = NULL;
+ candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
+ if (unlikely(!candidate))
+ goto out;
+
+		/* the allocation may have slept, so we need to repeat the
+ * search lest someone else added the record whilst we were
+ * asleep */
+ goto try_again;
+ }
+
+ /* if we get here, then the user record still hadn't appeared on the
+ * second pass - so we use the candidate record */
+ refcount_set(&candidate->usage, 1);
+ atomic_set(&candidate->nkeys, 0);
+ atomic_set(&candidate->nikeys, 0);
+ candidate->uid = uid;
+ candidate->qnkeys = 0;
+ candidate->qnbytes = 0;
+ spin_lock_init(&candidate->lock);
+ mutex_init(&candidate->cons_lock);
+
+ rb_link_node(&candidate->node, parent, p);
+ rb_insert_color(&candidate->node, &key_user_tree);
+ spin_unlock(&key_user_lock);
+ user = candidate;
+ goto out;
+
+ /* okay - we found a user record for this UID */
+found:
+ refcount_inc(&user->usage);
+ spin_unlock(&key_user_lock);
+ kfree(candidate);
+out:
+ return user;
+}
+
+/*
+ * Dispose of a user structure
+ */
+void key_user_put(struct key_user *user)
+{
+ if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
+ rb_erase(&user->node, &key_user_tree);
+ spin_unlock(&key_user_lock);
+
+ kfree(user);
+ }
+}
+
+/*
+ * Allocate a serial number for a key. These are assigned randomly to avoid
+ * covert-channel security issues arising from predictable serial numbers.
+ */
+static inline void key_alloc_serial(struct key *key)
+{
+ struct rb_node *parent, **p;
+ struct key *xkey;
+
+ /* propose a random serial number and look for a hole for it in the
+ * serial number tree */
+ do {
+ get_random_bytes(&key->serial, sizeof(key->serial));
+
+ key->serial >>= 1; /* negative numbers are not permitted */
+ } while (key->serial < 3);
+
+ spin_lock(&key_serial_lock);
+
+attempt_insertion:
+ parent = NULL;
+ p = &key_serial_tree.rb_node;
+
+ while (*p) {
+ parent = *p;
+ xkey = rb_entry(parent, struct key, serial_node);
+
+ if (key->serial < xkey->serial)
+ p = &(*p)->rb_left;
+ else if (key->serial > xkey->serial)
+ p = &(*p)->rb_right;
+ else
+ goto serial_exists;
+ }
+
+ /* we've found a suitable hole - arrange for this key to occupy it */
+ rb_link_node(&key->serial_node, parent, p);
+ rb_insert_color(&key->serial_node, &key_serial_tree);
+
+ spin_unlock(&key_serial_lock);
+ return;
+
+ /* we found a key with the proposed serial number - walk the tree from
+ * that point looking for the next unused serial number */
+serial_exists:
+ for (;;) {
+ key->serial++;
+ if (key->serial < 3) {
+ key->serial = 3;
+ goto attempt_insertion;
+ }
+
+ parent = rb_next(parent);
+ if (!parent)
+ goto attempt_insertion;
+
+ xkey = rb_entry(parent, struct key, serial_node);
+ if (key->serial < xkey->serial)
+ goto attempt_insertion;
+ }
+}
+
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ * @restrict_link: Optional link restriction for new keyrings.
+ *
+ * Allocate a key of the specified type with the attributes given. The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The restrict_link structure (if not NULL) will be freed when the
+ * keyring is destroyed, so it must be dynamically allocated.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved. The
+ * instantiation function should amend this as necessary. If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem. Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
+ */
+struct key *key_alloc(struct key_type *type, const char *desc,
+ kuid_t uid, kgid_t gid, const struct cred *cred,
+ key_perm_t perm, unsigned long flags,
+ struct key_restriction *restrict_link)
+{
+ struct key_user *user = NULL;
+ struct key *key;
+ size_t desclen, quotalen;
+ int ret;
+
+ key = ERR_PTR(-EINVAL);
+ if (!desc || !*desc)
+ goto error;
+
+ if (type->vet_description) {
+ ret = type->vet_description(desc);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
+
+ /* get hold of the key tracking for this user */
+ user = key_user_lookup(uid);
+ if (!user)
+ goto no_memory_1;
+
+ /* check that the user's quota permits allocation of another key and
+ * its description */
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&user->lock);
+ if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
+ user->qnbytes + quotalen < user->qnbytes)
+ goto no_quota;
+ }
+
+ user->qnkeys++;
+ user->qnbytes += quotalen;
+ spin_unlock(&user->lock);
+ }
+
+ /* allocate and initialise the key and its description */
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
+ if (!key)
+ goto no_memory_2;
+
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
+ if (!key->index_key.description)
+ goto no_memory_3;
+ key->index_key.type = type;
+ key_set_index_key(&key->index_key);
+
+ refcount_set(&key->usage, 1);
+ init_rwsem(&key->sem);
+ lockdep_set_class(&key->sem, &type->lock_class);
+ key->user = user;
+ key->quotalen = quotalen;
+ key->datalen = type->def_datalen;
+ key->uid = uid;
+ key->gid = gid;
+ key->perm = perm;
+ key->expiry = TIME64_MAX;
+ key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
+
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
+ key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+ if (flags & KEY_ALLOC_BUILT_IN)
+ key->flags |= 1 << KEY_FLAG_BUILTIN;
+ if (flags & KEY_ALLOC_UID_KEYRING)
+ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ if (flags & KEY_ALLOC_SET_KEEP)
+ key->flags |= 1 << KEY_FLAG_KEEP;
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC;
+#endif
+
+ /* let the security module know about the key */
+ ret = security_key_alloc(key, cred, flags);
+ if (ret < 0)
+ goto security_error;
+
+ /* publish the key by giving it a serial number */
+ refcount_inc(&key->domain_tag->usage);
+ atomic_inc(&user->nkeys);
+ key_alloc_serial(key);
+
+error:
+ return key;
+
+security_error:
+ kfree(key->description);
+ kmem_cache_free(key_jar, key);
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ spin_lock(&user->lock);
+ user->qnkeys--;
+ user->qnbytes -= quotalen;
+ spin_unlock(&user->lock);
+ }
+ key_user_put(user);
+ key = ERR_PTR(ret);
+ goto error;
+
+no_memory_3:
+ kmem_cache_free(key_jar, key);
+no_memory_2:
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ spin_lock(&user->lock);
+ user->qnkeys--;
+ user->qnbytes -= quotalen;
+ spin_unlock(&user->lock);
+ }
+ key_user_put(user);
+no_memory_1:
+ key = ERR_PTR(-ENOMEM);
+ goto error;
+
+no_quota:
+ spin_unlock(&user->lock);
+ key_user_put(user);
+ key = ERR_PTR(-EDQUOT);
+ goto error;
+}
+EXPORT_SYMBOL(key_alloc);
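+
+/*
+ * A minimal sketch of in-kernel usage (error handling elided; assumes the
+ * "user" key type and caller-supplied payload/plen):
+ *
+ *   struct key *k;
+ *
+ *   k = key_alloc(&key_type_user, "ex:desc", GLOBAL_ROOT_UID,
+ *                 GLOBAL_ROOT_GID, current_cred(),
+ *                 KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ *   if (!IS_ERR(k))
+ *           key_instantiate_and_link(k, payload, plen, NULL, NULL);
+ */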
+
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
+ */
+int key_payload_reserve(struct key *key, size_t datalen)
+{
+ int delta = (int)datalen - key->datalen;
+ int ret = 0;
+
+ key_check(key);
+
+ /* contemplate the quota adjustment */
+ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&key->user->lock);
+
+ if (delta > 0 &&
+ (key->user->qnbytes + delta > maxbytes ||
+ key->user->qnbytes + delta < key->user->qnbytes)) {
+ ret = -EDQUOT;
+ }
+ else {
+ key->user->qnbytes += delta;
+ key->quotalen += delta;
+ }
+ spin_unlock(&key->user->lock);
+ }
+
+ /* change the recorded data length if that didn't generate an error */
+ if (ret == 0)
+ key->datalen = datalen;
+
+ return ret;
+}
+EXPORT_SYMBOL(key_payload_reserve);
+
+/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+ /* Commit the payload before setting the state; barrier versus
+ * key_read_state().
+ */
+ smp_store_release(&key->state,
+ (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
+ * Instantiate a key and link it into the target keyring atomically. Must be
+ * called with the target keyring's semaphore writelocked. The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
+ */
+static int __key_instantiate_and_link(struct key *key,
+ struct key_preparsed_payload *prep,
+ struct key *keyring,
+ struct key *authkey,
+ struct assoc_array_edit **_edit)
+{
+ int ret, awaken;
+
+ key_check(key);
+ key_check(keyring);
+
+ awaken = 0;
+ ret = -EBUSY;
+
+ mutex_lock(&key_construction_mutex);
+
+ /* can't instantiate twice */
+ if (key->state == KEY_IS_UNINSTANTIATED) {
+ /* instantiate the key */
+ ret = key->type->instantiate(key, prep);
+
+ if (ret == 0) {
+ /* mark the key as being instantiated */
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, 0);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+
+ /* and link it into the destination keyring */
+ if (keyring) {
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+ set_bit(KEY_FLAG_KEEP, &key->flags);
+
+ __key_link(keyring, key, _edit);
+ }
+
+ /* disable the authorisation key */
+ if (authkey)
+ key_invalidate(authkey);
+
+ key_set_expiry(key, prep->expiry);
+ }
+ }
+
+ mutex_unlock(&key_construction_mutex);
+
+ /* wake up anyone waiting for a key to be constructed */
+ if (awaken)
+ wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+ return ret;
+}
+
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the keyring.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_instantiate_and_link(struct key *key,
+ const void *data,
+ size_t datalen,
+ struct key *keyring,
+ struct key *authkey)
+{
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.orig_description = key->description;
+ prep.data = data;
+ prep.datalen = datalen;
+ prep.quotalen = key->type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (keyring) {
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_link_end;
+
+ if (keyring->restrict_link && keyring->restrict_link->check) {
+ struct key_restriction *keyres = keyring->restrict_link;
+
+ ret = keyres->check(keyring, key->type, &prep.payload,
+ keyres->key);
+ if (ret < 0)
+ goto error_link_end;
+ }
+ }
+
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
+
+error_link_end:
+ if (keyring)
+ __key_link_end(keyring, &key->index_key, edit);
+
+error:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+ return ret;
+}
+
+EXPORT_SYMBOL(key_instantiate_and_link);
+
+/**
+ * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @error: The error to return when the key is hit.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and stored error and link it in to the
+ * destination keyring if one is supplied. The key and any links to the key
+ * will be automatically garbage collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the stored error code (typically ENOKEY) until the negative
+ * key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_reject_and_link(struct key *key,
+ unsigned timeout,
+ unsigned error,
+ struct key *keyring,
+ struct key *authkey)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret, awaken, link_ret = 0;
+
+ key_check(key);
+ key_check(keyring);
+
+ awaken = 0;
+ ret = -EBUSY;
+
+ if (keyring) {
+ if (keyring->restrict_link)
+ return -EPERM;
+
+ link_ret = __key_link_lock(keyring, &key->index_key);
+ if (link_ret == 0) {
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (link_ret < 0)
+ __key_link_end(keyring, &key->index_key, edit);
+ }
+ }
+
+ mutex_lock(&key_construction_mutex);
+
+ /* can't instantiate twice */
+ if (key->state == KEY_IS_UNINSTANTIATED) {
+ /* mark the key as being negatively instantiated */
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, -error);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
+ key_set_expiry(key, ktime_get_real_seconds() + timeout);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+
+ ret = 0;
+
+ /* and link it into the destination keyring */
+ if (keyring && link_ret == 0)
+ __key_link(keyring, key, &edit);
+
+ /* disable the authorisation key */
+ if (authkey)
+ key_invalidate(authkey);
+ }
+
+ mutex_unlock(&key_construction_mutex);
+
+ if (keyring && link_ret == 0)
+ __key_link_end(keyring, &key->index_key, edit);
+
+ /* wake up anyone waiting for a key to be constructed */
+ if (awaken)
+ wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+ return ret == 0 ? link_ret : ret;
+}
+EXPORT_SYMBOL(key_reject_and_link);
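+
+/*
+ * For example, an upcall handler that cannot produce a key might do
+ * (a sketch):
+ *
+ *   key_reject_and_link(key, 60, ENOKEY, NULL, authkey);
+ *
+ * after which request_key() callers see -ENOKEY for the next minute rather
+ * than triggering a fresh upcall each time.
+ */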
+
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
+ */
+void key_put(struct key *key)
+{
+ if (key) {
+ key_check(key);
+
+ if (refcount_dec_and_test(&key->usage))
+ schedule_work(&key_gc_work);
+ }
+}
+EXPORT_SYMBOL(key_put);
+
+/*
+ * Find a key by its serial number.
+ */
+struct key *key_lookup(key_serial_t id)
+{
+ struct rb_node *n;
+ struct key *key;
+
+ spin_lock(&key_serial_lock);
+
+ /* search the tree for the specified key */
+ n = key_serial_tree.rb_node;
+ while (n) {
+ key = rb_entry(n, struct key, serial_node);
+
+ if (id < key->serial)
+ n = n->rb_left;
+ else if (id > key->serial)
+ n = n->rb_right;
+ else
+ goto found;
+ }
+
+not_found:
+ key = ERR_PTR(-ENOKEY);
+ goto error;
+
+found:
+ /* A key is allowed to be looked up only if someone still owns a
+ * reference to it - otherwise it's awaiting the gc.
+ */
+ if (!refcount_inc_not_zero(&key->usage))
+ goto not_found;
+
+error:
+ spin_unlock(&key_serial_lock);
+ return key;
+}
+
+/*
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful. If the type wasn't
+ * available -ENOKEY is returned instead.
+ */
+struct key_type *key_type_lookup(const char *type)
+{
+ struct key_type *ktype;
+
+ down_read(&key_types_sem);
+
+ /* look up the key type to see if it's one of the registered kernel
+ * types */
+ list_for_each_entry(ktype, &key_types_list, link) {
+ if (strcmp(ktype->name, type) == 0)
+ goto found_kernel_type;
+ }
+
+ up_read(&key_types_sem);
+ ktype = ERR_PTR(-ENOKEY);
+
+found_kernel_type:
+ return ktype;
+}
+
+void key_set_timeout(struct key *key, unsigned timeout)
+{
+ time64_t expiry = TIME64_MAX;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
+ key_set_expiry(key, expiry);
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL_GPL(key_set_timeout);
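+
+/*
+ * For example, key_set_timeout(key, 60) arranges for the key to expire one
+ * minute from now, while key_set_timeout(key, 0) clears any expiry.
+ */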
+
+/*
+ * Unlock a key type locked by key_type_lookup().
+ */
+void key_type_put(struct key_type *ktype)
+{
+ up_read(&key_types_sem);
+}
+
+/*
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
+ */
+static inline key_ref_t __key_update(key_ref_t key_ref,
+ struct key_preparsed_payload *prep)
+{
+ struct key *key = key_ref_to_ptr(key_ref);
+ int ret;
+
+ /* need write permission on the key to update it */
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
+ if (ret < 0)
+ goto error;
+
+ ret = -EEXIST;
+ if (!key->type->update)
+ goto error;
+
+ down_write(&key->sem);
+
+ ret = key->type->update(key, prep);
+ if (ret == 0) {
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_UPDATED, 0);
+ }
+
+ up_write(&key->sem);
+
+ if (ret < 0)
+ goto error;
+out:
+ return key_ref;
+
+error:
+ key_put(key);
+ key_ref = ERR_PTR(ret);
+ goto out;
+}
+
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
+ */
+key_ref_t key_create_or_update(key_ref_t keyring_ref,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags)
+{
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit = NULL;
+ const struct cred *cred = current_cred();
+ struct key *keyring, *key = NULL;
+ key_ref_t key_ref;
+ int ret;
+ struct key_restriction *restrict_link = NULL;
+
+ /* look up the key type to see if it's one of the registered kernel
+ * types */
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
+ key_ref = ERR_PTR(-ENODEV);
+ goto error;
+ }
+
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
+ goto error_put_type;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+
+ key_check(keyring);
+
+ if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
+ restrict_link = keyring->restrict_link;
+
+ key_ref = ERR_PTR(-ENOTDIR);
+ if (keyring->type != &key_type_keyring)
+ goto error_put_type;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.orig_description = description;
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = index_key.type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ if (!index_key.description)
+ index_key.description = prep.description;
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.description)
+ goto error_free_prep;
+ }
+ index_key.desc_len = strlen(index_key.description);
+ key_set_index_key(&index_key);
+
+ ret = __key_link_lock(keyring, &index_key);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ if (restrict_link && restrict_link->check) {
+ ret = restrict_link->check(keyring, index_key.type,
+ &prep.payload, restrict_link->key);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+ }
+
+ /* if we're going to allocate a new key, we're going to have
+ * to modify the keyring */
+ ret = key_permission(keyring_ref, KEY_NEED_WRITE);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ /* if it's possible to update this type of key, search for an existing
+ * key of the same type and description in the destination keyring and
+ * update that instead if possible
+ */
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
+ goto found_matching_key;
+ }
+
+ /* if the client doesn't provide, decide on the permissions we want */
+ if (perm == KEY_PERM_UNDEF) {
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+
+ if (index_key.type->read)
+ perm |= KEY_POS_READ;
+
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
+ perm |= KEY_POS_WRITE;
+ }
+
+ /* allocate a new key */
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
+ if (IS_ERR(key)) {
+ key_ref = ERR_CAST(key);
+ goto error_link_end;
+ }
+
+ /* instantiate it and link it into the target keyring */
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
+ if (ret < 0) {
+ key_put(key);
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ ima_post_key_create_or_update(keyring, key, payload, plen,
+ flags, true);
+
+ key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
+
+error_link_end:
+ __key_link_end(keyring, &index_key, edit);
+error_free_prep:
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
+error_put_type:
+ key_type_put(index_key.type);
+error:
+ return key_ref;
+
+ found_matching_key:
+ /* we found a matching key, so we're going to try to update it
+ * - we can drop the locks first as we have the key pinned
+ */
+ __key_link_end(keyring, &index_key, edit);
+
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+ ret = wait_for_key_construction(key, true);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ }
+
+ key_ref = __key_update(key_ref, &prep);
+
+ if (!IS_ERR(key_ref))
+ ima_post_key_create_or_update(keyring, key,
+ payload, plen,
+ flags, false);
+
+ goto error_free_prep;
+}
+EXPORT_SYMBOL(key_create_or_update);
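+
+/*
+ * Illustrative sketch (not part of this file): an in-kernel caller might
+ * drive key_create_or_update() as below. The keyring pointer, payload and
+ * description are hypothetical; the signature and flag names are taken from
+ * the function above.
+ *
+ *   key_ref_t ref;
+ *
+ *   ref = key_create_or_update(make_key_ref(my_keyring, true),
+ *                              "user", "example:desc", data, datalen,
+ *                              KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
+ *   if (IS_ERR(ref))
+ *       return PTR_ERR(ref);
+ *   key_ref_put(ref);
+ */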
+
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data. The
+ * caller must be granted Write permission on the key. Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating. The key type may return other errors.
+ */
+int key_update(key_ref_t key_ref, const void *payload, size_t plen)
+{
+ struct key_preparsed_payload prep;
+ struct key *key = key_ref_to_ptr(key_ref);
+ int ret;
+
+ key_check(key);
+
+ /* the key must be writable */
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
+ if (ret < 0)
+ return ret;
+
+ /* attempt to update it if supported */
+ if (!key->type->update)
+ return -EOPNOTSUPP;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = key->type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
+
+ down_write(&key->sem);
+
+ ret = key->type->update(key, &prep);
+ if (ret == 0) {
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_UPDATED, 0);
+ }
+
+ up_write(&key->sem);
+
+error:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+ return ret;
+}
+EXPORT_SYMBOL(key_update);
+
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources. The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
+ */
+void key_revoke(struct key *key)
+{
+ time64_t time;
+
+ key_check(key);
+
+ /* make sure no one's trying to change or use the key when we mark it
+ * - we tell lockdep that we might nest because we might be revoking an
+ * authorisation key whilst holding the sem on a key we've just
+ * instantiated
+ */
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags)) {
+ notify_key(key, NOTIFY_KEY_REVOKED, 0);
+ if (key->type->revoke)
+ key->type->revoke(key);
+
+ /* set the death time to no more than the expiry time */
+ time = ktime_get_real_seconds();
+ if (key->revoked_at == 0 || key->revoked_at > time) {
+ key->revoked_at = time;
+ key_schedule_gc(key->revoked_at + key_gc_delay);
+ }
+ }
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL(key_revoke);
+
+/**
+ * key_invalidate - Invalidate a key.
+ * @key: The key to be invalidated.
+ *
+ * Mark a key as being invalidated and have it cleaned up immediately. The key
+ * is ignored by all searches and other operations from this point.
+ */
+void key_invalidate(struct key *key)
+{
+ kenter("%d", key_serial(key));
+
+ key_check(key);
+
+ if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ notify_key(key, NOTIFY_KEY_INVALIDATED, 0);
+ key_schedule_gc_links();
+ }
+ up_write(&key->sem);
+ }
+}
+EXPORT_SYMBOL(key_invalidate);
+
+/**
+ * generic_key_instantiate - Simple instantiation of a key from preparsed data
+ * @key: The key to be instantiated
+ * @prep: The preparsed data to load.
+ *
+ * Instantiate a key from preparsed data. We assume we can just copy the data
+ * in directly and clear the old pointers.
+ *
+ * This can be pointed to directly by the key type instantiate op pointer.
+ */
+int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ ret = key_payload_reserve(key, prep->quotalen);
+ if (ret == 0) {
+ rcu_assign_keypointer(key, prep->payload.data[0]);
+ key->payload.data[1] = prep->payload.data[1];
+ key->payload.data[2] = prep->payload.data[2];
+ key->payload.data[3] = prep->payload.data[3];
+ prep->payload.data[0] = NULL;
+ prep->payload.data[1] = NULL;
+ prep->payload.data[2] = NULL;
+ prep->payload.data[3] = NULL;
+ }
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL(generic_key_instantiate);
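+
+/*
+ * Illustrative sketch (hypothetical "example" key type, not defined here): a
+ * key type whose ->preparse() fills in prep->payload can point its
+ * instantiate op directly at generic_key_instantiate():
+ *
+ *   static struct key_type key_type_example = {
+ *       .name          = "example",
+ *       .preparse      = example_preparse,
+ *       .free_preparse = example_free_preparse,
+ *       .instantiate   = generic_key_instantiate,
+ *       .destroy       = example_destroy,
+ *   };
+ */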
+
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
+ */
+int register_key_type(struct key_type *ktype)
+{
+ struct key_type *p;
+ int ret;
+
+ memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+
+ ret = -EEXIST;
+ down_write(&key_types_sem);
+
+ /* disallow key types with the same name */
+ list_for_each_entry(p, &key_types_list, link) {
+ if (strcmp(p->name, ktype->name) == 0)
+ goto out;
+ }
+
+ /* store the type */
+ list_add(&ktype->link, &key_types_list);
+
+ pr_notice("Key type %s registered\n", ktype->name);
+ ret = 0;
+
+out:
+ up_write(&key_types_sem);
+ return ret;
+}
+EXPORT_SYMBOL(register_key_type);
+
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
+ */
+void unregister_key_type(struct key_type *ktype)
+{
+ down_write(&key_types_sem);
+ list_del_init(&ktype->link);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ pr_notice("Key type %s unregistered\n", ktype->name);
+ up_read(&key_types_sem);
+}
+EXPORT_SYMBOL(unregister_key_type);
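+
+/*
+ * Illustrative sketch: a module supplying a key type (such as the
+ * hypothetical key_type_example above) would typically register it on load
+ * and unregister it on unload:
+ *
+ *   static int __init example_key_init(void)
+ *   {
+ *       return register_key_type(&key_type_example);
+ *   }
+ *
+ *   static void __exit example_key_exit(void)
+ *   {
+ *       unregister_key_type(&key_type_example);
+ *   }
+ *
+ *   module_init(example_key_init);
+ *   module_exit(example_key_exit);
+ */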
+
+/*
+ * Initialise the key management state.
+ */
+void __init key_init(void)
+{
+ /* allocate a slab in which we can store keys */
+ key_jar = kmem_cache_create("key_jar", sizeof(struct key),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+
+ /* add the special key types */
+ list_add_tail(&key_type_keyring.link, &key_types_list);
+ list_add_tail(&key_type_dead.link, &key_types_list);
+ list_add_tail(&key_type_user.link, &key_types_list);
+ list_add_tail(&key_type_logon.link, &key_types_list);
+
+ /* record the root user tracking */
+ rb_link_node(&root_key_user.node,
+ NULL,
+ &key_user_tree.rb_node);
+
+ rb_insert_color(&root_key_user.node,
+ &key_user_tree);
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
new file mode 100644
index 000000000..19be69fa4
--- /dev/null
+++ b/security/keys/keyctl.c
@@ -0,0 +1,2026 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Userspace key control operations
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/cred.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
+#include "internal.h"
+
+#define KEY_MAX_DESC_SIZE 4096
+
+static const unsigned char keyrings_capabilities[2] = {
+ [0] = (KEYCTL_CAPS0_CAPABILITIES |
+ (IS_ENABLED(CONFIG_PERSISTENT_KEYRINGS) ? KEYCTL_CAPS0_PERSISTENT_KEYRINGS : 0) |
+ (IS_ENABLED(CONFIG_KEY_DH_OPERATIONS) ? KEYCTL_CAPS0_DIFFIE_HELLMAN : 0) |
+ (IS_ENABLED(CONFIG_ASYMMETRIC_KEY_TYPE) ? KEYCTL_CAPS0_PUBLIC_KEY : 0) |
+ (IS_ENABLED(CONFIG_BIG_KEYS) ? KEYCTL_CAPS0_BIG_KEY : 0) |
+ KEYCTL_CAPS0_INVALIDATE |
+ KEYCTL_CAPS0_RESTRICT_KEYRING |
+ KEYCTL_CAPS0_MOVE
+ ),
+ [1] = (KEYCTL_CAPS1_NS_KEYRING_NAME |
+ KEYCTL_CAPS1_NS_KEY_TAG |
+ (IS_ENABLED(CONFIG_KEY_NOTIFICATIONS) ? KEYCTL_CAPS1_NOTIFICATIONS : 0)
+ ),
+};
+
+static int key_get_type_from_user(char *type,
+ const char __user *_type,
+ unsigned len)
+{
+ int ret;
+
+ ret = strncpy_from_user(type, _type, len);
+ if (ret < 0)
+ return ret;
+ if (ret == 0 || ret >= len)
+ return -EINVAL;
+ if (type[0] == '.')
+ return -EPERM;
+ type[len - 1] = '\0';
+ return 0;
+}
+
+/*
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * If the description is NULL or an empty string, the key type is asked to
+ * generate one from the payload.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
+ */
+SYSCALL_DEFINE5(add_key, const char __user *, _type,
+ const char __user *, _description,
+ const void __user *, _payload,
+ size_t, plen,
+ key_serial_t, ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ char type[32], *description;
+ void *payload;
+ long ret;
+
+ ret = -EINVAL;
+ if (plen > 1024 * 1024 - 1)
+ goto error;
+
+ /* draw all the data into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ description = NULL;
+ if (_description) {
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+ if (!*description) {
+ kfree(description);
+ description = NULL;
+ } else if ((description[0] == '.') &&
+ (strncmp(type, "keyring", 7) == 0)) {
+ ret = -EPERM;
+ goto error2;
+ }
+ }
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+ if (plen) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error2;
+
+ ret = -EFAULT;
+ if (copy_from_user(payload, _payload, plen) != 0)
+ goto error3;
+ }
+
+ /* find the target keyring (which must be writable) */
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error3;
+ }
+
+ /* create or update the requested key and add it to the target
+ * keyring */
+ key_ref = key_create_or_update(keyring_ref, type, description,
+ payload, plen, KEY_PERM_UNDEF,
+ KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key_ref)) {
+ ret = key_ref_to_ptr(key_ref)->serial;
+ key_ref_put(key_ref);
+ } else {
+ ret = PTR_ERR(key_ref);
+ }
+
+ key_ref_put(keyring_ref);
+ error3:
+ kvfree_sensitive(payload, plen);
+ error2:
+ kfree(description);
+ error:
+ return ret;
+}
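+
+/*
+ * Illustrative userspace sketch (assumes libkeyutils' <keyutils.h> wrapper
+ * for this syscall; type, description and payload values are made up):
+ *
+ *   #include <keyutils.h>
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *       key_serial_t id = add_key("user", "example:desc",
+ *                                 "example payload", 15,
+ *                                 KEY_SPEC_SESSION_KEYRING);
+ *       if (id == -1) {
+ *           perror("add_key");
+ *           return 1;
+ *       }
+ *       printf("key %d\n", id);
+ *       return 0;
+ *   }
+ */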
+
+/*
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key. Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key. The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request. If the
+ * _callout_info string is "" then it will be changed to "-".
+ */
+SYSCALL_DEFINE4(request_key, const char __user *, _type,
+ const char __user *, _description,
+ const char __user *, _callout_info,
+ key_serial_t, destringid)
+{
+ struct key_type *ktype;
+ struct key *key;
+ key_ref_t dest_ref;
+ size_t callout_len;
+ char type[32], *description, *callout_info;
+ long ret;
+
+ /* pull the type into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ /* pull the description into kernel space */
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+
+ /* pull the callout info into kernel space */
+ callout_info = NULL;
+ callout_len = 0;
+ if (_callout_info) {
+ callout_info = strndup_user(_callout_info, PAGE_SIZE);
+ if (IS_ERR(callout_info)) {
+ ret = PTR_ERR(callout_info);
+ goto error2;
+ }
+ callout_len = strlen(callout_info);
+ }
+
+ /* get the destination keyring if specified */
+ dest_ref = NULL;
+ if (destringid) {
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref)) {
+ ret = PTR_ERR(dest_ref);
+ goto error3;
+ }
+ }
+
+ /* find the key type */
+ ktype = key_type_lookup(type);
+ if (IS_ERR(ktype)) {
+ ret = PTR_ERR(ktype);
+ goto error4;
+ }
+
+ /* do the search */
+ key = request_key_and_link(ktype, description, NULL, callout_info,
+ callout_len, NULL, key_ref_to_ptr(dest_ref),
+ KEY_ALLOC_IN_QUOTA);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error5;
+ }
+
+ /* wait for the key to finish being constructed */
+ ret = wait_for_key_construction(key, 1);
+ if (ret < 0)
+ goto error6;
+
+ ret = key->serial;
+
+error6:
+ key_put(key);
+error5:
+ key_type_put(ktype);
+error4:
+ key_ref_put(dest_ref);
+error3:
+ kfree(callout_info);
+error2:
+ kfree(description);
+error:
+ return ret;
+}
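+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed; description
+ * and callout string are made up). If no matching key exists,
+ * /sbin/request-key is invoked with the callout info to construct one:
+ *
+ *   key_serial_t id = request_key("user", "example:desc",
+ *                                 "example-callout",
+ *                                 KEY_SPEC_SESSION_KEYRING);
+ *   if (id == -1)
+ *       perror("request_key");
+ */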
+
+/*
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
+ */
+long keyctl_get_keyring_ID(key_serial_t id, int create)
+{
+ key_ref_t key_ref;
+ unsigned long lflags;
+ long ret;
+
+ lflags = create ? KEY_LOOKUP_CREATE : 0;
+ key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ ret = key_ref_to_ptr(key_ref)->serial;
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
+
+/*
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary. A named session keyring must have Search
+ * permission for it to be joined. Session keyrings without this permit will
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
+ */
+long keyctl_join_session_keyring(const char __user *_name)
+{
+ char *name;
+ long ret;
+
+ /* fetch the name from userspace */
+ name = NULL;
+ if (_name) {
+ name = strndup_user(_name, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(name)) {
+ ret = PTR_ERR(name);
+ goto error;
+ }
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
+ }
+
+ /* join the session */
+ ret = join_session_keyring(name);
+error_name:
+ kfree(name);
+error:
+ return ret;
+}
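+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed): join a named
+ * session keyring, creating it if it doesn't exist yet:
+ *
+ *   key_serial_t ring = keyctl_join_session_keyring("example-session");
+ *   if (ring == -1)
+ *       perror("keyctl_join_session_keyring");
+ */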
+
+/*
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work. A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned. If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
+ */
+long keyctl_update_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen)
+{
+ key_ref_t key_ref;
+ void *payload;
+ long ret;
+
+ ret = -EINVAL;
+ if (plen > PAGE_SIZE)
+ goto error;
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+ if (plen) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_from_user(payload, _payload, plen) != 0)
+ goto error2;
+ }
+
+ /* find the target key (which must be writable) */
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ /* update the key */
+ ret = key_update(key_ref, payload, plen);
+
+ key_ref_put(key_ref);
+error2:
+ kvfree_sensitive(payload, plen);
+error:
+ return ret;
+}
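+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed; "id" is a key
+ * obtained earlier, e.g. from add_key()):
+ *
+ *   if (keyctl_update(id, "new payload", 11) == -1)
+ *       perror("keyctl_update");
+ */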
+
+/*
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work. The key type should give up its quota claim when revoked. The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * Keys with KEY_FLAG_KEEP set should not be revoked.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_revoke_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ struct key *key;
+ long ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ if (ret != -EACCES)
+ goto error;
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ key_revoke(key);
+
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
+
+/*
+ * Invalidate a key.
+ *
+ * The key must grant the caller Invalidate permission for this to work.
+ * The key and any links to the key will be automatically garbage collected
+ * immediately.
+ *
+ * Keys with KEY_FLAG_KEEP set should not be invalidated.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_invalidate_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ struct key *key;
+ long ret;
+
+ kenter("%d", id);
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+
+ /* Root is permitted to invalidate certain special keys */
+ if (capable(CAP_SYS_ADMIN)) {
+ key_ref = lookup_user_key(id, 0, KEY_SYSADMIN_OVERRIDE);
+ if (IS_ERR(key_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
+ &key_ref_to_ptr(key_ref)->flags))
+ goto invalidate;
+ goto error_put;
+ }
+
+ goto error;
+ }
+
+invalidate:
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ key_invalidate(key);
+error_put:
+ key_ref_put(key_ref);
+error:
+ kleave(" = %ld", ret);
+ return ret;
+}
+
+/*
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission and not have
+ * KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
+ */
+long keyctl_keyring_clear(key_serial_t ringid)
+{
+ key_ref_t keyring_ref;
+ struct key *keyring;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+
+ /* Root is permitted to invalidate certain special keyrings */
+ if (capable(CAP_SYS_ADMIN)) {
+ keyring_ref = lookup_user_key(ringid, 0,
+ KEY_SYSADMIN_OVERRIDE);
+ if (IS_ERR(keyring_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
+ &key_ref_to_ptr(keyring_ref)->flags))
+ goto clear;
+ goto error_put;
+ }
+
+ goto error;
+ }
+
+clear:
+ keyring = key_ref_to_ptr(keyring_ref);
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+ ret = -EPERM;
+ else
+ ret = keyring_clear(keyring);
+error_put:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
+
+/*
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission. Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error;
+ }
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
+
+ key_ref_put(key_ref);
+error2:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
+
+/*
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything. If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ struct key *keyring, *key;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error;
+ }
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, KEY_NEED_UNLINK);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
+ test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ ret = key_unlink(keyring, key);
+
+ key_ref_put(key_ref);
+error2:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
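+
+/*
+ * Illustrative userspace sketch (libkeyutils wrappers assumed): link a key
+ * into the user keyring, then drop it from the session keyring:
+ *
+ *   if (keyctl_link(id, KEY_SPEC_USER_KEYRING) == -1)
+ *       perror("keyctl_link");
+ *   if (keyctl_unlink(id, KEY_SPEC_SESSION_KEYRING) == -1)
+ *       perror("keyctl_unlink");
+ */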
+
+/*
+ * Move a link to a key from one keyring to another, displacing any matching
+ * key from the destination keyring.
+ *
+ * The key must grant the caller Link permission and both keyrings must grant
+ * the caller Write permission. There must also be a link in the from keyring
+ * to the key. If both keyrings are the same, nothing is done.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_move(key_serial_t id, key_serial_t from_ringid,
+ key_serial_t to_ringid, unsigned int flags)
+{
+ key_ref_t key_ref, from_ref, to_ref;
+ long ret;
+
+ if (flags & ~KEYCTL_MOVE_EXCL)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ from_ref = lookup_user_key(from_ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(from_ref)) {
+ ret = PTR_ERR(from_ref);
+ goto error2;
+ }
+
+ to_ref = lookup_user_key(to_ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(to_ref)) {
+ ret = PTR_ERR(to_ref);
+ goto error3;
+ }
+
+ ret = key_move(key_ref_to_ptr(key_ref), key_ref_to_ptr(from_ref),
+ key_ref_to_ptr(to_ref), flags);
+
+ key_ref_put(to_ref);
+error3:
+ key_ref_put(from_ref);
+error2:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
+ * type;uid;gid;perm;description<NUL>
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
+ */
+long keyctl_describe_key(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ char *infobuf;
+ long ret;
+ int desclen, infolen;
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref)) {
+ /* viewing a key under construction is permitted if we have the
+ * authorisation token handy */
+ if (PTR_ERR(key_ref) == -EACCES) {
+ instkey = key_get_instantiation_authkey(keyid);
+ if (!IS_ERR(instkey)) {
+ key_put(instkey);
+ key_ref = lookup_user_key(keyid,
+ KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (!IS_ERR(key_ref))
+ goto okay;
+ }
+ }
+
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+okay:
+ key = key_ref_to_ptr(key_ref);
+ desclen = strlen(key->description);
+
+ /* calculate how much information we're going to return */
+ ret = -ENOMEM;
+ infobuf = kasprintf(GFP_KERNEL,
+ "%s;%d;%d;%08x;",
+ key->type->name,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
+ key->perm);
+ if (!infobuf)
+ goto error2;
+ infolen = strlen(infobuf);
+ ret = infolen + desclen + 1;
+
+ /* consider returning the data */
+ if (buffer && buflen >= ret) {
+ if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+ copy_to_user(buffer + infolen, key->description,
+ desclen + 1) != 0)
+ ret = -EFAULT;
+ }
+
+ kfree(infobuf);
+error2:
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
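+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed): fetch the
+ * "type;uid;gid;perm;description" string documented above; the description
+ * is everything after the fourth semicolon:
+ *
+ *   char *desc;
+ *
+ *   if (keyctl_describe_alloc(id, &desc) == -1)
+ *       perror("keyctl_describe_alloc");
+ *   else {
+ *       printf("%s\n", desc);
+ *       free(desc);
+ *   }
+ */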
+
+/*
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key. Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring). Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
+ */
+long keyctl_keyring_search(key_serial_t ringid,
+ const char __user *_type,
+ const char __user *_description,
+ key_serial_t destringid)
+{
+ struct key_type *ktype;
+ key_ref_t keyring_ref, key_ref, dest_ref;
+ char type[32], *description;
+ long ret;
+
+ /* pull the type and description into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+
+ /* get the keyring at which to begin the search */
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error2;
+ }
+
+ /* get the destination keyring if specified */
+ dest_ref = NULL;
+ if (destringid) {
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref)) {
+ ret = PTR_ERR(dest_ref);
+ goto error3;
+ }
+ }
+
+ /* find the key type */
+ ktype = key_type_lookup(type);
+ if (IS_ERR(ktype)) {
+ ret = PTR_ERR(ktype);
+ goto error4;
+ }
+
+ /* do the search */
+ key_ref = keyring_search(keyring_ref, ktype, description, true);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+
+ /* treat lack or presence of a negative key the same */
+ if (ret == -EAGAIN)
+ ret = -ENOKEY;
+ goto error5;
+ }
+
+ /* link the resulting key to the destination keyring if we can */
+ if (dest_ref) {
+ ret = key_permission(key_ref, KEY_NEED_LINK);
+ if (ret < 0)
+ goto error6;
+
+ ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
+ if (ret < 0)
+ goto error6;
+ }
+
+ ret = key_ref_to_ptr(key_ref)->serial;
+
+error6:
+ key_ref_put(key_ref);
+error5:
+ key_type_put(ktype);
+error4:
+ key_ref_put(dest_ref);
+error3:
+ key_ref_put(keyring_ref);
+error2:
+ kfree(description);
+error:
+ return ret;
+}
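+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed): search the
+ * session keyring for a "user" key and link any match into the process
+ * keyring (a destination of 0 skips the link step):
+ *
+ *   long found = keyctl_search(KEY_SPEC_SESSION_KEYRING,
+ *                              "user", "example:desc",
+ *                              KEY_SPEC_PROCESS_KEYRING);
+ *   if (found == -1)
+ *       perror("keyctl_search");
+ */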
+
+/*
+ * Call the read method
+ */
+static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen)
+{
+ long ret;
+
+ down_read(&key->sem);
+ ret = key_validate(key);
+ if (ret == 0)
+ ret = key->type->read(key, buffer, buflen);
+ up_read(&key->sem);
+ return ret;
+}
+
+/*
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
+ */
+long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+ char *key_data = NULL;
+ size_t key_data_len;
+
+ /* find the key first */
+ key_ref = lookup_user_key(keyid, 0, KEY_DEFER_PERM_CHECK);
+ if (IS_ERR(key_ref)) {
+ ret = -ENOKEY;
+ goto out;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ ret = key_read_state(key);
+ if (ret < 0)
+ goto key_put_out; /* Negatively instantiated */
+
+ /* see if we can read it directly */
+ ret = key_permission(key_ref, KEY_NEED_READ);
+ if (ret == 0)
+ goto can_read_key;
+ if (ret != -EACCES)
+ goto key_put_out;
+
+ /* we can't; see if it's searchable from this process's keyrings
+ * - we automatically take account of the fact that it may be
+ * dangling off an instantiation key
+ */
+ if (!is_key_possessed(key_ref)) {
+ ret = -EACCES;
+ goto key_put_out;
+ }
+
+ /* the key is probably readable - now try to read it */
+can_read_key:
+ if (!key->type->read) {
+ ret = -EOPNOTSUPP;
+ goto key_put_out;
+ }
+
+ if (!buffer || !buflen) {
+ /* Get the key length from the read method */
+ ret = __keyctl_read_key(key, NULL, 0);
+ goto key_put_out;
+ }
+
+ /*
+ * Read the data with the semaphore held (since we might sleep)
+ * to protect against the key being updated or revoked.
+ *
+ * Allocate a temporary buffer to hold the key data before
+ * transferring it to the user buffer, to avoid a potential
+ * deadlock involving a page fault and mmap_lock.
+ *
+ * key_data_len = (buflen <= PAGE_SIZE)
+ * ? buflen : actual length of key data
+ *
+ * This prevents allocating an arbitrarily large buffer which can
+ * be much larger than the actual key length. In the latter case,
+ * at least two passes of this loop are required.
+ */
+ key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
+ for (;;) {
+ if (key_data_len) {
+ key_data = kvmalloc(key_data_len, GFP_KERNEL);
+ if (!key_data) {
+ ret = -ENOMEM;
+ goto key_put_out;
+ }
+ }
+
+ ret = __keyctl_read_key(key, key_data, key_data_len);
+
+ /*
+ * Read methods will just return the required length without
+ * any copying if the provided length isn't large enough.
+ */
+ if (ret <= 0 || ret > buflen)
+ break;
+
+ /*
+ * The key may change (unlikely) between two consecutive
+ * __keyctl_read_key() calls. In this case, we reallocate
+ * a larger buffer and redo the key read when
+ * key_data_len < ret <= buflen.
+ */
+ if (ret > key_data_len) {
+ if (unlikely(key_data))
+ kvfree_sensitive(key_data, key_data_len);
+ key_data_len = ret;
+ continue; /* Allocate buffer */
+ }
+
+ if (copy_to_user(buffer, key_data, ret))
+ ret = -EFAULT;
+ break;
+ }
+ kvfree_sensitive(key_data, key_data_len);
+
+key_put_out:
+ key_put(key);
+out:
+ return ret;
+}
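+
+/*
+ * Illustrative userspace sketch of the two-pass pattern described above:
+ * KEYCTL_READ with a NULL buffer returns the payload length, after which a
+ * correctly sized buffer can be allocated (libkeyutils wraps the whole
+ * sequence as keyctl_read_alloc()):
+ *
+ *   long len = keyctl_read(id, NULL, 0);
+ *   if (len != -1) {
+ *       char *buf = malloc(len);
+ *       if (buf && keyctl_read(id, buf, len) >= 0)
+ *           fwrite(buf, 1, len, stdout);
+ *       free(buf);
+ *   }
+ */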
+
+/*
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability. If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key. The quota deduction will be moved from the old user to
+ * the new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
+{
+ struct key_user *newowner, *zapowner = NULL;
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+ ret = -EINVAL;
+ if ((user != (uid_t) -1) && !uid_valid(uid))
+ goto error;
+ if ((group != (gid_t) -1) && !gid_valid(gid))
+ goto error;
+
+ ret = 0;
+ if (user == (uid_t) -1 && group == (gid_t) -1)
+ goto error;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* make the changes with the locks held to prevent chown/chown races */
+ ret = -EACCES;
+ down_write(&key->sem);
+
+ {
+ bool is_privileged_op = false;
+
+ /* only the sysadmin can chown a key to some other UID */
+ if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
+ is_privileged_op = true;
+
+ /* only the sysadmin can set the key's GID to a group other
+ * than one of those that the current process subscribes to */
+ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
+ is_privileged_op = true;
+
+ if (is_privileged_op && !capable(CAP_SYS_ADMIN))
+ goto error_put;
+ }
+
+ /* change the UID */
+ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
+ ret = -ENOMEM;
+ newowner = key_user_lookup(uid);
+ if (!newowner)
+ goto error_put;
+
+ /* transfer the quota burden to the new user */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&newowner->lock);
+ if (newowner->qnkeys + 1 > maxkeys ||
+ newowner->qnbytes + key->quotalen > maxbytes ||
+ newowner->qnbytes + key->quotalen <
+ newowner->qnbytes)
+ goto quota_overrun;
+
+ newowner->qnkeys++;
+ newowner->qnbytes += key->quotalen;
+ spin_unlock(&newowner->lock);
+
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
+
+ atomic_dec(&key->user->nkeys);
+ atomic_inc(&newowner->nkeys);
+
+ if (key->state != KEY_IS_UNINSTANTIATED) {
+ atomic_dec(&key->user->nikeys);
+ atomic_inc(&newowner->nikeys);
+ }
+
+ zapowner = key->user;
+ key->user = newowner;
+ key->uid = uid;
+ }
+
+ /* change the GID */
+ if (group != (gid_t) -1)
+ key->gid = gid;
+
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ ret = 0;
+
+error_put:
+ up_write(&key->sem);
+ key_put(key);
+ if (zapowner)
+ key_user_put(zapowner);
+error:
+ return ret;
+
+quota_overrun:
+ spin_unlock(&newowner->lock);
+ zapowner = newowner;
+ ret = -EDQUOT;
+ goto error_put;
+}
+
+/*
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
+ */
+long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+
+ ret = -EINVAL;
+ if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
+ goto error;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* make the changes with the locks held to prevent chown/chmod races */
+ ret = -EACCES;
+ down_write(&key->sem);
+
+ /* if we're not the sysadmin, we can only change a key that we own */
+ if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) {
+ key->perm = perm;
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ ret = 0;
+ }
+
+ up_write(&key->sem);
+ key_put(key);
+error:
+ return ret;
+}
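+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper and constants assumed):
+ * grant the possessor everything but restrict the owner to view and read:
+ *
+ *   key_perm_t perm = KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ;
+ *
+ *   if (keyctl_setperm(id, perm) == -1)
+ *       perror("keyctl_setperm");
+ */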
+
+/*
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
+ */
+static long get_instantiation_keyring(key_serial_t ringid,
+ struct request_key_auth *rka,
+ struct key **_dest_keyring)
+{
+ key_ref_t dkref;
+
+ *_dest_keyring = NULL;
+
+ /* just return a NULL pointer if we weren't asked to make a link */
+ if (ringid == 0)
+ return 0;
+
+ /* if a specific keyring is nominated by ID, then use that */
+ if (ringid > 0) {
+ dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dkref))
+ return PTR_ERR(dkref);
+ *_dest_keyring = key_ref_to_ptr(dkref);
+ return 0;
+ }
+
+ if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
+ return -EINVAL;
+
+ /* otherwise specify the destination keyring recorded in the
+ * authorisation key (any KEY_SPEC_*_KEYRING) */
+ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
+ *_dest_keyring = key_get(rka->dest_keyring);
+ return 0;
+ }
+
+ return -ENOKEY;
+}
+
+/*
+ * Change the request_key authorisation key on the current process.
+ */
+static int keyctl_change_reqkey_auth(struct key *key)
+{
+ struct cred *new;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ key_put(new->request_key_auth);
+ new->request_key_auth = key_get(key);
+
+ return commit_creds(new);
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+static long keyctl_instantiate_key_common(key_serial_t id,
+ struct iov_iter *from,
+ key_serial_t ringid)
+{
+ const struct cred *cred = current_cred();
+ struct request_key_auth *rka;
+ struct key *instkey, *dest_keyring;
+ size_t plen = from ? iov_iter_count(from) : 0;
+ void *payload;
+ long ret;
+
+ kenter("%d,,%zu,%d", id, plen, ringid);
+
+ if (!plen)
+ from = NULL;
+
+ ret = -EINVAL;
+ if (plen > 1024 * 1024 - 1)
+ goto error;
+
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = cred->request_key_auth;
+ if (!instkey)
+ goto error;
+
+ rka = instkey->payload.data[0];
+ if (rka->target_key->serial != id)
+ goto error;
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+ if (from) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
+
+ ret = -EFAULT;
+ if (!copy_from_iter_full(payload, plen, from))
+ goto error2;
+ }
+
+ /* find the destination keyring amongst those belonging to the
+ * requesting task */
+ ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+ if (ret < 0)
+ goto error2;
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_instantiate_and_link(rka->target_key, payload, plen,
+ dest_keyring, instkey);
+
+ key_put(dest_keyring);
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0)
+ keyctl_change_reqkey_auth(NULL);
+
+error2:
+ kvfree_sensitive(payload, plen);
+error:
+ return ret;
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t ringid)
+{
+ if (_payload && plen) {
+ struct iovec iov;
+ struct iov_iter from;
+ int ret;
+
+ ret = import_single_range(ITER_SOURCE, (void __user *)_payload, plen,
+ &iov, &from);
+ if (unlikely(ret))
+ return ret;
+
+ return keyctl_instantiate_key_common(id, &from, ringid);
+ }
+
+ return keyctl_instantiate_key_common(id, NULL, ringid);
+}
+
+/*
+ * Instantiate a key with the specified multipart payload and link the key into
+ * the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_iov(key_serial_t id,
+ const struct iovec __user *_payload_iov,
+ unsigned ioc,
+ key_serial_t ringid)
+{
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct iov_iter from;
+ long ret;
+
+ if (!_payload_iov)
+ ioc = 0;
+
+ ret = import_iovec(ITER_SOURCE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), &iov, &from);
+ if (ret < 0)
+ return ret;
+ ret = keyctl_instantiate_key_common(id, &from, ringid);
+ kfree(iov);
+ return ret;
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
+{
+ return keyctl_reject_key(id, timeout, ENOKEY, ringid);
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and error
+ * code and link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the specified error code until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
+ key_serial_t ringid)
+{
+ const struct cred *cred = current_cred();
+ struct request_key_auth *rka;
+ struct key *instkey, *dest_keyring;
+ long ret;
+
+ kenter("%d,%u,%u,%d", id, timeout, error, ringid);
+
+ /* must be a valid error code and mustn't be a kernel special */
+ if (error <= 0 ||
+ error >= MAX_ERRNO ||
+ error == ERESTARTSYS ||
+ error == ERESTARTNOINTR ||
+ error == ERESTARTNOHAND ||
+ error == ERESTART_RESTARTBLOCK)
+ return -EINVAL;
+
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = cred->request_key_auth;
+ if (!instkey)
+ goto error;
+
+ rka = instkey->payload.data[0];
+ if (rka->target_key->serial != id)
+ goto error;
+
+ /* find the destination keyring if present (which must also be
+ * writable) */
+ ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+ if (ret < 0)
+ goto error;
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_reject_and_link(rka->target_key, timeout, error,
+ dest_keyring, instkey);
+
+ key_put(dest_keyring);
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0)
+ keyctl_change_reqkey_auth(NULL);
+
+error:
+ return ret;
+}
+
+/*
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
+ */
+long keyctl_set_reqkey_keyring(int reqkey_defl)
+{
+ struct cred *new;
+ int ret, old_setting;
+
+ old_setting = current_cred_xxx(jit_keyring);
+
+ if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
+ return old_setting;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ switch (reqkey_defl) {
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ ret = install_thread_keyring_to_cred(new);
+ if (ret < 0)
+ goto error;
+ goto set;
+
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0)
+ goto error;
+ goto set;
+
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+ goto set;
+
+ case KEY_REQKEY_DEFL_NO_CHANGE:
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+
+set:
+ new->jit_keyring = reqkey_defl;
+ commit_creds(new);
+ return old_setting;
+error:
+ abort_creds(new);
+ return ret;
+}
+
+/*
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time. The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * Keys with KEY_FLAG_KEEP set should not be timed out.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_set_timeout(key_serial_t id, unsigned timeout)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ long ret;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ /* setting the timeout on a key under construction is permitted
+ * if we have the authorisation token handy */
+ if (PTR_ERR(key_ref) == -EACCES) {
+ instkey = key_get_instantiation_authkey(id);
+ if (!IS_ERR(instkey)) {
+ key_put(instkey);
+ key_ref = lookup_user_key(id,
+ KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (!IS_ERR(key_ref))
+ goto okay;
+ }
+ }
+
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+okay:
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags)) {
+ ret = -EPERM;
+ } else {
+ key_set_timeout(key, timeout);
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ }
+ key_put(key);
+
+error:
+ return ret;
+}
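+
+/*
+ * Illustrative userspace sketch (libkeyutils wrapper assumed): expire a key
+ * ten minutes from now, or pass 0 to clear an existing timeout:
+ *
+ *   if (keyctl_set_timeout(id, 600) == -1)
+ *       perror("keyctl_set_timeout");
+ */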
+
+/*
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated. It has the effect of making
+ * the keys of the request_key() caller that created the key available to
+ * request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation key in their process keyrings, with
+ * Search permission granted to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned. The authorisation key can be read to get
+ * the callout information passed to request_key().
+ */
+long keyctl_assume_authority(key_serial_t id)
+{
+ struct key *authkey;
+ long ret;
+
+ /* special key IDs aren't permitted */
+ ret = -EINVAL;
+ if (id < 0)
+ goto error;
+
+ /* we divest ourselves of authority if given an ID of 0 */
+ if (id == 0) {
+ ret = keyctl_change_reqkey_auth(NULL);
+ goto error;
+ }
+
+ /* attempt to assume the authority temporarily granted to us whilst we
+ * instantiate the specified key
+ * - the authorisation key must be in the current task's keyrings
+ * somewhere
+ */
+ authkey = key_get_instantiation_authkey(id);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error;
+ }
+
+ ret = keyctl_change_reqkey_auth(authkey);
+ if (ret == 0)
+ ret = authkey->serial;
+ key_put(authkey);
+error:
+ return ret;
+}
+
+/*
+ * Get a key's LSM security label.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
+ */
+long keyctl_get_security(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ char *context;
+ long ret;
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref)) {
+ if (PTR_ERR(key_ref) != -EACCES)
+ return PTR_ERR(key_ref);
+
+ /* viewing a key under construction is also permitted if we
+ * have the authorisation token handy */
+ instkey = key_get_instantiation_authkey(keyid);
+ if (IS_ERR(instkey))
+ return PTR_ERR(instkey);
+ key_put(instkey);
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ ret = security_key_getsecurity(key, &context);
+ if (ret == 0) {
+ /* if no information was returned, give userspace an empty
+ * string */
+ ret = 1;
+ if (buffer && buflen > 0 &&
+ copy_to_user(buffer, "", 1) != 0)
+ ret = -EFAULT;
+ } else if (ret > 0) {
+ /* return as much data as there's room for */
+ if (buffer && buflen > 0) {
+ if (buflen > ret)
+ buflen = ret;
+
+ if (copy_to_user(buffer, context, buflen) != 0)
+ ret = -EFAULT;
+ }
+
+ kfree(context);
+ }
+
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_session_to_parent(void)
+{
+ struct task_struct *me, *parent;
+ const struct cred *mycred, *pcred;
+ struct callback_head *newwork, *oldwork;
+ key_ref_t keyring_r;
+ struct cred *cred;
+ int ret;
+
+ keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
+ if (IS_ERR(keyring_r))
+ return PTR_ERR(keyring_r);
+
+ ret = -ENOMEM;
+
+ /* our parent is going to need a new cred struct, a new tgcred struct
+ * and new security data, so we allocate them here to prevent ENOMEM in
+ * our parent */
+ cred = cred_alloc_blank();
+ if (!cred)
+ goto error_keyring;
+ newwork = &cred->rcu;
+
+ cred->session_keyring = key_ref_to_ptr(keyring_r);
+ keyring_r = NULL;
+ init_task_work(newwork, key_change_session_keyring);
+
+ me = current;
+ rcu_read_lock();
+ write_lock_irq(&tasklist_lock);
+
+ ret = -EPERM;
+ oldwork = NULL;
+ parent = rcu_dereference_protected(me->real_parent,
+ lockdep_is_held(&tasklist_lock));
+
+ /* the parent mustn't be init and mustn't be a kernel thread */
+ if (parent->pid <= 1 || !parent->mm)
+ goto unlock;
+
+ /* the parent must be single threaded */
+ if (!thread_group_empty(parent))
+ goto unlock;
+
+ /* the parent and the child must have different session keyrings or
+ * there's no point */
+ mycred = current_cred();
+ pcred = __task_cred(parent);
+ if (mycred == pcred ||
+ mycred->session_keyring == pcred->session_keyring) {
+ ret = 0;
+ goto unlock;
+ }
+
+ /* the parent must have the same effective ownership and mustn't be
+ * SUID/SGID */
+ if (!uid_eq(pcred->uid, mycred->euid) ||
+ !uid_eq(pcred->euid, mycred->euid) ||
+ !uid_eq(pcred->suid, mycred->euid) ||
+ !gid_eq(pcred->gid, mycred->egid) ||
+ !gid_eq(pcred->egid, mycred->egid) ||
+ !gid_eq(pcred->sgid, mycred->egid))
+ goto unlock;
+
+ /* the keyrings must have the same UID */
+ if ((pcred->session_keyring &&
+ !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->session_keyring->uid, mycred->euid))
+ goto unlock;
+
+ /* cancel an already pending keyring replacement */
+ oldwork = task_work_cancel(parent, key_change_session_keyring);
+
+ /* the replacement session keyring is applied just prior to userspace
+ * restarting */
+ ret = task_work_add(parent, newwork, TWA_RESUME);
+ if (!ret)
+ newwork = NULL;
+unlock:
+ write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
+ if (oldwork)
+ put_cred(container_of(oldwork, struct cred, rcu));
+ if (newwork)
+ put_cred(cred);
+ return ret;
+
+error_keyring:
+ key_ref_put(keyring_r);
+ return ret;
+}
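+
+/*
+ * Illustrative userspace sketch (libkeyutils wrappers assumed): a helper
+ * program might create a fresh anonymous session keyring and hand it to the
+ * invoking shell:
+ *
+ *   if (keyctl_join_session_keyring(NULL) == -1 ||
+ *       keyctl_session_to_parent() == -1)
+ *       perror("session keyring");
+ */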
+
+/*
+ * Apply a restriction to a given keyring.
+ *
+ * The caller must have Setattr permission to change keyring restrictions.
+ *
+ * The requested type name may be a NULL pointer to reject all attempts
+ * to link to the keyring. In this case, _restriction must also be NULL.
+ * Otherwise, both _type and _restriction must be non-NULL.
+ *
+ * Returns 0 if successful.
+ */
+long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
+ const char __user *_restriction)
+{
+ key_ref_t key_ref;
+ char type[32];
+ char *restriction = NULL;
+ long ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ ret = -EINVAL;
+ if (_type) {
+ if (!_restriction)
+ goto error;
+
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ restriction = strndup_user(_restriction, PAGE_SIZE);
+ if (IS_ERR(restriction)) {
+ ret = PTR_ERR(restriction);
+ goto error;
+ }
+ } else {
+ if (_restriction)
+ goto error;
+ }
+
+ ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
+ kfree(restriction);
+error:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+/*
+ * Watch for changes to a key.
+ *
+ * The caller must have View permission to watch a key or keyring.
+ */
+long keyctl_watch_key(key_serial_t id, int watch_queue_fd, int watch_id)
+{
+ struct watch_queue *wqueue;
+ struct watch_list *wlist = NULL;
+ struct watch *watch = NULL;
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+
+ if (watch_id < -1 || watch_id > 0xff)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ key = key_ref_to_ptr(key_ref);
+
+ wqueue = get_watch_queue(watch_queue_fd);
+ if (IS_ERR(wqueue)) {
+ ret = PTR_ERR(wqueue);
+ goto err_key;
+ }
+
+ if (watch_id >= 0) {
+ ret = -ENOMEM;
+ if (!key->watchers) {
+ wlist = kzalloc(sizeof(*wlist), GFP_KERNEL);
+ if (!wlist)
+ goto err_wqueue;
+ init_watch_list(wlist, NULL);
+ }
+
+ watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+ if (!watch)
+ goto err_wlist;
+
+ init_watch(watch, wqueue);
+ watch->id = key->serial;
+ watch->info_id = (u32)watch_id << WATCH_INFO_ID__SHIFT;
+
+ ret = security_watch_key(key);
+ if (ret < 0)
+ goto err_watch;
+
+ down_write(&key->sem);
+ if (!key->watchers) {
+ key->watchers = wlist;
+ wlist = NULL;
+ }
+
+ ret = add_watch_to_object(watch, key->watchers);
+ up_write(&key->sem);
+
+ if (ret == 0)
+ watch = NULL;
+ } else {
+ ret = -EBADSLT;
+ if (key->watchers) {
+ down_write(&key->sem);
+ ret = remove_watch_from_object(key->watchers,
+ wqueue, key_serial(key),
+ false);
+ up_write(&key->sem);
+ }
+ }
+
+err_watch:
+ kfree(watch);
+err_wlist:
+ kfree(wlist);
+err_wqueue:
+ put_watch_queue(wqueue);
+err_key:
+ key_put(key);
+ return ret;
+}
+#endif /* CONFIG_KEY_NOTIFICATIONS */
+
+/*
+ * Get keyrings subsystem capabilities.
+ */
+long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen)
+{
+ size_t size = buflen;
+
+ if (size > 0) {
+ if (size > sizeof(keyrings_capabilities))
+ size = sizeof(keyrings_capabilities);
+ if (copy_to_user(_buffer, keyrings_capabilities, size) != 0)
+ return -EFAULT;
+ if (size < buflen &&
+ clear_user(_buffer + size, buflen - size) != 0)
+ return -EFAULT;
+ }
+
+ return sizeof(keyrings_capabilities);
+}
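+
+/*
+ * Editorial sketch (not kernel code): probing the capability bytes from
+ * userspace.  The two-byte buffer size is an illustrative choice; as the
+ * code above shows, bytes the kernel doesn't fill are cleared.
+ *
+ *   unsigned char caps[2] = {0};
+ *
+ *   if (syscall(__NR_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps)) > 0 &&
+ *       (caps[0] & KEYCTL_CAPS0_MOVE))
+ *           ;  // KEYCTL_MOVE is supported by this kernel
+ */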
+
+/*
+ * The key control system call
+ */
+SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
+{
+ switch (option) {
+ case KEYCTL_GET_KEYRING_ID:
+ return keyctl_get_keyring_ID((key_serial_t) arg2,
+ (int) arg3);
+
+ case KEYCTL_JOIN_SESSION_KEYRING:
+ return keyctl_join_session_keyring((const char __user *) arg2);
+
+ case KEYCTL_UPDATE:
+ return keyctl_update_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_REVOKE:
+ return keyctl_revoke_key((key_serial_t) arg2);
+
+ case KEYCTL_DESCRIBE:
+ return keyctl_describe_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (unsigned) arg4);
+
+ case KEYCTL_CLEAR:
+ return keyctl_keyring_clear((key_serial_t) arg2);
+
+ case KEYCTL_LINK:
+ return keyctl_keyring_link((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_UNLINK:
+ return keyctl_keyring_unlink((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_SEARCH:
+ return keyctl_keyring_search((key_serial_t) arg2,
+ (const char __user *) arg3,
+ (const char __user *) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_READ:
+ return keyctl_read_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_CHOWN:
+ return keyctl_chown_key((key_serial_t) arg2,
+ (uid_t) arg3,
+ (gid_t) arg4);
+
+ case KEYCTL_SETPERM:
+ return keyctl_setperm_key((key_serial_t) arg2,
+ (key_perm_t) arg3);
+
+ case KEYCTL_INSTANTIATE:
+ return keyctl_instantiate_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_NEGATE:
+ return keyctl_negate_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (key_serial_t) arg4);
+
+ case KEYCTL_SET_REQKEY_KEYRING:
+ return keyctl_set_reqkey_keyring(arg2);
+
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout((key_serial_t) arg2,
+ (unsigned) arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority((key_serial_t) arg2);
+
+ case KEYCTL_GET_SECURITY:
+ return keyctl_get_security((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
+ case KEYCTL_REJECT:
+ return keyctl_reject_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return keyctl_instantiate_key_iov(
+ (key_serial_t) arg2,
+ (const struct iovec __user *) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key((key_serial_t) arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
+ case KEYCTL_DH_COMPUTE:
+ return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
+ (char __user *) arg3, (size_t) arg4,
+ (struct keyctl_kdf_params __user *) arg5);
+
+ case KEYCTL_RESTRICT_KEYRING:
+ return keyctl_restrict_keyring((key_serial_t) arg2,
+ (const char __user *) arg3,
+ (const char __user *) arg4);
+
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query((key_serial_t)arg2,
+ (const char __user *)arg4,
+ (struct keyctl_pkey_query __user *)arg5);
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(
+ option,
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (void __user *)arg5);
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (const void __user *)arg5);
+
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move((key_serial_t)arg2,
+ (key_serial_t)arg3,
+ (key_serial_t)arg4,
+ (unsigned int)arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities((unsigned char __user *)arg2, (size_t)arg3);
+
+ case KEYCTL_WATCH_KEY:
+ return keyctl_watch_key((key_serial_t)arg2, (int)arg3, (int)arg4);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
new file mode 100644
index 000000000..97bc27bbf
--- /dev/null
+++ b/security/keys/keyctl_pkey.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Public-key operation keyctls
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/parser.h>
+#include <linux/uaccess.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
+{
+ kfree(params->info);
+ key_put(params->key);
+}
+
+enum {
+ Opt_err,
+ Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */
+ Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */
+};
+
+static const match_table_t param_keys = {
+ { Opt_enc, "enc=%s" },
+ { Opt_hash, "hash=%s" },
+ { Opt_err, NULL }
+};
+
+/*
+ * Parse the information string which consists of key=val pairs.
+ */
+static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
+{
+ unsigned long token_mask = 0;
+ substring_t args[MAX_OPT_ARGS];
+ char *c = params->info, *p, *q;
+ int token;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, param_keys, args);
+ if (token == Opt_err)
+ return -EINVAL;
+ if (__test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+ q = args[0].from;
+ if (!q[0])
+ return -EINVAL;
+
+ switch (token) {
+ case Opt_enc:
+ params->encoding = q;
+ break;
+
+ case Opt_hash:
+ params->hash_algo = q;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Interpret parameters. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get(key_serial_t id,
+ const char __user *_info,
+ struct kernel_pkey_params *params)
+{
+ key_ref_t key_ref;
+ void *p;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ p = strndup_user(_info, PAGE_SIZE);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+ params->info = p;
+
+ ret = keyctl_pkey_params_parse(params);
+ if (ret < 0)
+ return ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ params->key = key_ref_to_ptr(key_ref);
+
+ if (!params->key->type->asym_query)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * Get parameters from userspace. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ int op,
+ struct kernel_pkey_params *params)
+{
+ struct keyctl_pkey_params uparams;
+ struct kernel_pkey_query info;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0)
+ return -EFAULT;
+
+ ret = keyctl_pkey_params_get(uparams.key_id, _info, params);
+ if (ret < 0)
+ return ret;
+
+ ret = params->key->type->asym_query(params, &info);
+ if (ret < 0)
+ return ret;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ if (uparams.in_len > info.max_dec_size ||
+ uparams.out_len > info.max_enc_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_DECRYPT:
+ if (uparams.in_len > info.max_enc_size ||
+ uparams.out_len > info.max_dec_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ if (uparams.in_len > info.max_data_size ||
+ uparams.out_len > info.max_sig_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_VERIFY:
+ if (uparams.in_len > info.max_data_size ||
+ uparams.in2_len > info.max_sig_size)
+ return -EINVAL;
+ break;
+ default:
+ BUG();
+ }
+
+ params->in_len = uparams.in_len;
+ params->out_len = uparams.out_len; /* Note: same as in2_len */
+ return 0;
+}
+
+/*
+ * Query information about an asymmetric key.
+ */
+long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ struct kernel_pkey_params params;
+ struct kernel_pkey_query res;
+ long ret;
+
+ ret = keyctl_pkey_params_get(id, _info, &params);
+ if (ret < 0)
+ goto error;
+
+ ret = params.key->type->asym_query(&params, &res);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_to_user(_res, &res, sizeof(res)) == 0 &&
+ clear_user(_res->__spare, sizeof(_res->__spare)) == 0)
+ ret = 0;
+
+error:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
+
+/*
+ * Encrypt/decrypt/sign
+ *
+ * Encrypt data, decrypt data or sign data using a public key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the encoding and hash= selects the OID to go in that
+ * particular encoding if required. If enc= isn't supplied, it's assumed that
+ * the caller is supplying raw values.
+ *
+ * If successful, the amount of data written into the output buffer is
+ * returned.
+ */
+long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ struct kernel_pkey_params params;
+ void *in, *out;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, op, &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_eds_op)
+ goto error_params;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ params.op = kernel_pkey_encrypt;
+ break;
+ case KEYCTL_PKEY_DECRYPT:
+ params.op = kernel_pkey_decrypt;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ params.op = kernel_pkey_sign;
+ break;
+ default:
+ BUG();
+ }
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ ret = -ENOMEM;
+ out = kmalloc(params.out_len, GFP_KERNEL);
+ if (!out)
+ goto error_in;
+
+ ret = params.key->type->asym_eds_op(&params, in, out);
+ if (ret < 0)
+ goto error_out;
+
+ if (copy_to_user(_out, out, ret) != 0)
+ ret = -EFAULT;
+
+error_out:
+ kfree(out);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
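+
+/*
+ * Editorial sketch (not kernel code): signing a digest from userspace via the
+ * raw syscall.  The key ID "key", the digest buffer and the signature buffer
+ * size are illustrative; out_len should really come from KEYCTL_PKEY_QUERY's
+ * max_sig_size.
+ *
+ *   struct keyctl_pkey_params p = { .key_id = key };
+ *   unsigned char sig[512];
+ *
+ *   p.in_len  = sizeof(digest);   // e.g. 32 bytes for SHA-256
+ *   p.out_len = sizeof(sig);
+ *   long n = syscall(__NR_keyctl, KEYCTL_PKEY_SIGN, &p,
+ *                    "enc=pkcs1 hash=sha256", digest, sig);
+ *   // On success, n is the number of signature bytes written to sig[].
+ */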
+
+/*
+ * Verify a signature.
+ *
+ * Verify a public key signature using the given key, or if not given, search
+ * for a matching key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the signature blob encoding and hash= selects the OID
+ * to go in that particular encoding. If enc= isn't supplied, it's assumed
+ * that the caller is supplying raw values.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ struct kernel_pkey_params params;
+ void *in, *in2;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY,
+ &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_verify_signature)
+ goto error_params;
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ in2 = memdup_user(_in2, params.in2_len);
+ if (IS_ERR(in2)) {
+ ret = PTR_ERR(in2);
+ goto error_in;
+ }
+
+ params.op = kernel_pkey_verify;
+ ret = params.key->type->asym_verify_signature(&params, in, in2);
+
+ kfree(in2);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
new file mode 100644
index 000000000..4448758f6
--- /dev/null
+++ b/security/keys/keyring.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Keyring handling
+ *
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
+#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
+#include <linux/uaccess.h>
+#include <net/net_namespace.h>
+#include "internal.h"
+
+/*
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
+ */
+#define KEYRING_SEARCH_MAX_DEPTH 6
+
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
+
+static DEFINE_RWLOCK(keyring_name_lock);
+
+/*
+ * Clean up the bits of user_namespace that belong to us.
+ */
+void key_free_user_ns(struct user_namespace *ns)
+{
+ write_lock(&keyring_name_lock);
+ list_del_init(&ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+
+ key_put(ns->user_keyring_register);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
+}
+
+/*
+ * The keyring key type definition. Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep);
+static void keyring_free_preparse(struct key_preparsed_payload *prep);
+static int keyring_instantiate(struct key *keyring,
+ struct key_preparsed_payload *prep);
+static void keyring_revoke(struct key *keyring);
+static void keyring_destroy(struct key *keyring);
+static void keyring_describe(const struct key *keyring, struct seq_file *m);
+static long keyring_read(const struct key *keyring,
+ char *buffer, size_t buflen);
+
+struct key_type key_type_keyring = {
+ .name = "keyring",
+ .def_datalen = 0,
+ .preparse = keyring_preparse,
+ .free_preparse = keyring_free_preparse,
+ .instantiate = keyring_instantiate,
+ .revoke = keyring_revoke,
+ .destroy = keyring_destroy,
+ .describe = keyring_describe,
+ .read = keyring_read,
+};
+EXPORT_SYMBOL(key_type_keyring);
+
+/*
+ * Mutex to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
+ */
+static DEFINE_MUTEX(keyring_serialise_link_lock);
+
+/*
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one and it doesn't begin with a dot).
+ */
+static void keyring_publish_name(struct key *keyring)
+{
+ struct user_namespace *ns = current_user_ns();
+
+ if (keyring->description &&
+ keyring->description[0] &&
+ keyring->description[0] != '.') {
+ write_lock(&keyring_name_lock);
+ list_add_tail(&keyring->name_link, &ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+ }
+}
+
+/*
+ * Preparse a keyring payload
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep)
+{
+ return prep->datalen != 0 ? -EINVAL : 0;
+}
+
+/*
+ * Free a preparse of a keyring payload
+ */
+static void keyring_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Initialise a keyring.
+ *
+ * Returns 0; any payload data will already have been rejected with -EINVAL
+ * by keyring_preparse().
+ */
+static int keyring_instantiate(struct key *keyring,
+ struct key_preparsed_payload *prep)
+{
+ assoc_array_init(&keyring->keys);
+ /* make the keyring available by name if it has one */
+ keyring_publish_name(keyring);
+ return 0;
+}
+
+/*
+ * Multiply a 64-bit value by a 32-bit value to give a 96-bit product and fold
+ * it back to 64 bits. Ideally we'd fold the carry back too, but that requires
+ * inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
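+
+/*
+ * Editorial note: writing x as 32-bit halves xhi:xlo, the full product is
+ *
+ *   x * y = ((xhi * y) << 32) + (xlo * y)
+ *
+ * which can occupy 96 bits.  The function keeps the low 64 bits of that sum
+ * and adds the spill-over word (hi >> 32) back in at the bottom - that is
+ * the "fold" - rather than letting it be lost to truncation.
+ */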
+
+/*
+ * Hash a key type and description.
+ */
+static void hash_key_type_and_desc(struct keyring_index_key *index_key)
+{
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ piece = (unsigned long)index_key->domain_tag;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
+
+ /* Squidge all the keyrings into a separate part of the tree from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+ hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ hash = (hash + (hash << level_shift)) & ~fan_mask;
+ index_key->hash = hash;
+}
+
+/*
+ * Finalise an index key to include a part of the description actually in the
+ * index key, to set the domain tag and to calculate the hash.
+ */
+void key_set_index_key(struct keyring_index_key *index_key)
+{
+ static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
+ size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));
+
+ memcpy(index_key->desc, index_key->description, n);
+
+ if (!index_key->domain_tag) {
+ if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
+ index_key->domain_tag = current->nsproxy->net_ns->key_domain;
+ else
+ index_key->domain_tag = &default_domain_tag;
+ }
+
+ hash_key_type_and_desc(index_key);
+}
+
+/**
+ * key_put_tag - Release a ref on a tag.
+ * @tag: The tag to release.
+ *
+ * This releases a reference on the given tag and returns true if that ref
+ * was the last one.
+ */
+bool key_put_tag(struct key_tag *tag)
+{
+ if (refcount_dec_and_test(&tag->usage)) {
+ kfree_rcu(tag, rcu);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * key_remove_domain - Kill off a key domain and gc its keys
+ * @domain_tag: The domain tag to release.
+ *
+ * This marks a domain tag as being dead and releases a ref on it. If that
+ * wasn't the last reference, the garbage collector is poked to try and delete
+ * all keys that were in the domain.
+ */
+void key_remove_domain(struct key_tag *domain_tag)
+{
+ domain_tag->removed = true;
+ if (!key_put_tag(domain_tag))
+ key_schedule_gc_links();
+}
+
+/*
+ * Build the next index key chunk.
+ *
+ * We return it one word-sized chunk at a time.
+ */
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ const u8 *d;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return index_key->hash;
+ case 1:
+ return index_key->x;
+ case 2:
+ return (unsigned long)index_key->type;
+ case 3:
+ return (unsigned long)index_key->domain_tag;
+ default:
+ level -= 4;
+ if (desc_len <= sizeof(index_key->desc))
+ return 0;
+
+ d = index_key->description + sizeof(index_key->desc);
+ d += level * sizeof(long);
+ desc_len -= sizeof(index_key->desc);
+ if (desc_len > n)
+ desc_len = n;
+ do {
+ chunk <<= 8;
+ chunk |= *d++;
+ } while (--desc_len > 0);
+ return chunk;
+ }
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
+{
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ return key->index_key.type == index_key->type &&
+ key->index_key.domain_tag == index_key->domain_tag &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
+}
+
+/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
+ */
+static int keyring_diff_objects(const void *object, const void *data)
+{
+ const struct key *key_a = keyring_ptr_to_key(object);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = data;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = a->hash;
+ seg_b = b->hash;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything else thereafter we
+ * can deal with as being machine word-size dependent.
+ */
+ seg_a = a->x;
+ seg_b = b->x;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ /* The next bit may not work on big endian */
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ seg_a = (unsigned long)a->domain_tag;
+ seg_b = (unsigned long)b->domain_tag;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ i = sizeof(a->desc);
+ if (a->desc_len <= i)
+ goto same;
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+
+/*
+ * Clean up a keyring when it is destroyed. Unpublish its name if it had one
+ * and dispose of its data.
+ *
+ * The garbage collector detects the final key_put(), removes the keyring from
+ * the serial number tree and then does RCU synchronisation before coming here,
+ * so we shouldn't need to worry about code poking around here with the RCU
+ * read lock held by this time.
+ */
+static void keyring_destroy(struct key *keyring)
+{
+ if (keyring->description) {
+ write_lock(&keyring_name_lock);
+
+ if (keyring->name_link.next != NULL &&
+ !list_empty(&keyring->name_link))
+ list_del(&keyring->name_link);
+
+ write_unlock(&keyring_name_lock);
+ }
+
+ if (keyring->restrict_link) {
+ struct key_restriction *keyres = keyring->restrict_link;
+
+ key_put(keyres->key);
+ kfree(keyres);
+ }
+
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
+}
+
+/*
+ * Describe a keyring for /proc.
+ */
+static void keyring_describe(const struct key *keyring, struct seq_file *m)
+{
+ if (keyring->description)
+ seq_puts(m, keyring->description);
+ else
+ seq_puts(m, "[anon]");
+
+ if (key_is_positive(keyring)) {
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
+ else
+ seq_puts(m, ": empty");
+ }
+}
+
+struct keyring_read_iterator_context {
+ size_t buflen;
+ size_t count;
+ key_serial_t *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->buflen);
+
+ if (ctx->count >= ctx->buflen)
+ return 1;
+
+ *ctx->buffer++ = key->serial;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
+
+/*
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
+ */
+static long keyring_read(const struct key *keyring,
+ char *buffer, size_t buflen)
+{
+ struct keyring_read_iterator_context ctx;
+ long ret;
+
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
+ }
+
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
+}
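+
+/*
+ * Editorial sketch (not kernel code): the usual two-call pattern a userspace
+ * reader of a keyring follows.  The keyring ID "kr" is illustrative; the
+ * libkeyutils keyctl_read_alloc() helper wraps the same dance.
+ *
+ *   long n = syscall(__NR_keyctl, KEYCTL_READ, kr, NULL, 0);  // size probe
+ *   key_serial_t *ids = malloc(n);
+ *   n = syscall(__NR_keyctl, KEYCTL_READ, kr, (char *)ids, n);
+ *   // On success there are n / sizeof(key_serial_t) key IDs in ids[].
+ */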
+
+/*
+ * Allocate a keyring and link into the destination keyring.
+ */
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+ const struct cred *cred, key_perm_t perm,
+ unsigned long flags,
+ struct key_restriction *restrict_link,
+ struct key *dest)
+{
+ struct key *keyring;
+ int ret;
+
+ keyring = key_alloc(&key_type_keyring, description,
+ uid, gid, cred, perm, flags, restrict_link);
+ if (!IS_ERR(keyring)) {
+ ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
+ if (ret < 0) {
+ key_put(keyring);
+ keyring = ERR_PTR(ret);
+ }
+ }
+
+ return keyring;
+}
+EXPORT_SYMBOL(keyring_alloc);
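+
+/*
+ * Editorial sketch (not part of this file): how an in-kernel user might
+ * allocate a keyring.  The description, ownership, permissions and flags are
+ * illustrative assumptions, not a prescription.
+ *
+ *   struct key *kr;
+ *
+ *   kr = keyring_alloc(".example", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ *                      current_cred(),
+ *                      (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
+ *                      KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ *   if (IS_ERR(kr))
+ *           return PTR_ERR(kr);
+ */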
+
+/**
+ * restrict_link_reject - Give -EPERM to restrict link
+ * @keyring: The keyring being added to.
+ * @type: The type of key being added.
+ * @payload: The payload of the key intended to be added.
+ * @restriction_key: Keys providing additional data for evaluating restriction.
+ *
+ * Reject the addition of any links to a keyring. It can be overridden by
+ * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
+ * adding a key to a keyring.
+ *
+ * This is meant to be stored in a key_restriction structure which is passed
+ * in the restrict_link parameter to keyring_alloc().
+ */
+int restrict_link_reject(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key)
+{
+ return -EPERM;
+}
+
+/*
+ * By default, keys are matched by an exact comparison of their descriptions.
+ */
+bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ return strcmp(key->description, match_data->raw_data) == 0;
+}
+
+/*
+ * Iteration function to consider each key found.
+ */
+static int keyring_search_iterator(const void *object, void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = READ_ONCE(key->flags);
+ short state = READ_ONCE(key->state);
+
+ kenter("{%d}", key->serial);
+
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
+ }
+
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ time64_t expiry = READ_ONCE(key->expiry);
+
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
+
+ if (expiry && ctx->now >= expiry) {
+ if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
+
+ /* keys that don't match */
+ if (!ctx->match_data.cmp(key, &ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
+ }
+
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
+ }
+
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (state < 0) {
+ ctx->result = ERR_PTR(state);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
+
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
+
+skipped:
+ return ctx->skipped_ret;
+}
+
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
+
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
+
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
+
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
+
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+ BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+ (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
+ if (ctx->index_key.description)
+ key_set_index_key(&ctx->index_key);
+
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
+ goto found;
+ case 2:
+ return false;
+ default:
+ break;
+ }
+ }
+
+ ctx->skipped_ret = 0;
+
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
+ goto found;
+
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
+ goto not_this_keyring;
+
+ ptr = READ_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
+
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or
+ * doesn't contain any keyring pointers.
+ */
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = READ_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = READ_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ }
+ node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+ kdebug("begin_node");
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = READ_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
+ continue;
+
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0)
+ continue;
+
+ /* stack the current position */
+ stack[sp].keyring = keyring;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
+ sp++;
+
+ /* begin again with the new keyring */
+ keyring = key;
+ goto descend_to_keyring;
+ }
+
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = READ_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = READ_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
+ }
+
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
+not_this_keyring:
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
+ }
+
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
+
+ /* We found a viable match */
+found:
+ key = key_ref_to_ptr(ctx->result);
+ key_check(key);
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now;
+ keyring->last_used_at = ctx->now;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now;
+ }
+ kleave(" = true");
+ return true;
+}
+
+/**
+ * keyring_search_rcu - Search a keyring tree for a matching key under RCU
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH). The caller must hold the RCU read lock to
+ * prevent keyrings from being destroyed or rearranged whilst they are being
+ * searched.
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
+
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ ctx->now = ktime_get_real_seconds();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
+ return ctx->result;
+}
+
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of key we want to find.
+ * @description: The description of the key we want to find.
+ * @recurse: True to search the children of @keyring also
+ *
+ * As keyring_search_rcu() above, but using the current task's credentials and
+ * type's default matching function and preferred search method.
+ */
+key_ref_t keyring_search(key_ref_t keyring,
+ struct key_type *type,
+ const char *description,
+ bool recurse)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+ key_ref_t key;
+ int ret;
+
+ if (recurse)
+ ctx.flags |= KEYRING_SEARCH_RECURSE;
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ rcu_read_lock();
+ key = keyring_search_rcu(keyring, &ctx);
+ rcu_read_unlock();
+
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
+ return key;
+}
+EXPORT_SYMBOL(keyring_search);
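+
+/*
+ * Editorial sketch (not part of this file): a hypothetical in-kernel search
+ * for a user-defined key, assuming the caller already holds a reference on
+ * the keyring "kr".
+ *
+ *   key_ref_t ref;
+ *
+ *   ref = keyring_search(make_key_ref(kr, true), &key_type_user,
+ *                        "example-desc", true);
+ *   if (!IS_ERR(ref)) {
+ *           struct key *key = key_ref_to_ptr(ref);
+ *           // ... use the key ...
+ *           key_ref_put(ref);   // drop the usage count taken by the search
+ *   }
+ */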
+
+static struct key_restriction *keyring_restriction_alloc(
+ key_restrict_link_func_t check)
+{
+ struct key_restriction *keyres =
+ kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+
+ if (!keyres)
+ return ERR_PTR(-ENOMEM);
+
+ keyres->check = check;
+
+ return keyres;
+}
+
+/*
+ * Semaphore to serialise restriction setup to prevent reference count
+ * cycles through restriction key pointers.
+ */
+static DECLARE_RWSEM(keyring_serialise_restrict_sem);
+
+/*
+ * Check for restriction cycles that would prevent keyring garbage collection.
+ * keyring_serialise_restrict_sem must be held.
+ */
+static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
+ struct key_restriction *keyres)
+{
+ while (keyres && keyres->key &&
+ keyres->key->type == &key_type_keyring) {
+ if (keyres->key == dest_keyring)
+ return true;
+
+ keyres = keyres->key->restrict_link;
+ }
+
+ return false;
+}
+
+/**
+ * keyring_restrict - Look up and apply a restriction to a keyring
+ * @keyring_ref: The keyring to be restricted
+ * @type: The key type that will provide the restriction checker.
+ * @restriction: The restriction options to apply to the keyring
+ *
+ * Look up a keyring and apply a restriction to it. The restriction is managed
+ * by the specific key type, but can be configured by the options specified in
+ * the restriction string.
+ */
+int keyring_restrict(key_ref_t keyring_ref, const char *type,
+ const char *restriction)
+{
+ struct key *keyring;
+ struct key_type *restrict_type = NULL;
+ struct key_restriction *restrict_link;
+ int ret = 0;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ if (!type) {
+ restrict_link = keyring_restriction_alloc(restrict_link_reject);
+ } else {
+ restrict_type = key_type_lookup(type);
+
+ if (IS_ERR(restrict_type))
+ return PTR_ERR(restrict_type);
+
+ if (!restrict_type->lookup_restriction) {
+ ret = -ENOENT;
+ goto error;
+ }
+
+ restrict_link = restrict_type->lookup_restriction(restriction);
+ }
+
+ if (IS_ERR(restrict_link)) {
+ ret = PTR_ERR(restrict_link);
+ goto error;
+ }
+
+ down_write(&keyring->sem);
+ down_write(&keyring_serialise_restrict_sem);
+
+ if (keyring->restrict_link) {
+ ret = -EEXIST;
+ } else if (keyring_detect_restriction_cycle(keyring, restrict_link)) {
+ ret = -EDEADLK;
+ } else {
+ keyring->restrict_link = restrict_link;
+ notify_key(keyring, NOTIFY_KEY_SETATTR, 0);
+ }
+
+ up_write(&keyring_serialise_restrict_sem);
+ up_write(&keyring->sem);
+
+ if (ret < 0) {
+ key_put(restrict_link->key);
+ kfree(restrict_link);
+ }
+
+error:
+ if (restrict_type)
+ key_type_put(restrict_type);
+
+ return ret;
+}
+EXPORT_SYMBOL(keyring_restrict);
+
+/*
+ * Search the given keyring for a key that might be updated.
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
+ */
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
+{
+ struct key *keyring, *key;
+ const void *object;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
+
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
+
+ if (object)
+ goto found;
+
+ kleave(" = NULL");
+ return NULL;
+
+found:
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
+}
+
+/*
+ * Find a keyring with the specified name.
+ *
+ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
+ * user in the current user namespace are considered. If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success. -ENOKEY is returned if a key could not be found.
+ */
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+{
+ struct user_namespace *ns = current_user_ns();
+ struct key *keyring;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ read_lock(&keyring_name_lock);
+
+ /* Search the namespace's keyring name list for a keyring with a matching
+ * name that grants Search permission and that hasn't been revoked
+ */
+ list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
+ if (!kuid_has_mapping(ns, keyring->user->uid))
+ continue;
+
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ continue;
+
+ if (strcmp(keyring->description, name) != 0)
+ continue;
+
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
+ continue;
+ }
+
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!refcount_inc_not_zero(&keyring->usage))
+ continue;
+ keyring->last_used_at = ktime_get_real_seconds();
+ goto out;
+ }
+
+ keyring = ERR_PTR(-ENOKEY);
+out:
+ read_unlock(&keyring_name_lock);
+ return keyring;
+}
+
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
+
+ /* We might get a keyring with matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data.raw_data)
+ return 0;
+
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
+
+/*
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
+ */
+static int keyring_detect_cycle(struct key *A, struct key *B)
+{
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data.raw_data = A,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP |
+ KEYRING_SEARCH_RECURSE),
+ };
+
+ rcu_read_lock();
+ search_nested_keyrings(B, &ctx);
+ rcu_read_unlock();
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
+}
+
+/*
+ * Lock keyring for link.
+ */
+int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Lock keyrings for move (link/unlink combination).
+ */
+int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&l_keyring->sem)
+ __acquires(&u_keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (l_keyring->type != &key_type_keyring ||
+ u_keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ /* We have to be very careful here to take the keyring locks in the
+ * right order, lest we open ourselves to deadlocking against another
+ * move operation.
+ */
+ if (l_keyring < u_keyring) {
+ down_write(&l_keyring->sem);
+ down_write_nested(&u_keyring->sem, 1);
+ } else {
+ down_write(&u_keyring->sem);
+ down_write_nested(&l_keyring->sem, 1);
+ }
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Preallocate memory so that a key can be linked into a keyring.
+ */
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+ int ret;
+
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
+
+ BUG_ON(index_key->desc_len == 0);
+ BUG_ON(*_edit != NULL);
+
+ *_edit = NULL;
+
+ ret = -EKEYREVOKED;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ goto error;
+
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
+ }
+
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
+ }
+
+ *_edit = edit;
+ kleave(" = 0");
+ return 0;
+
+error_cancel:
+ assoc_array_cancel_edit(edit);
+error:
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Check that already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin(). There's no need to call
+ * this for keys that were created after __key_link_begin() was called.
+ */
+int __key_link_check_live_key(struct key *keyring, struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ /* check that we aren't going to create a cycle by linking one
+ * keyring to another */
+ return keyring_detect_cycle(keyring, key);
+ return 0;
+}
+
+/*
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called. Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
+ */
+void __key_link(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
+ notify_key(keyring, NOTIFY_KEY_LINKED, key_serial(key));
+}
+
+/*
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
+ */
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+ __releases(&keyring_serialise_link_lock)
+{
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
+
+ if (edit) {
+ if (!edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ }
+ assoc_array_cancel_edit(edit);
+ }
+ up_write(&keyring->sem);
+
+ if (index_key->type == &key_type_keyring)
+ mutex_unlock(&keyring_serialise_link_lock);
+}
+
+/*
+ * Check addition of keys to restricted keyrings.
+ */
+static int __key_link_check_restriction(struct key *keyring, struct key *key)
+{
+ if (!keyring->restrict_link || !keyring->restrict_link->check)
+ return 0;
+ return keyring->restrict_link->check(keyring, key->type, &key->payload,
+ keyring->restrict_link->key);
+}
+
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_link(struct key *keyring, struct key *key)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+
+ key_check(keyring);
+ key_check(key);
+
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_end;
+
+ kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+ ret = __key_link_check_restriction(keyring, key);
+ if (ret == 0)
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(keyring, key, &edit);
+
+error_end:
+ __key_link_end(keyring, &key->index_key, edit);
+error:
+ kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
+ return ret;
+}
+EXPORT_SYMBOL(key_link);
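+
+/*
+ * Editorial sketch (not part of this file): linking a key, with the
+ * permission checks assumed to have been done by the caller as the comment
+ * above requires.  "kr" and "key" are illustrative.
+ *
+ *   int ret = key_link(kr, key);
+ *   if (ret < 0)
+ *           return ret;   // e.g. -EDQUOT if the link quota is exhausted
+ */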
+
+/*
+ * Lock a keyring for unlink.
+ */
+static int __key_unlink_lock(struct key *keyring)
+ __acquires(&keyring->sem)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+ return 0;
+}
+
+/*
+ * Begin the process of unlinking a key from a keyring.
+ */
+static int __key_unlink_begin(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+
+ BUG_ON(*_edit != NULL);
+
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit))
+ return PTR_ERR(edit);
+
+ if (!edit)
+ return -ENOENT;
+
+ *_edit = edit;
+ return 0;
+}
+
+/*
+ * Apply an unlink change.
+ */
+static void __key_unlink(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ assoc_array_apply_edit(*_edit);
+ notify_key(keyring, NOTIFY_KEY_UNLINKED, key_serial(key));
+ *_edit = NULL;
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
+}
+
+/*
+ * Finish unlinking a key from a keyring.
+ */
+static void __key_unlink_end(struct key *keyring,
+ struct key *key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+{
+ if (edit)
+ assoc_array_cancel_edit(edit);
+ up_write(&keyring->sem);
+}
+
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
+ */
+int key_unlink(struct key *keyring, struct key *key)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ key_check(keyring);
+ key_check(key);
+
+ ret = __key_unlink_lock(keyring);
+ if (ret < 0)
+ return ret;
+
+ ret = __key_unlink_begin(keyring, key, &edit);
+ if (ret == 0)
+ __key_unlink(keyring, key, &edit);
+ __key_unlink_end(keyring, key, edit);
+ return ret;
+}
+EXPORT_SYMBOL(key_unlink);
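+
+/*
+ * Editorial sketch (not part of this file): the inverse of the key_link()
+ * sketch above, again with illustrative variables.
+ *
+ *   int ret = key_unlink(kr, key);   // removes the link, not the key itself
+ */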
+
+/**
+ * key_move - Move a key from one keyring to another
+ * @key: The key to move
+ * @from_keyring: The keyring to remove the link from.
+ * @to_keyring: The keyring to make the link in.
+ * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
+ *
+ * Make a link in @to_keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring
+ * whilst simultaneously removing a link to the key from @from_keyring.
+ *
+ * This function will write-lock both keyring's semaphores and will consume
+ * some of the user's key data quota to hold the link on @to_keyring.
+ *
+ * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
+ * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
+ * keyring is full, -EDQUOT if there is insufficient key data quota remaining
+ * to add another link or -ENOMEM if there's insufficient memory. If
+ * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
+ * matching key in @to_keyring.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags)
+{
+ struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
+ int ret;
+
+ kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);
+
+ if (from_keyring == to_keyring)
+ return 0;
+
+ key_check(key);
+ key_check(from_keyring);
+ key_check(to_keyring);
+
+ ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
+ if (ret < 0)
+ goto out;
+ ret = __key_unlink_begin(from_keyring, key, &from_edit);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
+ if (ret < 0)
+ goto error;
+
+ ret = -EEXIST;
+ if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
+ goto error;
+
+ ret = __key_link_check_restriction(to_keyring, key);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_check_live_key(to_keyring, key);
+ if (ret < 0)
+ goto error;
+
+ __key_unlink(from_keyring, key, &from_edit);
+ __key_link(to_keyring, key, &to_edit);
+error:
+ __key_link_end(to_keyring, &key->index_key, to_edit);
+ __key_unlink_end(from_keyring, key, from_edit);
+out:
+ kleave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(key_move);
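+
+/*
+ * A minimal usage sketch: move a key between two keyrings the caller holds
+ * references on, refusing the move if the destination already has a
+ * matching key:
+ *
+ *	ret = key_move(key, src_keyring, dst_keyring, KEYCTL_MOVE_EXCL);
+ *	if (ret == -EEXIST)
+ *		pr_debug("destination already holds a matching key\n");
+ */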
+
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
+ */
+int keyring_clear(struct key *keyring)
+{
+ struct assoc_array_edit *edit;
+ int ret;
+
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ notify_key(keyring, NOTIFY_KEY_CLEARED, 0);
+ key_payload_reserve(keyring, 0);
+ ret = 0;
+ }
+
+ up_write(&keyring->sem);
+ return ret;
+}
+EXPORT_SYMBOL(keyring_clear);
+
+/*
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
+ */
+static void keyring_revoke(struct key *keyring)
+{
+ struct assoc_array_edit *edit;
+
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
+ }
+}
+
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time64_t *limit = iterator_data;
+
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
+}
+
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ time64_t *limit = iterator_data;
+
+ key_check(key);
+ return key_is_dead(key, *limit);
+}
+
+/*
+ * Garbage collect pointers from a keyring.
+ *
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
+ */
+void keyring_gc(struct key *keyring, time64_t limit)
+{
+ int result;
+
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
+
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto dont_gc;
+
+ /* scan the keyring looking for dead keys */
+ rcu_read_lock();
+ result = assoc_array_iterate(&keyring->keys,
+ keyring_gc_check_iterator, &limit);
+ rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
+
+dont_gc:
+ kleave(" [no gc]");
+ return;
+
+do_gc:
+ down_write(&keyring->sem);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ keyring_gc_select_iterator, &limit);
+ up_write(&keyring->sem);
+ kleave(" [gc]");
+}
+
+/*
+ * Garbage collect restriction pointers from a keyring.
+ *
+ * Keyring restrictions are associated with a key type, and must be cleaned
+ * up if the key type is unregistered. The restriction is altered to always
+ * reject additional keys so a keyring cannot be opened up by unregistering
+ * a key type.
+ *
+ * Not called with any keyring locks held. The keyring's key struct will not
+ * be deallocated under us as only our caller may deallocate it.
+ *
+ * The caller is required to hold key_types_sem and dead_type->sem. This is
+ * fulfilled by key_gc_keytype() holding the locks on behalf of
+ * key_garbage_collector(), which it invokes on a workqueue.
+ */
+void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
+{
+ struct key_restriction *keyres;
+
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
+
+ /*
+ * keyring->restrict_link is only assigned at key allocation time
+ * or with the key type locked, so the only values that could be
+ * concurrently assigned to keyring->restrict_link are for key
+ * types other than dead_type. Given this, it's ok to check
+ * the key type before acquiring keyring->sem.
+ */
+ if (!dead_type || !keyring->restrict_link ||
+ keyring->restrict_link->keytype != dead_type) {
+ kleave(" [no restriction gc]");
+ return;
+ }
+
+ /* Lock the keyring to ensure that a link is not in progress */
+ down_write(&keyring->sem);
+
+ keyres = keyring->restrict_link;
+
+ keyres->check = restrict_link_reject;
+
+ key_put(keyres->key);
+ keyres->key = NULL;
+ keyres->keytype = NULL;
+
+ up_write(&keyring->sem);
+
+ kleave(" [restriction gc]");
+}
diff --git a/security/keys/permission.c b/security/keys/permission.c
new file mode 100644
index 000000000..4a61f804e
--- /dev/null
+++ b/security/keys/permission.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key permission checking
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/security.h>
+#include "internal.h"
+
+/**
+ * key_task_permission - Check a key can be used
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @need_perm: The permission required.
+ *
+ * Check to see whether permission is granted to use a key in the desired way,
+ * but permit the security modules to override.
+ *
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
+ */
+int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+{
+ struct key *key;
+ key_perm_t kperm, mask;
+ int ret;
+
+ switch (need_perm) {
+ default:
+ WARN_ON(1);
+ return -EACCES;
+ case KEY_NEED_UNLINK:
+ case KEY_SYSADMIN_OVERRIDE:
+ case KEY_AUTHTOKEN_OVERRIDE:
+ case KEY_DEFER_PERM_CHECK:
+ goto lsm;
+
+ case KEY_NEED_VIEW: mask = KEY_OTH_VIEW; break;
+ case KEY_NEED_READ: mask = KEY_OTH_READ; break;
+ case KEY_NEED_WRITE: mask = KEY_OTH_WRITE; break;
+ case KEY_NEED_SEARCH: mask = KEY_OTH_SEARCH; break;
+ case KEY_NEED_LINK: mask = KEY_OTH_LINK; break;
+ case KEY_NEED_SETATTR: mask = KEY_OTH_SETATTR; break;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* use the second 8 bits of permissions for keys the caller owns */
+ if (uid_eq(key->uid, cred->fsuid)) {
+ kperm = key->perm >> 16;
+ goto use_these_perms;
+ }
+
+ /* use the third 8 bits of permissions for keys whose group the caller
+ * is a member of */
+ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+ if (gid_eq(key->gid, cred->fsgid)) {
+ kperm = key->perm >> 8;
+ goto use_these_perms;
+ }
+
+ ret = groups_search(cred->group_info, key->gid);
+ if (ret) {
+ kperm = key->perm >> 8;
+ goto use_these_perms;
+ }
+ }
+
+ /* otherwise use the least-significant 8 bits */
+ kperm = key->perm;
+
+use_these_perms:
+
+ /* use the top 8 bits of permissions for keys the caller possesses
+ * - possessor permissions are additive with other permissions
+ */
+ if (is_key_possessed(key_ref))
+ kperm |= key->perm >> 24;
+
+ if ((kperm & mask) != mask)
+ return -EACCES;
+
+ /* let LSM be the final arbiter */
+lsm:
+ return security_key_permission(key_ref, cred, need_perm);
+}
+EXPORT_SYMBOL(key_task_permission);
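+
+/*
+ * For reference, the permissions word tested above packs one 6-bit mask per
+ * subject class; for example, perm = 0x3f130000 grants:
+ *
+ *	bits 24-29 (0x3f): possessor - view, read, write, search, link, setattr
+ *	bits 16-21 (0x13): user      - view, read, link
+ *	bits  8-13 (0x00): group     - nothing
+ *	bits  0-5  (0x00): other     - nothing
+ */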
+
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the
+ * key is invalidated, -EKEYREVOKED if the key's type has been removed or if
+ * the key has been revoked or -EKEYEXPIRED if the key has expired.
+ */
+int key_validate(const struct key *key)
+{
+ unsigned long flags = READ_ONCE(key->flags);
+ time64_t expiry = READ_ONCE(key->expiry);
+
+ if (flags & (1 << KEY_FLAG_INVALIDATED))
+ return -ENOKEY;
+
+ /* check it's still accessible */
+ if (flags & ((1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))
+ return -EKEYREVOKED;
+
+ /* check it hasn't expired */
+ if (expiry) {
+ if (ktime_get_real_seconds() >= expiry)
+ return -EKEYEXPIRED;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(key_validate);
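+
+/*
+ * A typical caller (sketch) pairs this with the permission check above,
+ * validating the key's state before testing access:
+ *
+ *	ret = key_validate(key);
+ *	if (ret == 0)
+ *		ret = key_task_permission(key_ref, cred, KEY_NEED_READ);
+ */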
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 000000000..97af230aa
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/user_namespace.h>
+#include <linux/cred.h>
+
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ memset(&index_key, 0, sizeof(index_key));
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+ key_set_index_key(&index_key);
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->keyring_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->keyring_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->keyring_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->keyring_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
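+
+/*
+ * Userspace reaches this via KEYCTL_GET_PERSISTENT; a sketch using the
+ * keyutils library wrapper, linking the caller's own persistent keyring
+ * (uid -1) into the session keyring and returning its serial:
+ *
+ *	key_serial_t serial;
+ *
+ *	serial = keyctl_get_persistent(-1, KEY_SPEC_SESSION_KEYRING);
+ */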
diff --git a/security/keys/proc.c b/security/keys/proc.c
new file mode 100644
index 000000000..4f4e2c182
--- /dev/null
+++ b/security/keys/proc.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* procfs files for key database enumeration
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_keys_stop(struct seq_file *p, void *v);
+static int proc_keys_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_keys_ops = {
+ .start = proc_keys_start,
+ .next = proc_keys_next,
+ .stop = proc_keys_stop,
+ .show = proc_keys_show,
+};
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_key_users_stop(struct seq_file *p, void *v);
+static int proc_key_users_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_key_users_ops = {
+ .start = proc_key_users_start,
+ .next = proc_key_users_next,
+ .stop = proc_key_users_stop,
+ .show = proc_key_users_show,
+};
+
+/*
+ * Declare the /proc files.
+ */
+static int __init key_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ p = proc_create_seq("keys", 0, NULL, &proc_keys_ops);
+ if (!p)
+ panic("Cannot create /proc/keys\n");
+
+ p = proc_create_seq("key-users", 0, NULL, &proc_key_users_ops);
+ if (!p)
+ panic("Cannot create /proc/key-users\n");
+
+ return 0;
+}
+
+__initcall(key_proc_init);
+
+/*
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
+ */
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
+{
+ struct user_namespace *user_ns = seq_user_ns(p);
+
+ n = rb_next(n);
+ while (n) {
+ struct key *key = rb_entry(n, struct key, serial_node);
+ if (kuid_has_mapping(user_ns, key->user->uid))
+ break;
+ n = rb_next(n);
+ }
+ return n;
+}
+
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
+{
+ struct user_namespace *user_ns = seq_user_ns(p);
+ struct rb_node *n = key_serial_tree.rb_node;
+ struct key *minkey = NULL;
+
+ while (n) {
+ struct key *key = rb_entry(n, struct key, serial_node);
+ if (id < key->serial) {
+ if (!minkey || minkey->serial > key->serial)
+ minkey = key;
+ n = n->rb_left;
+ } else if (id > key->serial) {
+ n = n->rb_right;
+ } else {
+ minkey = key;
+ break;
+ }
+ key = NULL;
+ }
+
+ if (!minkey)
+ return NULL;
+
+ for (;;) {
+ if (kuid_has_mapping(user_ns, minkey->user->uid))
+ return minkey;
+ n = rb_next(&minkey->serial_node);
+ if (!n)
+ return NULL;
+ minkey = rb_entry(n, struct key, serial_node);
+ }
+}
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_serial_lock)
+{
+ key_serial_t pos = *_pos;
+ struct key *key;
+
+ spin_lock(&key_serial_lock);
+
+ if (*_pos > INT_MAX)
+ return NULL;
+ key = find_ge_key(p, pos);
+ if (!key)
+ return NULL;
+ *_pos = key->serial;
+ return &key->serial_node;
+}
+
+static inline key_serial_t key_node_serial(struct rb_node *n)
+{
+ struct key *key = rb_entry(n, struct key, serial_node);
+ return key->serial;
+}
+
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ struct rb_node *n;
+
+ n = key_serial_next(p, v);
+ if (n)
+ *_pos = key_node_serial(n);
+ else
+ (*_pos)++;
+ return n;
+}
+
+static void proc_keys_stop(struct seq_file *p, void *v)
+ __releases(key_serial_lock)
+{
+ spin_unlock(&key_serial_lock);
+}
+
+static int proc_keys_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key *key = rb_entry(_p, struct key, serial_node);
+ unsigned long flags;
+ key_ref_t key_ref, skey_ref;
+ time64_t now, expiry;
+ char xbuf[16];
+ short state;
+ u64 timo;
+ int rc;
+
+ struct keyring_search_context ctx = {
+ .index_key = key->index_key,
+ .cred = m->file->f_cred,
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.raw_data = key,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+
+ key_ref = make_key_ref(key, 0);
+
+ /* determine if the key is possessed by this process (a test we can
+ * skip if the key does not indicate the possessor can view it)
+ */
+ if (key->perm & KEY_POS_VIEW) {
+ rcu_read_lock();
+ skey_ref = search_cred_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+ if (!IS_ERR(skey_ref)) {
+ key_ref_put(skey_ref);
+ key_ref = make_key_ref(key, 1);
+ }
+ }
+
+ /* check whether the current task is allowed to view the key */
+ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
+ if (rc < 0)
+ return 0;
+
+ now = ktime_get_real_seconds();
+
+ rcu_read_lock();
+
+ /* come up with a suitable timeout value */
+ expiry = READ_ONCE(key->expiry);
+ if (expiry == TIME64_MAX) {
+ memcpy(xbuf, "perm", 5);
+ } else if (now >= expiry) {
+ memcpy(xbuf, "expd", 5);
+ } else {
+ timo = expiry - now;
+
+ if (timo < 60)
+ sprintf(xbuf, "%llus", timo);
+ else if (timo < 60*60)
+ sprintf(xbuf, "%llum", div_u64(timo, 60));
+ else if (timo < 60*60*24)
+ sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
+ else if (timo < 60*60*24*7)
+ sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
+ else
+ sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
+ }
+
+ state = key_read_state(key);
+
+#define showflag(FLAGS, LETTER, FLAG) \
+ ((FLAGS & (1 << FLAG)) ? LETTER : '-')
+
+ flags = READ_ONCE(key->flags);
+ seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+ key->serial,
+ state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+ showflag(flags, 'R', KEY_FLAG_REVOKED),
+ showflag(flags, 'D', KEY_FLAG_DEAD),
+ showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
+ showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
+ state < 0 ? 'N' : '-',
+ showflag(flags, 'i', KEY_FLAG_INVALIDATED),
+ refcount_read(&key->usage),
+ xbuf,
+ key->perm,
+ from_kuid_munged(seq_user_ns(m), key->uid),
+ from_kgid_munged(seq_user_ns(m), key->gid),
+ key->type->name);
+
+#undef showflag
+
+ if (key->type->describe)
+ key->type->describe(key, m);
+ seq_putc(m, '\n');
+
+ rcu_read_unlock();
+ return 0;
+}
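+
+/*
+ * A sample /proc/keys line as produced by the format above (illustrative
+ * values; fields are serial, flags, usage count, timeout, permissions,
+ * uid, gid, type and description):
+ *
+ *	2f7f3f28 I--Q---     3 perm 3f130000  1000  1000 keyring   _ses: 1
+ */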
+
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+ while (n) {
+ struct key_user *user = rb_entry(n, struct key_user, node);
+ if (kuid_has_mapping(user_ns, user->uid))
+ break;
+ n = rb_next(n);
+ }
+ return n;
+}
+
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+ return __key_user_next(user_ns, rb_next(n));
+}
+
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
+{
+ struct rb_node *n = rb_first(r);
+ return __key_user_next(user_ns, n);
+}
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_user_lock)
+{
+ struct rb_node *_p;
+ loff_t pos = *_pos;
+
+ spin_lock(&key_user_lock);
+
+ _p = key_user_first(seq_user_ns(p), &key_user_tree);
+ while (pos > 0 && _p) {
+ pos--;
+ _p = key_user_next(seq_user_ns(p), _p);
+ }
+
+ return _p;
+}
+
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ (*_pos)++;
+ return key_user_next(seq_user_ns(p), (struct rb_node *)v);
+}
+
+static void proc_key_users_stop(struct seq_file *p, void *v)
+ __releases(key_user_lock)
+{
+ spin_unlock(&key_user_lock);
+}
+
+static int proc_key_users_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key_user *user = rb_entry(_p, struct key_user, node);
+ unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
+ from_kuid_munged(seq_user_ns(m), user->uid),
+ refcount_read(&user->usage),
+ atomic_read(&user->nkeys),
+ atomic_read(&user->nikeys),
+ user->qnkeys,
+ maxkeys,
+ user->qnbytes,
+ maxbytes);
+
+ return 0;
+}
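+
+/*
+ * A sample /proc/key-users line as produced by the format above
+ * (illustrative values; fields are uid, struct refcount, total/instantiated
+ * keys, quota'd keys against the limit and quota'd bytes against the limit):
+ *
+ *	 1000:     6 5/5 3/200 82/20000
+ */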
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
new file mode 100644
index 000000000..b5d5333ab
--- /dev/null
+++ b/security/keys/process_keys.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Manage a process's keyrings
+ *
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/user.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/security.h>
+#include <linux/user_namespace.h>
+#include <linux/uaccess.h>
+#include <linux/init_task.h>
+#include <keys/request_key_auth-type.h>
+#include "internal.h"
+
+/* Session keyring create vs join semaphore */
+static DEFINE_MUTEX(key_session_mutex);
+
+/* The root user's tracking struct */
+struct key_user root_key_user = {
+ .usage = REFCOUNT_INIT(3),
+ .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
+ .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
+ .nkeys = ATOMIC_INIT(2),
+ .nikeys = ATOMIC_INIT(2),
+ .uid = GLOBAL_ROOT_UID,
+};
+
+/*
+ * Get or create a user register keyring.
+ */
+static struct key *get_user_register(struct user_namespace *user_ns)
+{
+ struct key *reg_keyring = READ_ONCE(user_ns->user_keyring_register);
+
+ if (reg_keyring)
+ return reg_keyring;
+
+ down_write(&user_ns->keyring_sem);
+
+ /* Make sure there's a register keyring. It gets owned by the
+ * user_namespace's owner.
+ */
+ reg_keyring = user_ns->user_keyring_register;
+ if (!reg_keyring) {
+ reg_keyring = keyring_alloc(".user_reg",
+ user_ns->owner, INVALID_GID,
+ &init_cred,
+ KEY_POS_WRITE | KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ,
+ 0,
+ NULL, NULL);
+ if (!IS_ERR(reg_keyring))
+ smp_store_release(&user_ns->user_keyring_register,
+ reg_keyring);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ /* We don't return a ref since the keyring is pinned by the user_ns */
+ return reg_keyring;
+}
+
+/*
+ * Look up the user and user session keyrings for the current process's UID,
+ * creating them if they don't exist.
+ */
+int look_up_user_keyrings(struct key **_user_keyring,
+ struct key **_user_session_keyring)
+{
+ const struct cred *cred = current_cred();
+ struct user_namespace *user_ns = current_user_ns();
+ struct key *reg_keyring, *uid_keyring, *session_keyring;
+ key_perm_t user_keyring_perm;
+ key_ref_t uid_keyring_r, session_keyring_r;
+ uid_t uid = from_kuid(user_ns, cred->user->uid);
+ char buf[20];
+ int ret;
+
+ user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
+
+ kenter("%u", uid);
+
+ reg_keyring = get_user_register(user_ns);
+ if (IS_ERR(reg_keyring))
+ return PTR_ERR(reg_keyring);
+
+ down_write(&user_ns->keyring_sem);
+ ret = 0;
+
+ /* Get the user keyring. Note that there may be one in existence
+ * already as it may have been pinned by a session, but the user_struct
+ * pointing to it may have been destroyed by setuid.
+ */
+ snprintf(buf, sizeof(buf), "_uid.%u", uid);
+ uid_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid %p", uid_keyring_r);
+ if (uid_keyring_r == ERR_PTR(-EAGAIN)) {
+ uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, reg_keyring);
+ if (IS_ERR(uid_keyring)) {
+ ret = PTR_ERR(uid_keyring);
+ goto error;
+ }
+ } else if (IS_ERR(uid_keyring_r)) {
+ ret = PTR_ERR(uid_keyring_r);
+ goto error;
+ } else {
+ uid_keyring = key_ref_to_ptr(uid_keyring_r);
+ }
+
+ /* Get a default session keyring (which might also exist already) */
+ snprintf(buf, sizeof(buf), "_uid_ses.%u", uid);
+ session_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid_ses %p", session_keyring_r);
+ if (session_keyring_r == ERR_PTR(-EAGAIN)) {
+ session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(session_keyring)) {
+ ret = PTR_ERR(session_keyring);
+ goto error_release;
+ }
+
+ /* We install a link from the user session keyring to
+ * the user keyring.
+ */
+ ret = key_link(session_keyring, uid_keyring);
+ if (ret < 0)
+ goto error_release_session;
+
+ /* And only then link the user-session keyring to the
+ * register.
+ */
+ ret = key_link(reg_keyring, session_keyring);
+ if (ret < 0)
+ goto error_release_session;
+ } else if (IS_ERR(session_keyring_r)) {
+ ret = PTR_ERR(session_keyring_r);
+ goto error_release;
+ } else {
+ session_keyring = key_ref_to_ptr(session_keyring_r);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ if (_user_session_keyring)
+ *_user_session_keyring = session_keyring;
+ else
+ key_put(session_keyring);
+ if (_user_keyring)
+ *_user_keyring = uid_keyring;
+ else
+ key_put(uid_keyring);
+ kleave(" = 0");
+ return 0;
+
+error_release_session:
+ key_put(session_keyring);
+error_release:
+ key_put(uid_keyring);
+error:
+ up_write(&user_ns->keyring_sem);
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Get the user session keyring if it exists, but don't create it if it
+ * doesn't.
+ */
+struct key *get_user_session_keyring_rcu(const struct cred *cred)
+{
+ struct key *reg_keyring = READ_ONCE(cred->user_ns->user_keyring_register);
+ key_ref_t session_keyring_r;
+ char buf[20];
+
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_keyring,
+ .index_key.description = buf,
+ .cred = cred,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = buf,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+
+ if (!reg_keyring)
+ return NULL;
+
+ ctx.index_key.desc_len = snprintf(buf, sizeof(buf), "_uid_ses.%u",
+ from_kuid(cred->user_ns,
+ cred->user->uid));
+
+ session_keyring_r = keyring_search_rcu(make_key_ref(reg_keyring, true),
+ &ctx);
+ if (IS_ERR(session_keyring_r))
+ return NULL;
+ return key_ref_to_ptr(session_keyring_r);
+}
+
+/*
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+int install_thread_keyring_to_cred(struct cred *new)
+{
+ struct key *keyring;
+
+ if (new->thread_keyring)
+ return 0;
+
+ keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN,
+ NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ new->thread_keyring = keyring;
+ return 0;
+}
+
+/*
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+static int install_thread_keyring(void)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_thread_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+int install_process_keyring_to_cred(struct cred *new)
+{
+ struct key *keyring;
+
+ if (new->process_keyring)
+ return 0;
+
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN,
+ NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ new->process_keyring = keyring;
+ return 0;
+}
+
+/*
+ * Install a process keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+static int install_process_keyring(void)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ * @cred cannot be in use by any task yet.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+{
+ unsigned long flags;
+ struct key *old;
+
+ might_sleep();
+
+ /* create an empty session keyring */
+ if (!keyring) {
+ flags = KEY_ALLOC_QUOTA_OVERRUN;
+ if (cred->session_keyring)
+ flags = KEY_ALLOC_IN_QUOTA;
+
+ keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ flags, NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+ } else {
+ __key_get(keyring);
+ }
+
+ /* install the keyring */
+ old = cred->session_keyring;
+ cred->session_keyring = keyring;
+
+ if (old)
+ key_put(old);
+
+ return 0;
+}
+
+/*
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+static int install_session_keyring(struct key *keyring)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_session_keyring_to_cred(new, keyring);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Handle the fsuid changing.
+ */
+void key_fsuid_changed(struct cred *new_cred)
+{
+ /* update the ownership of the thread keyring */
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->uid = new_cred->fsuid;
+ up_write(&new_cred->thread_keyring->sem);
+ }
+}
+
+/*
+ * Handle the fsgid changing.
+ */
+void key_fsgid_changed(struct cred *new_cred)
+{
+ /* update the ownership of the thread keyring */
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->gid = new_cred->fsgid;
+ up_write(&new_cred->thread_keyring->sem);
+ }
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key under RCU conditions (the caller must be holding the RCU read
+ * lock).
+ *
+ * The search criteria are the type and the match function. The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search. Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials. Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too. Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
+ */
+key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx)
+{
+ struct key *user_session;
+ key_ref_t key_ref, ret, err;
+ const struct cred *cred = ctx->cred;
+
+ /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
+ * searchable, but we failed to find a key or we found a negative key;
+ * otherwise we want to return a sample error (probably -EACCES) if
+ * none of the keyrings were searchable
+ *
+ * in terms of priority: success > -ENOKEY > -EAGAIN > other error
+ */
+ key_ref = NULL;
+ ret = NULL;
+ err = ERR_PTR(-EAGAIN);
+
+ /* search the thread keyring first */
+ if (cred->thread_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->thread_keyring, 1), ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* search the process keyring second */
+ if (cred->process_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->process_keyring, 1), ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* search the session keyring */
+ if (cred->session_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->session_keyring, 1), ctx);
+
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+ /* or search the user-session keyring */
+ else if ((user_session = get_user_session_keyring_rcu(cred))) {
+ key_ref = keyring_search_rcu(make_key_ref(user_session, 1),
+ ctx);
+ key_put(user_session);
+
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* no key - decide on the error we're going to go for */
+ key_ref = ret ? ret : err;
+
+found:
+ return key_ref;
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_cred_keyrings_rcu(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * The caller must be holding the RCU read lock.
+ *
+ * Return same as search_cred_keyrings_rcu().
+ */
+key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx)
+{
+ struct request_key_auth *rka;
+ key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
+
+ key_ref = search_cred_keyrings_rcu(ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+ err = key_ref;
+
+ /* if this process has an instantiation authorisation key, then we also
+ * search the keyrings of the process mentioned there
+ * - we don't permit access to request_key auth keys via this method
+ */
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
+ ) {
+ const struct cred *cred = ctx->cred;
+
+ if (key_validate(cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data[0];
+
+ //// was search_process_keyrings() [ie. recursive]
+ ctx->cred = rka->cred;
+ key_ref = search_cred_keyrings_rcu(ctx);
+ ctx->cred = cred;
+
+ if (!IS_ERR(key_ref))
+ goto found;
+ ret = key_ref;
+ }
+ }
+
+ /* no key - decide on the error we're going to go for */
+ if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
+ key_ref = ERR_PTR(-ENOKEY);
+ else if (err == ERR_PTR(-EACCES))
+ key_ref = ret;
+ else
+ key_ref = err;
+
+found:
+ return key_ref;
+}
+
+/*
+ * See if the key we're looking at is the target key.
+ */
+bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ return key == match_data->raw_data;
+}
+
+/*
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permission or the LSM denied access
+ * to it; or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
+ */
+key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
+ enum key_need_perm need_perm)
+{
+ struct keyring_search_context ctx = {
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct request_key_auth *rka;
+ struct key *key, *user_session;
+ key_ref_t key_ref, skey_ref;
+ int ret;
+
+try_again:
+ ctx.cred = get_current_cred();
+ key_ref = ERR_PTR(-ENOKEY);
+
+ switch (id) {
+ case KEY_SPEC_THREAD_KEYRING:
+ if (!ctx.cred->thread_keyring) {
+ if (!(lflags & KEY_LOOKUP_CREATE))
+ goto error;
+
+ ret = install_thread_keyring();
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+ }
+
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_PROCESS_KEYRING:
+ if (!ctx.cred->process_keyring) {
+ if (!(lflags & KEY_LOOKUP_CREATE))
+ goto error;
+
+ ret = install_process_keyring();
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+ }
+
+ key = ctx.cred->process_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_SESSION_KEYRING:
+ if (!ctx.cred->session_keyring) {
+ /* always install a session keyring upon access if one
+ * doesn't exist yet */
+ ret = look_up_user_keyrings(NULL, &user_session);
+ if (ret < 0)
+ goto error;
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(user_session);
+
+ key_put(user_session);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
+ } else if (test_bit(KEY_FLAG_UID_KEYRING,
+ &ctx.cred->session_keyring->flags) &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
+ }
+
+ key = ctx.cred->session_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_USER_KEYRING:
+ ret = look_up_user_keyrings(&key, NULL);
+ if (ret < 0)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_USER_SESSION_KEYRING:
+ ret = look_up_user_keyrings(NULL, &key);
+ if (ret < 0)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_GROUP_KEYRING:
+ /* group keyrings are not yet supported */
+ key_ref = ERR_PTR(-EINVAL);
+ goto error;
+
+ case KEY_SPEC_REQKEY_AUTH_KEY:
+ key = ctx.cred->request_key_auth;
+ if (!key)
+ goto error;
+
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_REQUESTOR_KEYRING:
+ if (!ctx.cred->request_key_auth)
+ goto error;
+
+ down_read(&ctx.cred->request_key_auth->sem);
+ if (test_bit(KEY_FLAG_REVOKED,
+ &ctx.cred->request_key_auth->flags)) {
+ key_ref = ERR_PTR(-EKEYREVOKED);
+ key = NULL;
+ } else {
+ rka = ctx.cred->request_key_auth->payload.data[0];
+ key = rka->dest_keyring;
+ __key_get(key);
+ }
+ up_read(&ctx.cred->request_key_auth->sem);
+ if (!key)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ default:
+ key_ref = ERR_PTR(-EINVAL);
+ if (id < 1)
+ goto error;
+
+ key = key_lookup(id);
+ if (IS_ERR(key)) {
+ key_ref = ERR_CAST(key);
+ goto error;
+ }
+
+ key_ref = make_key_ref(key, 0);
+
+ /* check to see if we possess the key */
+ ctx.index_key = key->index_key;
+ ctx.match_data.raw_data = key;
+ kdebug("check possessed");
+ rcu_read_lock();
+ skey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+ kdebug("possessed=%p", skey_ref);
+
+ if (!IS_ERR(skey_ref)) {
+ key_put(key);
+ key_ref = skey_ref;
+ }
+
+ break;
+ }
+
+ /* unlink does not use the nominated key in any way, so can skip all
+ * the permission checks as it is only concerned with the keyring */
+ if (need_perm != KEY_NEED_UNLINK) {
+ if (!(lflags & KEY_LOOKUP_PARTIAL)) {
+ ret = wait_for_key_construction(key, true);
+ switch (ret) {
+ case -ERESTARTSYS:
+ goto invalid_key;
+ default:
+ if (need_perm != KEY_AUTHTOKEN_OVERRIDE &&
+ need_perm != KEY_DEFER_PERM_CHECK)
+ goto invalid_key;
+ break;
+ case 0:
+ break;
+ }
+ } else if (need_perm != KEY_DEFER_PERM_CHECK) {
+ ret = key_validate(key);
+ if (ret < 0)
+ goto invalid_key;
+ }
+
+ ret = -EIO;
+ if (!(lflags & KEY_LOOKUP_PARTIAL) &&
+ key_read_state(key) == KEY_IS_UNINSTANTIATED)
+ goto invalid_key;
+ }
+
+ /* check the permissions */
+ ret = key_task_permission(key_ref, ctx.cred, need_perm);
+ if (ret < 0)
+ goto invalid_key;
+
+ key->last_used_at = ktime_get_real_seconds();
+
+error:
+ put_cred(ctx.cred);
+ return key_ref;
+
+invalid_key:
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error;
+
+ /* if we attempted to install a keyring, then it may have caused new
+ * creds to be installed */
+reget_creds:
+ put_cred(ctx.cred);
+ goto try_again;
+}
+EXPORT_SYMBOL(lookup_user_key);
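+
+/*
+ * A minimal usage sketch: resolve a userspace-supplied key ID, requiring
+ * Read permission, and drop the reference when done:
+ *
+ *	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_READ);
+ *	if (IS_ERR(key_ref))
+ *		return PTR_ERR(key_ref);
+ *	key = key_ref_to_ptr(key_ref);
+ *	...
+ *	key_ref_put(key_ref);
+ */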
+
+/*
+ * Join the named keyring as the session keyring if possible, else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
+ */
+long join_session_keyring(const char *name)
+{
+ const struct cred *old;
+ struct cred *new;
+ struct key *keyring;
+ long ret, serial;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
+ /* if no name is provided, install an anonymous keyring */
+ if (!name) {
+ ret = install_session_keyring_to_cred(new, NULL);
+ if (ret < 0)
+ goto error;
+
+ serial = new->session_keyring->serial;
+ ret = commit_creds(new);
+ if (ret == 0)
+ ret = serial;
+ goto okay;
+ }
+
+ /* allow the user to join or create a named keyring */
+ mutex_lock(&key_session_mutex);
+
+ /* look for an existing keyring of this name */
+ keyring = find_keyring_by_name(name, false);
+ if (PTR_ERR(keyring) == -ENOKEY) {
+ /* not found - try and create a new one */
+ keyring = keyring_alloc(
+ name, old->uid, old->gid, old,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+ KEY_ALLOC_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ }
+ } else if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ } else if (keyring == new->session_keyring) {
+ ret = 0;
+ goto error3;
+ }
+
+ /* we've got a keyring - now to install it */
+ ret = install_session_keyring_to_cred(new, keyring);
+ if (ret < 0)
+ goto error3;
+
+ commit_creds(new);
+ mutex_unlock(&key_session_mutex);
+
+ ret = keyring->serial;
+ key_put(keyring);
+okay:
+ return ret;
+
+error3:
+ key_put(keyring);
+error2:
+ mutex_unlock(&key_session_mutex);
+error:
+ abort_creds(new);
+ return ret;
+}
+
+/*
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
+ */
+void key_change_session_keyring(struct callback_head *twork)
+{
+ const struct cred *old = current_cred();
+ struct cred *new = container_of(twork, struct cred, rcu);
+
+ if (unlikely(current->flags & PF_EXITING)) {
+ put_cred(new);
+ return;
+ }
+
+ /* If get_ucounts fails more bits are needed in the refcount */
+ if (unlikely(!get_ucounts(old->ucounts))) {
+ WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
+ put_cred(new);
+ return;
+ }
+
+ new-> uid = old-> uid;
+ new-> euid = old-> euid;
+ new-> suid = old-> suid;
+ new->fsuid = old->fsuid;
+ new-> gid = old-> gid;
+ new-> egid = old-> egid;
+ new-> sgid = old-> sgid;
+ new->fsgid = old->fsgid;
+ new->user = get_uid(old->user);
+ new->ucounts = old->ucounts;
+ new->user_ns = get_user_ns(old->user_ns);
+ new->group_info = get_group_info(old->group_info);
+
+ new->securebits = old->securebits;
+ new->cap_inheritable = old->cap_inheritable;
+ new->cap_permitted = old->cap_permitted;
+ new->cap_effective = old->cap_effective;
+ new->cap_ambient = old->cap_ambient;
+ new->cap_bset = old->cap_bset;
+
+ new->jit_keyring = old->jit_keyring;
+ new->thread_keyring = key_get(old->thread_keyring);
+ new->process_keyring = key_get(old->process_keyring);
+
+ security_transfer_creds(new, old);
+
+ commit_creds(new);
+}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return look_up_user_keyrings(NULL, NULL);
+}
+
+late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
new file mode 100644
index 000000000..a7673ad86
--- /dev/null
+++ b/security/keys/request_key.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Request a key from userspace
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/security/keys/request-key.rst
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/keyctl.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include "internal.h"
+#include <keys/request_key_auth-type.h>
+
+#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+
+static struct key *check_cached_key(struct keyring_search_context *ctx)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct key *key = current->cached_requested_key;
+
+ if (key &&
+ ctx->match_data.cmp(key, &ctx->match_data) &&
+ !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))))
+ return key_get(key);
+#endif
+ return NULL;
+}
+
+static void cache_requested_key(struct key *key)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct task_struct *t = current;
+
+ /* Do not cache key if it is a kernel thread */
+ if (!(t->flags & PF_KTHREAD)) {
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
+#endif
+}
+
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @authkey: The authorisation key.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key. The key will be negated
+ * if an error is indicated. The authorisation key will be revoked
+ * unconditionally.
+ */
+void complete_request_key(struct key *authkey, int error)
+{
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ struct key *key = rka->target_key;
+
+ kenter("%d{%d},%d", authkey->serial, key->serial, error);
+
+ if (error < 0)
+ key_negate_and_link(key, key_negative_timeout, NULL, authkey);
+ else
+ key_revoke(authkey);
+}
+EXPORT_SYMBOL(complete_request_key);
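+
+/*
+ * A key type that supplies its own ->request_key actor must pair it with
+ * this call; a minimal sketch (queue_upcall() is a hypothetical helper):
+ *
+ *	static int my_request_key(struct key *authkey, void *aux)
+ *	{
+ *		int err = queue_upcall(authkey);
+ *
+ *		if (err < 0)
+ *			complete_request_key(authkey, err);
+ *		return err;
+ *	}
+ */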
+
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in context of freshly forked kthread before kernel_execve(),
+ * so we can simply install the desired session_keyring at this point.
+ */
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
+{
+ struct key *keyring = info->data;
+
+ return install_session_keyring_to_cred(cred, keyring);
+}
+
+/*
+ * Clean up a usermode helper with session keyring.
+ */
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+ struct key *keyring = info->data;
+ key_put(keyring);
+}
+
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
+static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
+ struct key *session_keyring, int wait)
+{
+ struct subprocess_info *info;
+
+ info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
+ umh_keys_init, umh_keys_cleanup,
+ session_keyring);
+ if (!info)
+ return -ENOMEM;
+
+ key_get(session_keyring);
+ return call_usermodehelper_exec(info, wait);
+}
+
+/*
+ * Request userspace finish the construction of a key
+ * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
+ */
+static int call_sbin_request_key(struct key *authkey, void *aux)
+{
+ static char const request_key[] = "/sbin/request-key";
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ const struct cred *cred = current_cred();
+ key_serial_t prkey, sskey;
+ struct key *key = rka->target_key, *keyring, *session, *user_session;
+ char *argv[9], *envp[3], uid_str[12], gid_str[12];
+ char key_str[12], keyring_str[3][12];
+ char desc[20];
+ int ret, i;
+
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
+
+ ret = look_up_user_keyrings(NULL, &user_session);
+ if (ret < 0)
+ goto error_us;
+
+ /* allocate a new session keyring */
+ sprintf(desc, "_req.%u", key->serial);
+
+ cred = get_current_cred();
+ keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL);
+ put_cred(cred);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error_alloc;
+ }
+
+ /* attach the auth key to the session keyring */
+ ret = key_link(keyring, authkey);
+ if (ret < 0)
+ goto error_link;
+
+ /* record the UID and GID */
+ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+ sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
+
+ /* we say which key is under construction */
+ sprintf(key_str, "%d", key->serial);
+
+ /* we specify the process's default keyrings */
+ sprintf(keyring_str[0], "%d",
+ cred->thread_keyring ? cred->thread_keyring->serial : 0);
+
+ prkey = 0;
+ if (cred->process_keyring)
+ prkey = cred->process_keyring->serial;
+ sprintf(keyring_str[1], "%d", prkey);
+
+ session = cred->session_keyring;
+ if (!session)
+ session = user_session;
+ sskey = session->serial;
+
+ sprintf(keyring_str[2], "%d", sskey);
+
+ /* set up a minimal environment */
+ i = 0;
+ envp[i++] = "HOME=/";
+ envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[i] = NULL;
+
+ /* set up the argument list */
+ i = 0;
+ argv[i++] = (char *)request_key;
+ argv[i++] = (char *)rka->op;
+ argv[i++] = key_str;
+ argv[i++] = uid_str;
+ argv[i++] = gid_str;
+ argv[i++] = keyring_str[0];
+ argv[i++] = keyring_str[1];
+ argv[i++] = keyring_str[2];
+ argv[i] = NULL;
+
+ /* do it */
+ ret = call_usermodehelper_keys(request_key, argv, envp, keyring,
+ UMH_WAIT_PROC);
+ kdebug("usermode -> 0x%x", ret);
+ if (ret >= 0) {
+ /* ret is the exit/wait code */
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
+ key_validate(key) < 0)
+ ret = -ENOKEY;
+ else
+ /* ignore any errors from userspace if the key was
+ * instantiated */
+ ret = 0;
+ }
+
+error_link:
+ key_put(keyring);
+
+error_alloc:
+ key_put(user_session);
+error_us:
+ complete_request_key(authkey, ret);
+ kleave(" = %d", ret);
+ return ret;
+}
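+
+/*
+ * The resulting invocation therefore looks like the following (with
+ * illustrative serial numbers; the arguments are the operation, the key
+ * under construction, uid, gid, and the thread, process and session
+ * keyrings):
+ *
+ *	/sbin/request-key create 123456789 1000 1000 0 304511843 512381024
+ */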
+
+/*
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
+ */
+static int construct_key(struct key *key, const void *callout_info,
+ size_t callout_len, void *aux,
+ struct key *dest_keyring)
+{
+ request_key_actor_t actor;
+ struct key *authkey;
+ int ret;
+
+ kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
+
+ /* allocate an authorisation key */
+ authkey = request_key_auth_new(key, "create", callout_info, callout_len,
+ dest_keyring);
+ if (IS_ERR(authkey))
+ return PTR_ERR(authkey);
+
+ /* Make the call */
+ actor = call_sbin_request_key;
+ if (key->type->request_key)
+ actor = key->type->request_key;
+
+ ret = actor(authkey, aux);
+
+ /* check that the actor called complete_request_key() prior to
+ * returning an error */
+ WARN_ON(ret < 0 &&
+ !test_bit(KEY_FLAG_INVALIDATED, &authkey->flags));
+
+ key_put(authkey);
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
+ */
+static int construct_get_dest_keyring(struct key **_dest_keyring)
+{
+ struct request_key_auth *rka;
+ const struct cred *cred = current_cred();
+ struct key *dest_keyring = *_dest_keyring, *authkey;
+ int ret;
+
+ kenter("%p", dest_keyring);
+
+ /* find the appropriate keyring */
+ if (dest_keyring) {
+ /* the caller supplied one */
+ key_get(dest_keyring);
+ } else {
+ bool do_perm_check = true;
+
+ /* use a default keyring; falling through the cases until we
+ * find one that we actually have */
+ switch (cred->jit_keyring) {
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+ if (cred->request_key_auth) {
+ authkey = cred->request_key_auth;
+ down_read(&authkey->sem);
+ rka = get_request_key_auth(authkey);
+ if (!test_bit(KEY_FLAG_REVOKED,
+ &authkey->flags))
+ dest_keyring =
+ key_get(rka->dest_keyring);
+ up_read(&authkey->sem);
+ if (dest_keyring) {
+ do_perm_check = false;
+ break;
+ }
+ }
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ dest_keyring = key_get(cred->thread_keyring);
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ dest_keyring = key_get(cred->process_keyring);
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ dest_keyring = key_get(cred->session_keyring);
+
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ ret = look_up_user_keyrings(NULL, &dest_keyring);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ ret = look_up_user_keyrings(&dest_keyring, NULL);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ BUG();
+ }
+
+ /*
+ * Require Write permission on the keyring. This is essential
+ * because the default keyring may be the session keyring, and
+ * joining a keyring only requires Search permission.
+ *
+ * However, this check is skipped for the "requestor keyring" so
+ * that /sbin/request-key can itself use request_key() to add
+ * keys to the original requestor's destination keyring.
+ */
+ if (dest_keyring && do_perm_check) {
+ ret = key_permission(make_key_ref(dest_keyring, 1),
+ KEY_NEED_WRITE);
+ if (ret) {
+ key_put(dest_keyring);
+ return ret;
+ }
+ }
+ }
+
+ *_dest_keyring = dest_keyring;
+ kleave(" [dk %d]", key_serial(dest_keyring));
+ return 0;
+}
+
+/*
+ * Allocate a new key in under-construction state and attempt to link it in to
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
+ */
+static int construct_alloc_key(struct keyring_search_context *ctx,
+ struct key *dest_keyring,
+ unsigned long flags,
+ struct key_user *user,
+ struct key **_key)
+{
+ struct assoc_array_edit *edit = NULL;
+ struct key *key;
+ key_perm_t perm;
+ key_ref_t key_ref;
+ int ret;
+
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
+
+ *_key = NULL;
+ mutex_lock(&user->cons_lock);
+
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+ if (ctx->index_key.type->read)
+ perm |= KEY_POS_READ;
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
+ perm |= KEY_POS_WRITE;
+
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
+ perm, flags, NULL);
+ if (IS_ERR(key))
+ goto alloc_failed;
+
+ set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
+
+ if (dest_keyring) {
+ ret = __key_link_lock(dest_keyring, &key->index_key);
+ if (ret < 0)
+ goto link_lock_failed;
+ }
+
+ /*
+ * Attach the key to the destination keyring under lock, but we do need
+ * to do another check just in case someone beat us to it whilst we
+ * waited for locks.
+ *
+ * The caller might specify a comparison function which looks for keys
+ * that do not exactly match but are still equivalent from the caller's
+ * perspective. The __key_link_begin() operation must be done only after
+ * an actual key is determined.
+ */
+ mutex_lock(&key_construction_mutex);
+
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(ctx);
+ rcu_read_unlock();
+ if (!IS_ERR(key_ref))
+ goto key_already_present;
+
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed;
+ __key_link(dest_keyring, key, &edit);
+ }
+
+ mutex_unlock(&key_construction_mutex);
+ if (dest_keyring)
+ __key_link_end(dest_keyring, &key->index_key, edit);
+ mutex_unlock(&user->cons_lock);
+ *_key = key;
+ kleave(" = 0 [%d]", key_serial(key));
+ return 0;
+
+ /* the key is now present - we tell the caller that we found it by
+ * returning -EINPROGRESS */
+key_already_present:
+ key_put(key);
+ mutex_unlock(&key_construction_mutex);
+ key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed_unlocked;
+ ret = __key_link_check_live_key(dest_keyring, key);
+ if (ret == 0)
+ __key_link(dest_keyring, key, &edit);
+ __key_link_end(dest_keyring, &key->index_key, edit);
+ if (ret < 0)
+ goto link_check_failed;
+ }
+ mutex_unlock(&user->cons_lock);
+ *_key = key;
+ kleave(" = -EINPROGRESS [%d]", key_serial(key));
+ return -EINPROGRESS;
+
+link_check_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [linkcheck]", ret);
+ return ret;
+
+link_alloc_failed:
+ mutex_unlock(&key_construction_mutex);
+link_alloc_failed_unlocked:
+ __key_link_end(dest_keyring, &key->index_key, edit);
+link_lock_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [prelink]", ret);
+ return ret;
+
+alloc_failed:
+ mutex_unlock(&user->cons_lock);
+ kleave(" = %ld", PTR_ERR(key));
+ return PTR_ERR(key);
+}
+
+/*
+ * Commence key construction.
+ */
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
+ const char *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags)
+{
+ struct key_user *user;
+ struct key *key;
+ int ret;
+
+ kenter("");
+
+ if (ctx->index_key.type == &key_type_keyring)
+ return ERR_PTR(-EPERM);
+
+ ret = construct_get_dest_keyring(&dest_keyring);
+ if (ret)
+ goto error;
+
+ user = key_user_lookup(current_fsuid());
+ if (!user) {
+ ret = -ENOMEM;
+ goto error_put_dest_keyring;
+ }
+
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
+ key_user_put(user);
+
+ if (ret == 0) {
+ ret = construct_key(key, callout_info, callout_len, aux,
+ dest_keyring);
+ if (ret < 0) {
+ kdebug("cons failed");
+ goto construction_failed;
+ }
+ } else if (ret == -EINPROGRESS) {
+ ret = 0;
+ } else {
+ goto error_put_dest_keyring;
+ }
+
+ key_put(dest_keyring);
+ kleave(" = key %d", key_serial(key));
+ return key;
+
+construction_failed:
+ key_negate_and_link(key, key_negative_timeout, NULL, NULL);
+ key_put(key);
+error_put_dest_keyring:
+ key_put(dest_keyring);
+error:
+ kleave(" = %d", ret);
+ return ERR_PTR(ret);
+}
+
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria (type, description, domain_tag) is
+ * searched for in the process's keyrings and returned with its usage count
+ * incremented if found. Otherwise, if callout_info is not NULL, a key will be
+ * allocated and some service (probably in userspace) will be asked to
+ * instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
+ */
+struct key *request_key_and_link(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct key *key;
+ key_ref_t key_ref;
+ int ret;
+
+ kenter("%s,%s,%p,%zu,%p,%p,%lx",
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
+
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ key = check_cached_key(&ctx);
+ if (key)
+ goto error_free;
+
+ /* search all the process keyrings for a key */
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+
+ if (!IS_ERR(key_ref)) {
+ if (dest_keyring) {
+ ret = key_task_permission(key_ref, current_cred(),
+ KEY_NEED_LINK);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ ret = key_link(dest_keyring, key);
+ if (ret < 0) {
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
+ /* Only cache the key on immediate success */
+ cache_requested_key(key);
+ } else if (PTR_ERR(key_ref) != -EAGAIN) {
+ key = ERR_CAST(key_ref);
+ } else {
+ /* the search failed, but the keyrings were searchable, so we
+ * should consult userspace if we can */
+ key = ERR_PTR(-ENOKEY);
+ if (!callout_info)
+ goto error_free;
+
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
+ }
+
+error_free:
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
+error:
+ kleave(" = %p", key);
+ return key;
+}
+
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
+ */
+int wait_for_key_construction(struct key *key, bool intr)
+{
+ int ret;
+
+ ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
+ intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (ret)
+ return -ERESTARTSYS;
+ ret = key_read_state(key);
+ if (ret < 0)
+ return ret;
+ return key_validate(key);
+}
+EXPORT_SYMBOL(wait_for_key_construction);
+
+/**
+ * request_key_tag - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found; new keys are always allocated in the user's quota;
+ * the callout_info must be a NUL-terminated string; and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info)
+{
+ struct key *key;
+ size_t callout_len = 0;
+ int ret;
+
+ if (callout_info)
+ callout_len = strlen(callout_info);
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
+ NULL, NULL, KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key)) {
+ ret = wait_for_key_construction(key, false);
+ if (ret < 0) {
+ key_put(key);
+ return ERR_PTR(ret);
+ }
+ }
+ return key;
+}
+EXPORT_SYMBOL(request_key_tag);
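+
+/*
+ * Illustrative sketch (not part of the upstream code): how a subsystem
+ * might look a key up with request_key_tag() using the "user" key type.
+ * The function name and key description are hypothetical; the payload is
+ * read under the key semaphore, as user-type keys require.
+ */
+#include <keys/user-type.h>
+
+static int example_fetch_secret(void)
+{
+ const struct user_key_payload *payload;
+ struct key *key;
+ int len = -ENOKEY;
+
+ /* NULL callout_info: search the keyrings only, don't upcall */
+ key = request_key_tag(&key_type_user, "example:secret", NULL, NULL);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ down_read(&key->sem);
+ payload = user_key_payload_locked(key);
+ if (payload)
+ len = payload->datalen;
+ up_read(&key->sem);
+
+ key_put(key);
+ return len;
+}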
+
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux)
+{
+ struct key *key;
+ int ret;
+
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
+ aux, NULL, KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key)) {
+ ret = wait_for_key_construction(key, false);
+ if (ret < 0) {
+ key_put(key);
+ return ERR_PTR(ret);
+ }
+ }
+ return key;
+}
+EXPORT_SYMBOL(request_key_with_auxdata);
+
+/**
+ * request_key_rcu - Request key from RCU-read-locked context
+ * @type: The type of key we want.
+ * @description: The name of the key we want.
+ * @domain_tag: The domain in which the key operates.
+ *
+ * Request a key from a context that we may not sleep in (such as RCU-mode
+ * pathwalk). Keys under construction are ignored.
+ *
+ * Return a pointer to the found key if successful; -ENOKEY if we couldn't
+ * find a key; or some other error if the key found was unsuitable or
+ * inaccessible.
+ */
+struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
+ };
+ struct key *key;
+ key_ref_t key_ref;
+
+ kenter("%s,%s", type->name, description);
+
+ key = check_cached_key(&ctx);
+ if (key)
+ return key;
+
+ /* search all the process keyrings for a key */
+ key_ref = search_process_keyrings_rcu(&ctx);
+ if (IS_ERR(key_ref)) {
+ key = ERR_CAST(key_ref);
+ if (PTR_ERR(key_ref) == -EAGAIN)
+ key = ERR_PTR(-ENOKEY);
+ } else {
+ key = key_ref_to_ptr(key_ref);
+ cache_requested_key(key);
+ }
+
+ kleave(" = %p", key);
+ return key;
+}
+EXPORT_SYMBOL(request_key_rcu);
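+
+/*
+ * Illustrative sketch (not part of the upstream code): calling
+ * request_key_rcu() from a non-sleeping context.  The caller must already
+ * be in (or enter) an RCU read-side critical section; the key type and
+ * description are hypothetical, and the returned reference must later be
+ * dropped with key_put().
+ */
+static struct key *example_lookup_nonblocking(void)
+{
+ struct key *key;
+
+ rcu_read_lock();
+ key = request_key_rcu(&key_type_user, "example:secret", NULL);
+ rcu_read_unlock();
+
+ /* may be ERR_PTR(-ENOKEY); keys under construction are ignored */
+ return key;
+}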
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
new file mode 100644
index 000000000..41e973500
--- /dev/null
+++ b/security/keys/request_key_auth.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Request key authorisation token key definition.
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/security/keys/request-key.rst
+ */
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "internal.h"
+#include <keys/request_key_auth-type.h>
+
+static int request_key_auth_preparse(struct key_preparsed_payload *);
+static void request_key_auth_free_preparse(struct key_preparsed_payload *);
+static int request_key_auth_instantiate(struct key *,
+ struct key_preparsed_payload *);
+static void request_key_auth_describe(const struct key *, struct seq_file *);
+static void request_key_auth_revoke(struct key *);
+static void request_key_auth_destroy(struct key *);
+static long request_key_auth_read(const struct key *, char *, size_t);
+
+/*
+ * The request-key authorisation key type definition.
+ */
+struct key_type key_type_request_key_auth = {
+ .name = ".request_key_auth",
+ .def_datalen = sizeof(struct request_key_auth),
+ .preparse = request_key_auth_preparse,
+ .free_preparse = request_key_auth_free_preparse,
+ .instantiate = request_key_auth_instantiate,
+ .describe = request_key_auth_describe,
+ .revoke = request_key_auth_revoke,
+ .destroy = request_key_auth_destroy,
+ .read = request_key_auth_read,
+};
+
+static int request_key_auth_preparse(struct key_preparsed_payload *prep)
+{
+ return 0;
+}
+
+static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Instantiate a request-key authorisation key.
+ */
+static int request_key_auth_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ rcu_assign_keypointer(key, (struct request_key_auth *)prep->data);
+ return 0;
+}
+
+/*
+ * Describe an authorisation token.
+ */
+static void request_key_auth_describe(const struct key *key,
+ struct seq_file *m)
+{
+ struct request_key_auth *rka = dereference_key_rcu(key);
+
+ if (!rka)
+ return;
+
+ seq_puts(m, "key:");
+ seq_puts(m, key->description);
+ if (key_is_positive(key))
+ seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
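+
+/*
+ * Note: for a positively instantiated token, the output above appears in
+ * the description column of /proc/keys, e.g. (values hypothetical):
+ *
+ * key:afs@EXAMPLE.COM pid:1810 ci:24
+ */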
+
+/*
+ * Read the callout_info data (retrieves the callout information).
+ * - the key's semaphore is read-locked
+ */
+static long request_key_auth_read(const struct key *key,
+ char *buffer, size_t buflen)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+ size_t datalen;
+ long ret;
+
+ if (!rka)
+ return -EKEYREVOKED;
+
+ datalen = rka->callout_len;
+ ret = datalen;
+
+ /* we can return the data as is */
+ if (buffer && buflen > 0) {
+ if (buflen > datalen)
+ buflen = datalen;
+
+ memcpy(buffer, rka->callout_info, buflen);
+ }
+
+ return ret;
+}
+
+static void free_request_key_auth(struct request_key_auth *rka)
+{
+ if (!rka)
+ return;
+ key_put(rka->target_key);
+ key_put(rka->dest_keyring);
+ if (rka->cred)
+ put_cred(rka->cred);
+ kfree(rka->callout_info);
+ kfree(rka);
+}
+
+/*
+ * Dispose of the request_key_auth record under RCU conditions
+ */
+static void request_key_auth_rcu_disposal(struct rcu_head *rcu)
+{
+ struct request_key_auth *rka =
+ container_of(rcu, struct request_key_auth, rcu);
+
+ free_request_key_auth(rka);
+}
+
+/*
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
+ */
+static void request_key_auth_revoke(struct key *key)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+
+ kenter("{%d}", key->serial);
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+}
+
+/*
+ * Destroy an instantiation authorisation token key.
+ */
+static void request_key_auth_destroy(struct key *key)
+{
+ struct request_key_auth *rka = rcu_access_pointer(key->payload.rcu_data0);
+
+ kenter("{%d}", key->serial);
+ if (rka) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+ }
+}
+
+/*
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
+ */
+struct key *request_key_auth_new(struct key *target, const char *op,
+ const void *callout_info, size_t callout_len,
+ struct key *dest_keyring)
+{
+ struct request_key_auth *rka, *irka;
+ const struct cred *cred = current_cred();
+ struct key *authkey = NULL;
+ char desc[20];
+ int ret = -ENOMEM;
+
+ kenter("%d,", target->serial);
+
+ /* allocate an auth record */
+ rka = kzalloc(sizeof(*rka), GFP_KERNEL);
+ if (!rka)
+ goto error;
+ rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL);
+ if (!rka->callout_info)
+ goto error_free_rka;
+ rka->callout_len = callout_len;
+ strlcpy(rka->op, op, sizeof(rka->op));
+
+ /* see if the calling process is already servicing the key request of
+ * another process */
+ if (cred->request_key_auth) {
+ /* it is - use that instantiation context here too */
+ down_read(&cred->request_key_auth->sem);
+
+ /* if the auth key has been revoked, then the key we're
+ * servicing is already instantiated */
+ if (test_bit(KEY_FLAG_REVOKED,
+ &cred->request_key_auth->flags)) {
+ up_read(&cred->request_key_auth->sem);
+ ret = -EKEYREVOKED;
+ goto error_free_rka;
+ }
+
+ irka = cred->request_key_auth->payload.data[0];
+ rka->cred = get_cred(irka->cred);
+ rka->pid = irka->pid;
+
+ up_read(&cred->request_key_auth->sem);
+ } else {
+ /* it isn't - use this process as the context */
+ rka->cred = get_cred(cred);
+ rka->pid = current->pid;
+ }
+
+ rka->target_key = key_get(target);
+ rka->dest_keyring = key_get(dest_keyring);
+
+ /* allocate the auth key */
+ sprintf(desc, "%x", target->serial);
+
+ authkey = key_alloc(&key_type_request_key_auth, desc,
+ cred->fsuid, cred->fsgid, cred,
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_POS_LINK |
+ KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error_free_rka;
+ }
+
+ /* construct the auth key */
+ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
+ if (ret < 0)
+ goto error_put_authkey;
+
+ kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage));
+ return authkey;
+
+error_put_authkey:
+ key_put(authkey);
+error_free_rka:
+ free_request_key_auth(rka);
+error:
+ kleave("= %d", ret);
+ return ERR_PTR(ret);
+}
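+
+/*
+ * Note: the serial number of this authorisation key is passed to the
+ * userspace helper (see Documentation/security/keys/request-key.rst),
+ * which assumes it with keyctl_assume_authority() in order to
+ * instantiate the target key on the requestor's behalf.
+ */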
+
+/*
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
+ */
+struct key *key_get_instantiation_authkey(key_serial_t target_id)
+{
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct key *authkey;
+ key_ref_t authkey_ref;
+
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
+
+ rcu_read_lock();
+ authkey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+
+ if (IS_ERR(authkey_ref)) {
+ authkey = ERR_CAST(authkey_ref);
+ if (authkey == ERR_PTR(-EAGAIN))
+ authkey = ERR_PTR(-ENOKEY);
+ goto error;
+ }
+
+ authkey = key_ref_to_ptr(authkey_ref);
+ if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
+ key_put(authkey);
+ authkey = ERR_PTR(-EKEYREVOKED);
+ }
+
+error:
+ return authkey;
+}
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
new file mode 100644
index 000000000..b46b651b3
--- /dev/null
+++ b/security/keys/sysctl.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key management controls
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/key.h>
+#include <linux/sysctl.h>
+#include "internal.h"
+
+struct ctl_table key_sysctls[] = {
+ {
+ .procname = "maxkeys",
+ .data = &key_quota_maxkeys,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "maxbytes",
+ .data = &key_quota_maxbytes,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "root_maxkeys",
+ .data = &key_quota_root_maxkeys,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "root_maxbytes",
+ .data = &key_quota_root_maxbytes,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "gc_delay",
+ .data = &key_gc_delay,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+#endif
+ { }
+};
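+
+/*
+ * Note: these controls are exposed under /proc/sys/kernel/keys/ (see
+ * Documentation/security/keys/core.rst), e.g.
+ *
+ * echo 300 > /proc/sys/kernel/keys/gc_delay
+ */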
diff --git a/security/keys/trusted-keys/Kconfig b/security/keys/trusted-keys/Kconfig
new file mode 100644
index 000000000..dbfdd8536
--- /dev/null
+++ b/security/keys/trusted-keys/Kconfig
@@ -0,0 +1,38 @@
+config TRUSTED_KEYS_TPM
+ bool "TPM-based trusted keys"
+ depends on TCG_TPM >= TRUSTED_KEYS
+ default y
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ select ASN1_ENCODER
+ select OID_REGISTRY
+ select ASN1
+ help
+ Enable use of the Trusted Platform Module (TPM) as a trusted key
+ backend. Trusted keys are random-number symmetric keys, which are
+ generated and RSA-sealed by the TPM. The TPM only unseals the keys
+ if the boot PCRs and other criteria match.
+
+config TRUSTED_KEYS_TEE
+ bool "TEE-based trusted keys"
+ depends on TEE >= TRUSTED_KEYS
+ default y
+ help
+ Enable use of the Trusted Execution Environment (TEE) as a trusted
+ key backend.
+
+config TRUSTED_KEYS_CAAM
+ bool "CAAM-based trusted keys"
+ depends on CRYPTO_DEV_FSL_CAAM_JR >= TRUSTED_KEYS
+ select CRYPTO_DEV_FSL_CAAM_BLOB_GEN
+ default y
+ help
+ Enable use of NXP's Cryptographic Accelerator and Assurance Module
+ (CAAM) as a trusted key backend.
+
+if !TRUSTED_KEYS_TPM && !TRUSTED_KEYS_TEE && !TRUSTED_KEYS_CAAM
+comment "No trust source selected!"
+endif
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
new file mode 100644
index 000000000..735aa0bc0
--- /dev/null
+++ b/security/keys/trusted-keys/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for trusted keys
+#
+
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_core.o
+trusted-$(CONFIG_TRUSTED_KEYS_TPM) += trusted_tpm1.o
+
+$(obj)/trusted_tpm2.o: $(obj)/tpm2key.asn1.h
+trusted-$(CONFIG_TRUSTED_KEYS_TPM) += trusted_tpm2.o
+trusted-$(CONFIG_TRUSTED_KEYS_TPM) += tpm2key.asn1.o
+
+trusted-$(CONFIG_TRUSTED_KEYS_TEE) += trusted_tee.o
+
+trusted-$(CONFIG_TRUSTED_KEYS_CAAM) += trusted_caam.o
diff --git a/security/keys/trusted-keys/tpm2key.asn1 b/security/keys/trusted-keys/tpm2key.asn1
new file mode 100644
index 000000000..f57f869ad
--- /dev/null
+++ b/security/keys/trusted-keys/tpm2key.asn1
@@ -0,0 +1,11 @@
+---
+--- ASN.1 for TPM 2.0 keys
+---
+
+TPMKey ::= SEQUENCE {
+ type OBJECT IDENTIFIER ({tpm2_key_type}),
+ emptyAuth [0] EXPLICIT BOOLEAN OPTIONAL,
+ parent INTEGER ({tpm2_key_parent}),
+ pubkey OCTET STRING ({tpm2_key_pub}),
+ privkey OCTET STRING ({tpm2_key_priv})
+ }
diff --git a/security/keys/trusted-keys/trusted_caam.c b/security/keys/trusted-keys/trusted_caam.c
new file mode 100644
index 000000000..e3415c520
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_caam.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#include <keys/trusted_caam.h>
+#include <keys/trusted-type.h>
+#include <linux/build_bug.h>
+#include <linux/key-type.h>
+#include <soc/fsl/caam-blob.h>
+
+static struct caam_blob_priv *blobifier;
+
+#define KEYMOD "SECURE_KEY"
+
+static_assert(MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD <= CAAM_BLOB_MAX_LEN);
+static_assert(MAX_BLOB_SIZE <= CAAM_BLOB_MAX_LEN);
+
+static int trusted_caam_seal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct caam_blob_info info = {
+ .input = p->key, .input_len = p->key_len,
+ .output = p->blob, .output_len = MAX_BLOB_SIZE,
+ .key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1,
+ };
+
+ ret = caam_encap_blob(blobifier, &info);
+ if (ret)
+ return ret;
+
+ p->blob_len = info.output_len;
+ return 0;
+}
+
+static int trusted_caam_unseal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct caam_blob_info info = {
+ .input = p->blob, .input_len = p->blob_len,
+ .output = p->key, .output_len = MAX_KEY_SIZE,
+ .key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1,
+ };
+
+ ret = caam_decap_blob(blobifier, &info);
+ if (ret)
+ return ret;
+
+ p->key_len = info.output_len;
+ return 0;
+}
+
+static int trusted_caam_init(void)
+{
+ int ret;
+
+ blobifier = caam_blob_gen_init();
+ if (IS_ERR(blobifier))
+ return PTR_ERR(blobifier);
+
+ ret = register_key_type(&key_type_trusted);
+ if (ret)
+ caam_blob_gen_exit(blobifier);
+
+ return ret;
+}
+
+static void trusted_caam_exit(void)
+{
+ unregister_key_type(&key_type_trusted);
+ caam_blob_gen_exit(blobifier);
+}
+
+struct trusted_key_ops trusted_key_caam_ops = {
+ .migratable = 0, /* non-migratable */
+ .init = trusted_caam_init,
+ .seal = trusted_caam_seal,
+ .unseal = trusted_caam_unseal,
+ .exit = trusted_caam_exit,
+};
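+
+/*
+ * Note: no .get_random callback is provided here, so init_trusted() in
+ * trusted_core.c falls back to kernel_get_random() (the kernel RNG) when
+ * generating material for new CAAM-backed keys.
+ */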
diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
new file mode 100644
index 000000000..fee1ab2c7
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_core.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (c) 2019-2021, Linaro Limited
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/trusted_tee.h>
+#include <keys/trusted_caam.h>
+#include <keys/trusted_tpm.h>
+#include <linux/capability.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/static_call.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+static char *trusted_rng = "default";
+module_param_named(rng, trusted_rng, charp, 0);
+MODULE_PARM_DESC(rng, "Select trusted key RNG");
+
+static char *trusted_key_source;
+module_param_named(source, trusted_key_source, charp, 0);
+MODULE_PARM_DESC(source, "Select trusted keys source (tpm, tee or caam)");
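+
+/*
+ * Note: when this code is built in, the parameters above are set on the
+ * kernel command line with the module-name prefix, e.g.
+ * "trusted.source=tee trusted.rng=kernel"; the same names work as module
+ * options when built as a module.
+ */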
+
+static const struct trusted_key_source trusted_key_sources[] = {
+#if defined(CONFIG_TRUSTED_KEYS_TPM)
+ { "tpm", &trusted_key_tpm_ops },
+#endif
+#if defined(CONFIG_TRUSTED_KEYS_TEE)
+ { "tee", &trusted_key_tee_ops },
+#endif
+#if defined(CONFIG_TRUSTED_KEYS_CAAM)
+ { "caam", &trusted_key_caam_ops },
+#endif
+};
+
+DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
+DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
+ *trusted_key_sources[0].ops->unseal);
+DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
+ *trusted_key_sources[0].ops->get_random);
+static void (*trusted_key_exit)(void);
+static unsigned char migratable;
+
+enum {
+ Opt_err,
+ Opt_new, Opt_load, Opt_update,
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_err, NULL}
+};
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * payload structure
+ *
+ * On success returns the matched command token (Opt_new, Opt_load or
+ * Opt_update); otherwise -EINVAL.
+ */
+static int datablob_parse(char **datablob, struct trusted_key_payload *p)
+{
+ substring_t args[MAX_OPT_ARGS];
+ long keylen;
+ int ret = -EINVAL;
+ int key_cmd;
+ char *c;
+
+ /* main command */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ key_cmd = match_token(c, key_tokens, args);
+ switch (key_cmd) {
+ case Opt_new:
+ /* first argument is key size */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ ret = kstrtol(c, 10, &keylen);
+ if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+ return -EINVAL;
+ p->key_len = keylen;
+ ret = Opt_new;
+ break;
+ case Opt_load:
+ /* first argument is sealed blob */
+ c = strsep(datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ p->blob_len = strlen(c) / 2;
+ if (p->blob_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
+ ret = Opt_load;
+ break;
+ case Opt_update:
+ ret = Opt_update;
+ break;
+ case Opt_err:
+ return -EINVAL;
+ }
+ return ret;
+}
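+
+/*
+ * Note: example payloads accepted by datablob_parse() (the hex blob is a
+ * hypothetical placeholder):
+ *
+ * "new 32" - generate and seal a fresh 32-byte key
+ * "load 5a3b..." - unseal a previously returned blob
+ * "update ..." - reseal an existing key (see trusted_update())
+ */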
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+ struct trusted_key_payload *p = NULL;
+ int ret;
+
+ ret = key_payload_reserve(key, sizeof(*p));
+ if (ret < 0)
+ goto err;
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto err;
+
+ p->migratable = migratable;
+err:
+ return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *payload = NULL;
+ size_t datalen = prep->datalen;
+ char *datablob, *orig_datablob;
+ int ret = 0;
+ int key_cmd;
+ size_t key_len;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+
+ payload = trusted_payload_alloc(key);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key_cmd = datablob_parse(&datablob, payload);
+ if (key_cmd < 0) {
+ ret = key_cmd;
+ goto out;
+ }
+
+ dump_payload(payload);
+
+ switch (key_cmd) {
+ case Opt_load:
+ ret = static_call(trusted_key_unseal)(payload, datablob);
+ dump_payload(payload);
+ if (ret < 0)
+ pr_info("key_unseal failed (%d)\n", ret);
+ break;
+ case Opt_new:
+ key_len = payload->key_len;
+ ret = static_call(trusted_key_get_random)(payload->key,
+ key_len);
+ if (ret < 0)
+ goto out;
+
+ if (ret != key_len) {
+ pr_info("key_create failed (%d)\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = static_call(trusted_key_seal)(payload, datablob);
+ if (ret < 0)
+ pr_info("key_seal failed (%d)\n", ret);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ kfree_sensitive(orig_datablob);
+ if (!ret)
+ rcu_assign_keypointer(key, payload);
+ else
+ kfree_sensitive(payload);
+ return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+ struct trusted_key_payload *p;
+
+ p = container_of(rcu, struct trusted_key_payload, rcu);
+ kfree_sensitive(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *p;
+ struct trusted_key_payload *new_p;
+ size_t datalen = prep->datalen;
+ char *datablob, *orig_datablob;
+ int ret = 0;
+
+ if (key_is_negative(key))
+ return -ENOKEY;
+ p = key->payload.data[0];
+ if (!p->migratable)
+ return -EPERM;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+
+ new_p = trusted_payload_alloc(key);
+ if (!new_p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+ ret = datablob_parse(&datablob, new_p);
+ if (ret != Opt_update) {
+ ret = -EINVAL;
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ /* copy old key values, and reseal with new PCRs */
+ new_p->migratable = p->migratable;
+ new_p->key_len = p->key_len;
+ memcpy(new_p->key, p->key, p->key_len);
+ dump_payload(p);
+ dump_payload(new_p);
+
+ ret = static_call(trusted_key_seal)(new_p, datablob);
+ if (ret < 0) {
+ pr_info("key_seal failed (%d)\n", ret);
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ rcu_assign_keypointer(key, new_p);
+ call_rcu(&p->rcu, trusted_rcu_free);
+out:
+ kfree_sensitive(orig_datablob);
+ return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char *buffer,
+ size_t buflen)
+{
+ const struct trusted_key_payload *p;
+ char *bufp;
+ int i;
+
+ p = dereference_key_locked(key);
+ if (!p)
+ return -EINVAL;
+
+ if (buffer && buflen >= 2 * p->blob_len) {
+ bufp = buffer;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ }
+ return 2 * p->blob_len;
+}
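+
+/*
+ * Note: as with other readable key types, userspace would typically read
+ * this twice - once with a NULL buffer to learn the size (two hex
+ * characters per blob byte), then again with a buffer of that size.
+ */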
+
+/*
+ * trusted_destroy - clear and free the key's payload
+ */
+static void trusted_destroy(struct key *key)
+{
+ kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_trusted = {
+ .name = "trusted",
+ .instantiate = trusted_instantiate,
+ .update = trusted_update,
+ .destroy = trusted_destroy,
+ .describe = user_describe,
+ .read = trusted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_trusted);
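+
+/*
+ * Illustrative userspace sketch (not part of the upstream code): creating
+ * a new 32-byte trusted key with libkeyutils.  The description "kmk" is
+ * an arbitrary example.
+ */
+#include <keyutils.h>
+#include <string.h>
+
+key_serial_t example_make_trusted_key(void)
+{
+ const char payload[] = "new 32";
+
+ return add_key("trusted", "kmk", payload, strlen(payload),
+ KEY_SPEC_USER_KEYRING);
+}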
+
+static int kernel_get_random(unsigned char *key, size_t key_len)
+{
+ return get_random_bytes_wait(key, key_len) ?: key_len;
+}
+
+static int __init init_trusted(void)
+{
+ int (*get_random)(unsigned char *key, size_t key_len);
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(trusted_key_sources); i++) {
+ if (trusted_key_source &&
+ strncmp(trusted_key_source, trusted_key_sources[i].name,
+ strlen(trusted_key_sources[i].name)))
+ continue;
+
+ /*
+ * We always support trusted.rng="kernel" and "default" as
+ * well as trusted.rng=$trusted.source if the trust source
+ * defines its own get_random callback.
+ */
+ get_random = trusted_key_sources[i].ops->get_random;
+ if (trusted_rng && strcmp(trusted_rng, "default")) {
+ if (!strcmp(trusted_rng, "kernel")) {
+ get_random = kernel_get_random;
+ } else if (strcmp(trusted_rng, trusted_key_sources[i].name) ||
+ !get_random) {
+ pr_warn("Unsupported RNG. Supported: kernel");
+ if (get_random)
+ pr_cont(", %s", trusted_key_sources[i].name);
+ pr_cont(", default\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!get_random)
+ get_random = kernel_get_random;
+
+ ret = trusted_key_sources[i].ops->init();
+ if (!ret) {
+ static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
+ static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
+ static_call_update(trusted_key_get_random, get_random);
+
+ trusted_key_exit = trusted_key_sources[i].ops->exit;
+ migratable = trusted_key_sources[i].ops->migratable;
+ }
+
+ if (!ret || ret != -ENODEV)
+ break;
+ }
+
+ /*
+ * encrypted_keys.ko depends on this module loading successfully even if
+ * no trusted key implementation is found.
+ */
+ if (ret == -ENODEV)
+ return 0;
+
+ return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+ if (trusted_key_exit)
+ (*trusted_key_exit)();
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
new file mode 100644
index 000000000..24f67ca8d
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tee.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <keys/trusted_tee.h>
+
+#define DRIVER_NAME "trusted-key-tee"
+
+/*
+ * Get random data for symmetric key
+ *
+ * [out] memref[0] Random data
+ */
+#define TA_CMD_GET_RANDOM 0x0
+
+/*
+ * Seal trusted key using hardware unique key
+ *
+ * [in] memref[0] Plain key
+ * [out] memref[1] Sealed key datablob
+ */
+#define TA_CMD_SEAL 0x1
+
+/*
+ * Unseal trusted key using hardware unique key
+ *
+ * [in] memref[0] Sealed key datablob
+ * [out] memref[1] Plain key
+ */
+#define TA_CMD_UNSEAL 0x2
+
+/**
+ * struct trusted_key_tee_private - TEE Trusted key private data
+ * @dev: TEE based Trusted key device.
+ * @ctx: TEE context handler.
+ * @session_id: Trusted key TA session identifier.
+ * @shm_pool: Memory pool shared with TEE device.
+ */
+struct trusted_key_tee_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *shm_pool;
+};
+
+static struct trusted_key_tee_private pvt_data;
+
+/*
+ * Have the TEE seal(encrypt) the symmetric key
+ */
+static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+ sizeof(p->key) + sizeof(p->blob));
+ if (IS_ERR(reg_shm)) {
+ dev_err(pvt_data.dev, "shm register failed\n");
+ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_SEAL;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->key_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->blob);
+ param[1].u.memref.shm_offs = sizeof(p->key);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_SEAL invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ p->blob_len = param[1].u.memref.size;
+ }
+
+ tee_shm_free(reg_shm);
+
+ return ret;
+}
+
+/*
+ * Have the TEE unseal(decrypt) the symmetric key
+ */
+static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+ sizeof(p->key) + sizeof(p->blob));
+ if (IS_ERR(reg_shm)) {
+ dev_err(pvt_data.dev, "shm register failed\n");
+ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_UNSEAL;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->blob_len;
+ param[0].u.memref.shm_offs = sizeof(p->key);
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->key);
+ param[1].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_UNSEAL invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ p->key_len = param[1].u.memref.size;
+ }
+
+ tee_shm_free(reg_shm);
+
+ return ret;
+}
+
+/*
+ * Have the TEE generate a random symmetric key
+ */
+static int trusted_tee_get_random(unsigned char *key, size_t key_len)
+{
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, key, key_len);
+ if (IS_ERR(reg_shm)) {
+ dev_err(pvt_data.dev, "key shm register failed\n");
+ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_GET_RANDOM;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = key_len;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data.dev, "TA_CMD_GET_RANDOM invoke err: %x\n",
+ inv_arg.ret);
+ ret = -EFAULT;
+ } else {
+ ret = param[0].u.memref.size;
+ }
+
+ tee_shm_free(reg_shm);
+
+ return ret;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int trusted_key_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ ret = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ ret = register_key_type(&key_type_trusted);
+ if (ret < 0)
+ goto out_sess;
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return ret;
+}
+
+static int trusted_key_remove(struct device *dev)
+{
+ unregister_key_type(&key_type_trusted);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id trusted_key_id_table[] = {
+ {UUID_INIT(0xf04a0fe7, 0x1f5d, 0x4b9b,
+ 0xab, 0xf7, 0x61, 0x9b, 0x85, 0xb4, 0xce, 0x8c)},
+ {}
+};
+MODULE_DEVICE_TABLE(tee, trusted_key_id_table);
+
+static struct tee_client_driver trusted_key_driver = {
+ .id_table = trusted_key_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = trusted_key_probe,
+ .remove = trusted_key_remove,
+ },
+};
+
+static int trusted_tee_init(void)
+{
+ return driver_register(&trusted_key_driver.driver);
+}
+
+static void trusted_tee_exit(void)
+{
+ driver_unregister(&trusted_key_driver.driver);
+}
+
+struct trusted_key_ops trusted_key_tee_ops = {
+ .migratable = 0, /* non-migratable */
+ .init = trusted_tee_init,
+ .seal = trusted_tee_seal,
+ .unseal = trusted_tee_unseal,
+ .get_random = trusted_tee_get_random,
+ .exit = trusted_tee_exit,
+};
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
new file mode 100644
index 000000000..aa108bea6
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (c) 2019-2021, Linaro Limited
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <crypto/hash_info.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+#include <linux/key-type.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted_tpm.h>
+
+static const char hmac_alg[] = "hmac(sha1)";
+static const char hash_alg[] = "sha1";
+static struct tpm_chip *chip;
+static struct tpm_digest *digests;
+
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+ struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ return sdesc;
+}
+
+static int TSS_sha1(const unsigned char *data, unsigned int datalen,
+ unsigned char *digest)
+{
+ struct sdesc *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ struct sdesc *sdesc;
+ va_list argp;
+ unsigned int dlen;
+ unsigned char *data;
+ int ret;
+
+ sdesc = init_sdesc(hmacalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc %s\n", hmac_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_setkey(hmacalg, key, keylen);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (data == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, digest);
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+/*
+ * calculate authorization info fields to send to TPM
+ */
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, unsigned char *h1,
+ unsigned char *h2, unsigned int h3, ...)
+{
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned char *data;
+ unsigned char c;
+ int ret;
+ va_list argp;
+
+ if (!chip)
+ return -ENODEV;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ c = !!h3;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ va_start(argp, h3);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (!data) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, h1,
+ TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(TSS_authhmac);
+
+/*
+ * verify the AUTH1_COMMAND (Seal) result from TPM
+ */
+int TSS_checkhmac1(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce;
+ unsigned char *continueflag;
+ unsigned char *authdata;
+ unsigned char testhmac[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ if (!chip)
+ return -ENODEV;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
+ return -EINVAL;
+ authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
+ continueflag = authdata - 1;
+ enonce = continueflag - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
+ TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
+ 1, continueflag, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(TSS_checkhmac1);
+
+/*
+ * verify the AUTH2_COMMAND (unseal) result from TPM
+ */
+static int TSS_checkhmac2(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key1,
+ unsigned int keylen1,
+ const unsigned char *key2,
+ unsigned int keylen2, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce1;
+ unsigned char *continueflag1;
+ unsigned char *authdata1;
+ unsigned char *enonce2;
+ unsigned char *continueflag2;
+ unsigned char *authdata2;
+ unsigned char testhmac1[SHA1_DIGEST_SIZE];
+ unsigned char testhmac2[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
+ return -EINVAL;
+ authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+ + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
+ authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
+ continueflag1 = authdata1 - 1;
+ continueflag2 = authdata2 - 1;
+ enonce1 = continueflag1 - TPM_NONCE_SIZE;
+ enonce2 = continueflag2 - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen2);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce1,
+ TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce2,
+ TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+/*
+ * For key-specific TPM requests, we will generate and send our
+ * own TPM command packets using the driver's send function.
+ */
+int trusted_tpm_send(unsigned char *cmd, size_t buflen)
+{
+ int rc;
+
+ if (!chip)
+ return -ENODEV;
+
+ dump_tpm_buf(cmd);
+ rc = tpm_send(chip, cmd, buflen);
+ dump_tpm_buf(cmd);
+ if (rc > 0)
+ /* Can't return positive return code values to keyctl */
+ rc = -EPERM;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(trusted_tpm_send);
+
+/*
+ * Lock a trusted key by extending a selected PCR.
+ *
+ * Prevents a trusted key that is sealed to PCRs from being accessed.
+ * This uses the TPM driver's extend function.
+ */
+static int pcrlock(const int pcrnum)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return tpm_pcr_extend(chip, pcrnum, digests) ? -EINVAL : 0;
+}
+
+/*
+ * Create an object specific authorisation protocol (OSAP) session
+ */
+static int osap(struct tpm_buf *tb, struct osapsess *s,
+ const unsigned char *key, uint16_t type, uint32_t handle)
+{
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char ononce[TPM_NONCE_SIZE];
+ int ret;
+
+ ret = tpm_get_random(chip, ononce, TPM_NONCE_SIZE);
+ if (ret < 0)
+ return ret;
+
+ if (ret != TPM_NONCE_SIZE)
+ return -EIO;
+
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ tpm_buf_append_u16(tb, type);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append(tb, ononce, TPM_NONCE_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
+ TPM_NONCE_SIZE);
+ memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
+ TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
+ return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
+ enonce, TPM_NONCE_SIZE, ononce, 0, 0);
+}
+
+/*
+ * Create an object independent authorisation protocol (OIAP) session
+ */
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+{
+ int ret;
+
+ if (!chip)
+ return -ENODEV;
+
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ *handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
+ TPM_NONCE_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(oiap);
+
+struct tpm_digests {
+ unsigned char encauth[SHA1_DIGEST_SIZE];
+ unsigned char pubauth[SHA1_DIGEST_SIZE];
+ unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
+ unsigned char xorhash[SHA1_DIGEST_SIZE];
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+};
+
+/*
+ * Have the TPM seal(encrypt) the trusted key, possibly based on
+ * Platform Configuration Registers (PCRs). AUTH1 for sealing key.
+ */
+static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *data, uint32_t datalen,
+ unsigned char *blob, uint32_t *bloblen,
+ const unsigned char *blobauth,
+ const unsigned char *pcrinfo, uint32_t pcrinfosize)
+{
+ struct osapsess sess;
+ struct tpm_digests *td;
+ unsigned char cont;
+ uint32_t ordinal;
+ uint32_t pcrsize;
+ uint32_t datsize;
+ int sealinfosize;
+ int encdatasize;
+ int storedsize;
+ int ret;
+ int i;
+
+ /* alloc some work space for all the hashes */
+ td = kmalloc(sizeof *td, GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
+
+ /* get session for sealing key */
+ ret = osap(tb, &sess, keyauth, keytype, keyhandle);
+ if (ret < 0)
+ goto out;
+ dump_sess(&sess);
+
+ /* calculate encrypted authorization value */
+ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
+ memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
+ ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
+ if (ret < 0)
+ goto out;
+
+ ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0)
+ goto out;
+
+ if (ret != TPM_NONCE_SIZE) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ordinal = htonl(TPM_ORD_SEAL);
+ datsize = htonl(datalen);
+ pcrsize = htonl(pcrinfosize);
+ cont = 0;
+
+ /* encrypt data authorization key */
+ for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
+ td->encauth[i] = td->xorhash[i] ^ blobauth[i];
+
+ /* calculate authorization HMAC value */
+ if (pcrinfosize == 0) {
+ /* no pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ sizeof(uint32_t), &datsize, datalen, data, 0,
+ 0);
+ } else {
+ /* pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ pcrinfosize, pcrinfo, sizeof(uint32_t),
+ &datsize, datalen, data, 0, 0);
+ }
+ if (ret < 0)
+ goto out;
+
+ /* build and send the TPM request packet */
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, td->encauth, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, pcrinfosize);
+ tpm_buf_append(tb, pcrinfo, pcrinfosize);
+ tpm_buf_append_u32(tb, datalen);
+ tpm_buf_append(tb, data, datalen);
+ tpm_buf_append_u32(tb, sess.handle);
+ tpm_buf_append(tb, td->nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, td->pubauth, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ goto out;
+
+ /* calculate the size of the returned Blob */
+ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
+ encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
+ sizeof(uint32_t) + sealinfosize);
+ storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
+ sizeof(uint32_t) + encdatasize;
+
+ /* check the HMAC in the response */
+ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
+ SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
+ 0);
+
+ /* copy the returned blob to caller */
+ if (!ret) {
+ memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
+ *bloblen = storedsize;
+ }
+out:
+ kfree_sensitive(td);
+ return ret;
+}
+
+/*
+ * Use the AUTH2_COMMAND form of unseal to authorize both the key and
+ * the blob.
+ */
+static int tpm_unseal(struct tpm_buf *tb,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *blob, int bloblen,
+ const unsigned char *blobauth,
+ unsigned char *data, unsigned int *datalen)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce1[TPM_NONCE_SIZE];
+ unsigned char enonce2[TPM_NONCE_SIZE];
+ unsigned char authdata1[SHA1_DIGEST_SIZE];
+ unsigned char authdata2[SHA1_DIGEST_SIZE];
+ uint32_t authhandle1 = 0;
+ uint32_t authhandle2 = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ int ret;
+
+ /* sessions for unsealing key and data */
+ ret = oiap(tb, &authhandle1, enonce1);
+ if (ret < 0) {
+ pr_info("oiap failed (%d)\n", ret);
+ return ret;
+ }
+ ret = oiap(tb, &authhandle2, enonce2);
+ if (ret < 0) {
+ pr_info("oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ ordinal = htonl(TPM_ORD_UNSEAL);
+ ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0)
+ return ret;
+
+ if (ret != TPM_NONCE_SIZE) {
+ pr_info("tpm_get_random failed (%d)\n", ret);
+ return -EIO;
+ }
+ ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+ enonce1, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
+ enonce2, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build and send TPM request packet */
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_UNSEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle1);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata1, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, authhandle2);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata2, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+		pr_info("trusted_tpm_send failed (%d)\n", ret);
+ return ret;
+ }
+
+ *datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+ ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
+ keyauth, SHA1_DIGEST_SIZE,
+ blobauth, SHA1_DIGEST_SIZE,
+ sizeof(uint32_t), TPM_DATA_OFFSET,
+ *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
+ 0);
+ if (ret < 0) {
+ pr_info("TSS_checkhmac2 failed (%d)\n", ret);
+ return ret;
+ }
+ memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
+ return 0;
+}
+
+/*
+ * Have the TPM seal (encrypt) the symmetric key
+ */
+static int key_seal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf tb;
+ int ret;
+
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
+
+ /* include migratable flag at end of sealed key */
+ p->key[p->key_len] = p->migratable;
+
+ ret = tpm_seal(&tb, o->keytype, o->keyhandle, o->keyauth,
+ p->key, p->key_len + 1, p->blob, &p->blob_len,
+ o->blobauth, o->pcrinfo, o->pcrinfo_len);
+ if (ret < 0)
+ pr_info("srkseal failed (%d)\n", ret);
+
+ tpm_buf_destroy(&tb);
+ return ret;
+}
+
+/*
+ * Have the TPM unseal (decrypt) the symmetric key
+ */
+static int key_unseal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf tb;
+ int ret;
+
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ o->blobauth, p->key, &p->key_len);
+ if (ret < 0)
+ pr_info("srkunseal failed (%d)\n", ret);
+ else
+ /* pull migratable flag out of sealed key */
+ p->migratable = p->key[--p->key_len];
+
+ tpm_buf_destroy(&tb);
+ return ret;
+}
+
+enum {
+ Opt_err,
+ Opt_keyhandle, Opt_keyauth, Opt_blobauth,
+ Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
+ Opt_hash,
+ Opt_policydigest,
+ Opt_policyhandle,
+};
+
+static const match_table_t key_tokens = {
+ {Opt_keyhandle, "keyhandle=%s"},
+ {Opt_keyauth, "keyauth=%s"},
+ {Opt_blobauth, "blobauth=%s"},
+ {Opt_pcrinfo, "pcrinfo=%s"},
+ {Opt_pcrlock, "pcrlock=%s"},
+ {Opt_migratable, "migratable=%s"},
+ {Opt_hash, "hash=%s"},
+ {Opt_policydigest, "policydigest=%s"},
+ {Opt_policyhandle, "policyhandle=%s"},
+ {Opt_err, NULL}
+};
+
+/* can have zero or more token= options */
+static int getoptions(char *c, struct trusted_key_payload *pay,
+ struct trusted_key_options *opt)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p = c;
+ int token;
+ int res;
+ unsigned long handle;
+ unsigned long lock;
+ unsigned long token_mask = 0;
+ unsigned int digest_len;
+ int i;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
+
+ opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
+
+ if (!c)
+ return 0;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, key_tokens, args);
+ if (test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+
+ switch (token) {
+ case Opt_pcrinfo:
+ opt->pcrinfo_len = strlen(args[0].from) / 2;
+ if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_keyhandle:
+ res = kstrtoul(args[0].from, 16, &handle);
+ if (res < 0)
+ return -EINVAL;
+ opt->keytype = SEAL_keytype;
+ opt->keyhandle = handle;
+ break;
+ case Opt_keyauth:
+ if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_blobauth:
+ /*
+		 * TPM 1.2 authorizations are SHA-1 hashes passed in as
+		 * hex strings. TPM 2.0 authorizations are simple
+		 * passwords (although a hash can be given as well).
+ */
+ opt->blobauth_len = strlen(args[0].from);
+
+ if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
+ res = hex2bin(opt->blobauth, args[0].from,
+ TPM_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+
+ opt->blobauth_len = TPM_DIGEST_SIZE;
+ break;
+ }
+
+ if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
+ memcpy(opt->blobauth, args[0].from,
+ opt->blobauth_len);
+ break;
+ }
+
+			return -EINVAL;
+
+ case Opt_migratable:
+ if (*args[0].from == '0')
+ pay->migratable = 0;
+ else if (*args[0].from != '1')
+ return -EINVAL;
+ break;
+ case Opt_pcrlock:
+ res = kstrtoul(args[0].from, 10, &lock);
+ if (res < 0)
+ return -EINVAL;
+ opt->pcrlock = lock;
+ break;
+ case Opt_hash:
+ if (test_bit(Opt_policydigest, &token_mask))
+ return -EINVAL;
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (!strcmp(args[0].from, hash_algo_name[i])) {
+ opt->hash = i;
+ break;
+ }
+ }
+ if (i == HASH_ALGO__LAST)
+ return -EINVAL;
+ if (!tpm2 && i != HASH_ALGO_SHA1) {
+ pr_info("TPM 1.x only supports SHA-1.\n");
+ return -EINVAL;
+ }
+ break;
+ case Opt_policydigest:
+ digest_len = hash_digest_size[opt->hash];
+ if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
+ return -EINVAL;
+ res = hex2bin(opt->policydigest, args[0].from,
+ digest_len);
+ if (res < 0)
+ return -EINVAL;
+ opt->policydigest_len = digest_len;
+ break;
+ case Opt_policyhandle:
+ if (!tpm2)
+ return -EINVAL;
+ res = kstrtoul(args[0].from, 16, &handle);
+ if (res < 0)
+ return -EINVAL;
+ opt->policyhandle = handle;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
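+
+/*
+ * For illustration, the option strings parsed by getoptions() above are
+ * the trusted-key payloads documented in
+ * Documentation/security/keys/trusted-encrypted.rst, e.g. (key names and
+ * handles below are examples only):
+ *
+ *   keyctl add trusted kmk "new 32" @u
+ *   keyctl add trusted kmk "new 32 keyhandle=0x40000000 pcrlock=7" @u
+ *   keyctl add trusted kmk "load `cat kmk.blob` keyhandle=0x40000000" @u
+ */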
+
+static struct trusted_key_options *trusted_options_alloc(void)
+{
+ struct trusted_key_options *options;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return NULL;
+
+ options = kzalloc(sizeof *options, GFP_KERNEL);
+ if (options) {
+ /* set any non-zero defaults */
+ options->keytype = SRK_keytype;
+
+ if (!tpm2)
+ options->keyhandle = SRKHANDLE;
+ }
+ return options;
+}
+
+static int trusted_tpm_seal(struct trusted_key_payload *p, char *datablob)
+{
+ struct trusted_key_options *options = NULL;
+ int ret = 0;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
+
+ options = trusted_options_alloc();
+ if (!options)
+ return -ENOMEM;
+
+ ret = getoptions(datablob, p, options);
+ if (ret < 0)
+ goto out;
+ dump_options(options);
+
+ if (!options->keyhandle && !tpm2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tpm2)
+ ret = tpm2_seal_trusted(chip, p, options);
+ else
+ ret = key_seal(p, options);
+ if (ret < 0) {
+ pr_info("key_seal failed (%d)\n", ret);
+ goto out;
+ }
+
+ if (options->pcrlock) {
+ ret = pcrlock(options->pcrlock);
+ if (ret < 0) {
+ pr_info("pcrlock failed (%d)\n", ret);
+ goto out;
+ }
+ }
+out:
+ kfree_sensitive(options);
+ return ret;
+}
+
+static int trusted_tpm_unseal(struct trusted_key_payload *p, char *datablob)
+{
+ struct trusted_key_options *options = NULL;
+ int ret = 0;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
+
+ options = trusted_options_alloc();
+ if (!options)
+ return -ENOMEM;
+
+ ret = getoptions(datablob, p, options);
+ if (ret < 0)
+ goto out;
+ dump_options(options);
+
+ if (!options->keyhandle && !tpm2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tpm2)
+ ret = tpm2_unseal_trusted(chip, p, options);
+ else
+ ret = key_unseal(p, options);
+ if (ret < 0)
+ pr_info("key_unseal failed (%d)\n", ret);
+
+ if (options->pcrlock) {
+ ret = pcrlock(options->pcrlock);
+ if (ret < 0) {
+ pr_info("pcrlock failed (%d)\n", ret);
+ goto out;
+ }
+ }
+out:
+ kfree_sensitive(options);
+ return ret;
+}
+
+static int trusted_tpm_get_random(unsigned char *key, size_t key_len)
+{
+ return tpm_get_random(chip, key, key_len);
+}
+
+static void trusted_shash_release(void)
+{
+ if (hashalg)
+ crypto_free_shash(hashalg);
+ if (hmacalg)
+ crypto_free_shash(hmacalg);
+}
+
+static int __init trusted_shash_alloc(void)
+{
+ int ret;
+
+ hmacalg = crypto_alloc_shash(hmac_alg, 0, 0);
+ if (IS_ERR(hmacalg)) {
+ pr_info("could not allocate crypto %s\n",
+ hmac_alg);
+ return PTR_ERR(hmacalg);
+ }
+
+ hashalg = crypto_alloc_shash(hash_alg, 0, 0);
+ if (IS_ERR(hashalg)) {
+ pr_info("could not allocate crypto %s\n",
+ hash_alg);
+ ret = PTR_ERR(hashalg);
+ goto hashalg_fail;
+ }
+
+ return 0;
+
+hashalg_fail:
+ crypto_free_shash(hmacalg);
+ return ret;
+}
+
+static int __init init_digests(void)
+{
+ int i;
+
+ digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ GFP_KERNEL);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ digests[i].alg_id = chip->allocated_banks[i].alg_id;
+
+ return 0;
+}
+
+static int __init trusted_tpm_init(void)
+{
+ int ret;
+
+ chip = tpm_default_chip();
+ if (!chip)
+ return -ENODEV;
+
+ ret = init_digests();
+ if (ret < 0)
+ goto err_put;
+ ret = trusted_shash_alloc();
+ if (ret < 0)
+ goto err_free;
+ ret = register_key_type(&key_type_trusted);
+ if (ret < 0)
+ goto err_release;
+ return 0;
+err_release:
+ trusted_shash_release();
+err_free:
+ kfree(digests);
+err_put:
+ put_device(&chip->dev);
+ return ret;
+}
+
+static void trusted_tpm_exit(void)
+{
+ if (chip) {
+ put_device(&chip->dev);
+ kfree(digests);
+ trusted_shash_release();
+ unregister_key_type(&key_type_trusted);
+ }
+}
+
+struct trusted_key_ops trusted_key_tpm_ops = {
+ .migratable = 1, /* migratable by default */
+ .init = trusted_tpm_init,
+ .seal = trusted_tpm_seal,
+ .unseal = trusted_tpm_unseal,
+ .get_random = trusted_tpm_get_random,
+ .exit = trusted_tpm_exit,
+};
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
new file mode 100644
index 000000000..bc700f85f
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ */
+
+#include <linux/asn1_encoder.h>
+#include <linux/oid_registry.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted-type.h>
+#include <keys/trusted_tpm.h>
+
+#include <asm/unaligned.h>
+
+#include "tpm2key.asn1.h"
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+static u32 tpm2key_oid[] = { 2, 23, 133, 10, 1, 5 };
+
+static int tpm2_key_encode(struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u8 *src, u32 len)
+{
+ const int SCRATCH_SIZE = PAGE_SIZE;
+ u8 *scratch = kmalloc(SCRATCH_SIZE, GFP_KERNEL);
+ u8 *work = scratch, *work1;
+ u8 *end_work = scratch + SCRATCH_SIZE;
+ u8 *priv, *pub;
+ u16 priv_len, pub_len;
+
+ priv_len = get_unaligned_be16(src) + 2;
+ priv = src;
+
+ src += priv_len;
+
+ pub_len = get_unaligned_be16(src) + 2;
+ pub = src;
+
+ if (!scratch)
+ return -ENOMEM;
+
+ work = asn1_encode_oid(work, end_work, tpm2key_oid,
+ asn1_oid_len(tpm2key_oid));
+
+ if (options->blobauth_len == 0) {
+ unsigned char bool[3], *w = bool;
+ /* tag 0 is emptyAuth */
+ w = asn1_encode_boolean(w, w + sizeof(bool), true);
+		if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) {
+			kfree(scratch);
+			return PTR_ERR(w);
+		}
+ work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+ }
+
+ /*
+ * Assume both octet strings will encode to a 2 byte definite length
+ *
+ * Note: For a well behaved TPM, this warning should never
+ * trigger, so if it does there's something nefarious going on
+ */
+	if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+		 "BUG: scratch buffer is too small")) {
+		kfree(scratch);
+		return -EINVAL;
+	}
+
+ work = asn1_encode_integer(work, end_work, options->keyhandle);
+ work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+ work = asn1_encode_octet_string(work, end_work, priv, priv_len);
+
+	work1 = payload->blob;
+	work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+				     scratch, work - scratch);
+	kfree(scratch);		/* scratch is not needed past this point */
+	if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+		return PTR_ERR(work1);
+
+	return work1 - payload->blob;
+}
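+
+/*
+ * For reference, the blob emitted by tpm2_key_encode() follows the shape
+ * of the TPMKey type from tpm2key.asn1 (sketched here for illustration):
+ *
+ *   TPMKey ::= SEQUENCE {
+ *       type       OBJECT IDENTIFIER,            -- 2.23.133.10.1.5
+ *       emptyAuth  [0] EXPLICIT BOOLEAN OPTIONAL,
+ *       parent     INTEGER,                      -- options->keyhandle
+ *       pubkey     OCTET STRING,                 -- TPM2B_PUBLIC
+ *       privkey    OCTET STRING                  -- TPM2B_PRIVATE
+ *   }
+ */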
+
+struct tpm2_key_context {
+ u32 parent;
+ const u8 *pub;
+ u32 pub_len;
+ const u8 *priv;
+ u32 priv_len;
+};
+
+static int tpm2_key_decode(struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u8 **buf)
+{
+ int ret;
+ struct tpm2_key_context ctx;
+ u8 *blob;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ret = asn1_ber_decoder(&tpm2key_decoder, &ctx, payload->blob,
+ payload->blob_len);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.priv_len + ctx.pub_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+
+ blob = kmalloc(ctx.priv_len + ctx.pub_len + 4, GFP_KERNEL);
+ if (!blob)
+ return -ENOMEM;
+
+ *buf = blob;
+ options->keyhandle = ctx.parent;
+
+ memcpy(blob, ctx.priv, ctx.priv_len);
+ blob += ctx.priv_len;
+
+ memcpy(blob, ctx.pub, ctx.pub_len);
+
+ return 0;
+}
+
+int tpm2_key_parent(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+ const u8 *v = value;
+ int i;
+
+ ctx->parent = 0;
+ for (i = 0; i < vlen; i++) {
+ ctx->parent <<= 8;
+ ctx->parent |= v[i];
+ }
+
+ return 0;
+}
+
+int tpm2_key_type(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ enum OID oid = look_up_OID(value, vlen);
+
+ if (oid != OID_TPMSealedData) {
+ char buffer[50];
+
+ sprint_oid(value, vlen, buffer, sizeof(buffer));
+ pr_debug("OID is \"%s\" which is not TPMSealedData\n",
+ buffer);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int tpm2_key_pub(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+
+ ctx->pub = value;
+ ctx->pub_len = vlen;
+
+ return 0;
+}
+
+int tpm2_key_priv(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm2_key_context *ctx = context;
+
+ ctx->priv = value;
+ ctx->priv_len = vlen;
+
+ return 0;
+}
+
+/**
+ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
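+
+/*
+ * Illustration of the bytes appended above for a plain password session
+ * (nonce_len == 0), as used by tpm2_seal_trusted() below:
+ *
+ *   u32 size              9 + nonce_len + hmac_len
+ *   u32 session_handle    TPM2_RS_PW (0x40000009)
+ *   u16 nonce_len         0
+ *   u8  attributes        0 or TPM2_SA_CONTINUE_SESSION
+ *   u16 hmac_len          password length
+ *   u8  hmac[hmac_len]    password bytes
+ */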
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ int blob_len = 0;
+ struct tpm_buf buf;
+ u32 hash;
+ u32 flags;
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ if (options->hash == tpm2_hash_map[i].crypto_id) {
+ hash = tpm2_hash_map[i].tpm_id;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+ if (!options->keyhandle)
+ return -EINVAL;
+
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc) {
+ tpm_put_ops(chip);
+ return rc;
+ }
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len);
+
+ tpm_buf_append_u16(&buf, options->blobauth_len);
+ if (options->blobauth_len)
+ tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
+
+ tpm_buf_append_u16(&buf, payload->key_len);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* key properties */
+ flags = 0;
+ flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
+ flags |= payload->migratable ? 0 : (TPM2_OA_FIXED_TPM |
+ TPM2_OA_FIXED_PARENT);
+ tpm_buf_append_u32(&buf, flags);
+
+ /* policy */
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ if (options->policydigest_len)
+ tpm_buf_append(&buf, options->policydigest,
+ options->policydigest_len);
+
+ /* public parameters */
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ blob_len = tpm2_key_encode(payload, options,
+ &buf.data[TPM_HEADER_SIZE + 4],
+ blob_len);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0) {
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+ rc = -EINVAL;
+ else
+ rc = -EPERM;
+ }
+ if (blob_len < 0)
+ rc = blob_len;
+ else
+ payload->blob_len = blob_len;
+
+ tpm_put_ops(chip);
+ return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ *
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ *        -EPERM on TPM error status.
+ *        < 0 error from tpm_transmit_cmd().
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ u8 *blob, *pub;
+ int rc;
+ u32 attrs;
+
+ rc = tpm2_key_decode(payload, options, &blob);
+ if (rc) {
+ /* old form */
+ blob = payload->blob;
+ payload->old_format = 1;
+ }
+
+ /* new format carries keyhandle but old format doesn't */
+ if (!options->keyhandle)
+ return -EINVAL;
+
+ /* must be big enough for at least the two be16 size counts */
+ if (payload->blob_len < 4)
+ return -EINVAL;
+
+ private_len = get_unaligned_be16(blob);
+
+ /* must be big enough for following public_len */
+ if (private_len + 2 + 2 > (payload->blob_len))
+ return -E2BIG;
+
+ public_len = get_unaligned_be16(blob + 2 + private_len);
+ if (private_len + 2 + public_len + 2 > payload->blob_len)
+ return -E2BIG;
+
+ pub = blob + 2 + private_len + 2;
+ /* key attributes are always at offset 4 */
+ attrs = get_unaligned_be32(pub + 4);
+
+ if ((attrs & (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT)) ==
+ (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT))
+ payload->migratable = 0;
+ else
+ payload->migratable = 1;
+
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ if (blob != payload->blob)
+ kfree(blob);
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ *
+ * Return: 0 on success
+ *         -EPERM on TPM error status
+ *         < 0 error from tpm_transmit_cmd()
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ u16 data_len;
+ u8 *data;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ options->blobauth_len);
+
+ rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ data_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+ data = &buf.data[TPM_HEADER_SIZE + 6];
+
+ if (payload->old_format) {
+ /* migratable flag is at the end of the key */
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ } else {
+ /*
+ * migratable flag already collected from key
+ * attributes
+ */
+ memcpy(payload->key, data, data_len);
+ payload->key_len = data_len;
+ }
+ }
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_transmit_cmd().
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
+ if (rc)
+ goto out;
+
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+ tpm2_flush_context(chip, blob_handle);
+
+out:
+ tpm_put_ops(chip);
+
+ return rc;
+}
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
new file mode 100644
index 000000000..749e2a4dc
--- /dev/null
+++ b/security/keys/user_defined.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* user_defined.c: user defined key type
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <linux/uaccess.h>
+#include "internal.h"
+
+static int logon_vet_description(const char *desc);
+
+/*
+ * user defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_user = {
+ .name = "user",
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .update = user_update,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_user);
+
+/*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+ .name = "logon",
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .update = user_update,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .vet_description = logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
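+
+/*
+ * Example userspace usage (key names are illustrative only):
+ *
+ *   keyctl add user debug:mykey "some payload" @u   # readable via keyctl read
+ *   keyctl add logon krb:mykey "some secret" @u     # no .read op; description
+ *                                                   # must be "<prefix>:<name>"
+ */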
+
+/*
+ * Preparse a user defined key payload
+ */
+int user_preparse(struct key_preparsed_payload *prep)
+{
+ struct user_key_payload *upayload;
+ size_t datalen = prep->datalen;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
+ if (!upayload)
+ return -ENOMEM;
+
+ /* attach the data */
+ prep->quotalen = datalen;
+ prep->payload.data[0] = upayload;
+ upayload->datalen = datalen;
+ memcpy(upayload->data, prep->data, datalen);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(user_preparse);
+
+/*
+ * Free a preparse of a user defined key payload
+ */
+void user_free_preparse(struct key_preparsed_payload *prep)
+{
+ kfree_sensitive(prep->payload.data[0]);
+}
+EXPORT_SYMBOL_GPL(user_free_preparse);
+
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+ struct user_key_payload *payload;
+
+ payload = container_of(head, struct user_key_payload, rcu);
+ kfree_sensitive(payload);
+}
+
+/*
+ * update a user defined key
+ * - the key's semaphore is write-locked
+ */
+int user_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct user_key_payload *zap = NULL;
+ int ret;
+
+ /* check the quota and attach the new data */
+ ret = key_payload_reserve(key, prep->datalen);
+ if (ret < 0)
+ return ret;
+
+ /* attach the new data, displacing the old */
+ key->expiry = prep->expiry;
+ if (key_is_positive(key))
+ zap = dereference_key_locked(key);
+ rcu_assign_keypointer(key, prep->payload.data[0]);
+ prep->payload.data[0] = NULL;
+
+ if (zap)
+ call_rcu(&zap->rcu, user_free_payload_rcu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(user_update);
+
+/*
+ * dispose of the payload of a revoked user defined key
+ * - called with the key sem write-locked
+ */
+void user_revoke(struct key *key)
+{
+ struct user_key_payload *upayload = user_key_payload_locked(key);
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+
+ if (upayload) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&upayload->rcu, user_free_payload_rcu);
+ }
+}
+
+EXPORT_SYMBOL(user_revoke);
+
+/*
+ * dispose of the data dangling from the corpse of a user key
+ */
+void user_destroy(struct key *key)
+{
+ struct user_key_payload *upayload = key->payload.data[0];
+
+ kfree_sensitive(upayload);
+}
+
+EXPORT_SYMBOL_GPL(user_destroy);
+
+/*
+ * describe the user key
+ */
+void user_describe(const struct key *key, struct seq_file *m)
+{
+ seq_puts(m, key->description);
+ if (key_is_positive(key))
+ seq_printf(m, ": %u", key->datalen);
+}
+
+EXPORT_SYMBOL_GPL(user_describe);
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long user_read(const struct key *key, char *buffer, size_t buflen)
+{
+ const struct user_key_payload *upayload;
+ long ret;
+
+ upayload = user_key_payload_locked(key);
+ ret = upayload->datalen;
+
+ /* we can return the data as is */
+ if (buffer && buflen > 0) {
+ if (buflen > upayload->datalen)
+ buflen = upayload->datalen;
+
+ memcpy(buffer, upayload->data, buflen);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+ char *p;
+
+ /* require a "qualified" description string */
+ p = strchr(desc, ':');
+ if (!p)
+ return -EINVAL;
+
+ /* also reject description with ':' as first char */
+ if (p == desc)
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig
new file mode 100644
index 000000000..8e33c4e8f
--- /dev/null
+++ b/security/landlock/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SECURITY_LANDLOCK
+ bool "Landlock support"
+ depends on SECURITY && !ARCH_EPHEMERAL_INODES
+ select SECURITY_PATH
+ help
+ Landlock is a sandboxing mechanism that enables processes to restrict
+ themselves (and their future children) by gradually enforcing
+ tailored access control policies. A Landlock security policy is a
+ set of access rights (e.g. open a file in read-only, make a
+ directory, etc.) tied to a file hierarchy. Such policy can be
+ configured and enforced by any processes for themselves using the
+ dedicated system calls: landlock_create_ruleset(),
+ landlock_add_rule(), and landlock_restrict_self().
+
+ See Documentation/userspace-api/landlock.rst for further information.
+
+ If you are unsure how to answer this question, answer N. Otherwise,
+ you should also prepend "landlock," to the content of CONFIG_LSM to
+ enable Landlock at boot time.
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
new file mode 100644
index 000000000..7bbd2f413
--- /dev/null
+++ b/security/landlock/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
+
+landlock-y := setup.o syscalls.o object.o ruleset.o \
+ cred.o ptrace.o fs.o
diff --git a/security/landlock/common.h b/security/landlock/common.h
new file mode 100644
index 000000000..5dc0fe157
--- /dev/null
+++ b/security/landlock/common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Common constants and helpers
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_COMMON_H
+#define _SECURITY_LANDLOCK_COMMON_H
+
+#define LANDLOCK_NAME "landlock"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) LANDLOCK_NAME ": " fmt
+
+#endif /* _SECURITY_LANDLOCK_COMMON_H */
diff --git a/security/landlock/cred.c b/security/landlock/cred.c
new file mode 100644
index 000000000..ec6c37f04
--- /dev/null
+++ b/security/landlock/cred.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Credential hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/cred.h>
+#include <linux/lsm_hooks.h>
+
+#include "common.h"
+#include "cred.h"
+#include "ruleset.h"
+#include "setup.h"
+
+static int hook_cred_prepare(struct cred *const new,
+ const struct cred *const old, const gfp_t gfp)
+{
+ struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
+
+ if (old_dom) {
+ landlock_get_ruleset(old_dom);
+ landlock_cred(new)->domain = old_dom;
+ }
+ return 0;
+}
+
+static void hook_cred_free(struct cred *const cred)
+{
+ struct landlock_ruleset *const dom = landlock_cred(cred)->domain;
+
+ if (dom)
+ landlock_put_ruleset_deferred(dom);
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
+ LSM_HOOK_INIT(cred_free, hook_cred_free),
+};
+
+__init void landlock_add_cred_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
diff --git a/security/landlock/cred.h b/security/landlock/cred.h
new file mode 100644
index 000000000..af89ab00e
--- /dev/null
+++ b/security/landlock/cred.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Credential hooks
+ *
+ * Copyright © 2019-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_CRED_H
+#define _SECURITY_LANDLOCK_CRED_H
+
+#include <linux/cred.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+
+#include "ruleset.h"
+#include "setup.h"
+
+struct landlock_cred_security {
+ struct landlock_ruleset *domain;
+};
+
+static inline struct landlock_cred_security *
+landlock_cred(const struct cred *cred)
+{
+ return cred->security + landlock_blob_sizes.lbs_cred;
+}
+
+static inline const struct landlock_ruleset *landlock_get_current_domain(void)
+{
+ return landlock_cred(current_cred())->domain;
+}
+
+/*
+ * The call needs to come from an RCU read-side critical section.
+ */
+static inline const struct landlock_ruleset *
+landlock_get_task_domain(const struct task_struct *const task)
+{
+ return landlock_cred(__task_cred(task))->domain;
+}
+
+static inline bool landlocked(const struct task_struct *const task)
+{
+ bool has_dom;
+
+ if (task == current)
+ return !!landlock_get_current_domain();
+
+ rcu_read_lock();
+ has_dom = !!landlock_get_task_domain(task);
+ rcu_read_unlock();
+ return has_dom;
+}
+
+__init void landlock_add_cred_hooks(void);
+
+#endif /* _SECURITY_LANDLOCK_CRED_H */
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
new file mode 100644
index 000000000..64ed76654
--- /dev/null
+++ b/security/landlock/fs.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Filesystem management and hooks
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ * Copyright © 2021-2022 Microsoft Corporation
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lsm_hooks.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/path.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/wait_bit.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/landlock.h>
+
+#include "common.h"
+#include "cred.h"
+#include "fs.h"
+#include "limits.h"
+#include "object.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/* Underlying object management */
+
+static void release_inode(struct landlock_object *const object)
+ __releases(object->lock)
+{
+ struct inode *const inode = object->underobj;
+ struct super_block *sb;
+
+ if (!inode) {
+ spin_unlock(&object->lock);
+ return;
+ }
+
+ /*
+ * Protects against concurrent use by hook_sb_delete() of the reference
+ * to the underlying inode.
+ */
+ object->underobj = NULL;
+ /*
+ * Makes sure that if the filesystem is concurrently unmounted,
+ * hook_sb_delete() will wait for us to finish iput().
+ */
+ sb = inode->i_sb;
+ atomic_long_inc(&landlock_superblock(sb)->inode_refs);
+ spin_unlock(&object->lock);
+ /*
+ * Because object->underobj was not NULL, hook_sb_delete() and
+ * get_inode_object() guarantee that it is safe to reset
+ * landlock_inode(inode)->object while it is not NULL. It is therefore
+ * not necessary to lock inode->i_lock.
+ */
+ rcu_assign_pointer(landlock_inode(inode)->object, NULL);
+ /*
+ * Now, new rules can safely be tied to @inode with get_inode_object().
+ */
+
+ iput(inode);
+ if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
+ wake_up_var(&landlock_superblock(sb)->inode_refs);
+}
+
+static const struct landlock_object_underops landlock_fs_underops = {
+ .release = release_inode
+};
+
+/* Ruleset management */
+
+static struct landlock_object *get_inode_object(struct inode *const inode)
+{
+ struct landlock_object *object, *new_object;
+ struct landlock_inode_security *inode_sec = landlock_inode(inode);
+
+ rcu_read_lock();
+retry:
+ object = rcu_dereference(inode_sec->object);
+ if (object) {
+ if (likely(refcount_inc_not_zero(&object->usage))) {
+ rcu_read_unlock();
+ return object;
+ }
+ /*
+ * We are racing with release_inode(), the object is going
+ * away. Wait for release_inode(), then retry.
+ */
+ spin_lock(&object->lock);
+ spin_unlock(&object->lock);
+ goto retry;
+ }
+ rcu_read_unlock();
+
+ /*
+ * If there is no object tied to @inode, then create a new one (without
+ * holding any locks).
+ */
+ new_object = landlock_create_object(&landlock_fs_underops, inode);
+ if (IS_ERR(new_object))
+ return new_object;
+
+ /*
+ * Protects against concurrent calls to get_inode_object() or
+ * hook_sb_delete().
+ */
+ spin_lock(&inode->i_lock);
+ if (unlikely(rcu_access_pointer(inode_sec->object))) {
+ /* Someone else just created the object, bail out and retry. */
+ spin_unlock(&inode->i_lock);
+ kfree(new_object);
+
+ rcu_read_lock();
+ goto retry;
+ }
+
+ /*
+ * @inode will be released by hook_sb_delete() on its superblock
+ * shutdown, or by release_inode() when no more ruleset references the
+ * related object.
+ */
+ ihold(inode);
+ rcu_assign_pointer(inode_sec->object, new_object);
+ spin_unlock(&inode->i_lock);
+ return new_object;
+}
+
+/* All access rights that can be tied to files. */
+/* clang-format off */
+#define ACCESS_FILE ( \
+ LANDLOCK_ACCESS_FS_EXECUTE | \
+ LANDLOCK_ACCESS_FS_WRITE_FILE | \
+ LANDLOCK_ACCESS_FS_READ_FILE)
+/* clang-format on */
+
+/*
+ * All access rights that are denied by default whether they are handled or not
+ * by a ruleset/layer. This must be ORed with all ruleset->fs_access_masks[]
+ * entries when we need to get the absolute handled access masks.
+ */
+/* clang-format off */
+#define ACCESS_INITIALLY_DENIED ( \
+ LANDLOCK_ACCESS_FS_REFER)
+/* clang-format on */
+
+/*
+ * @path: Should have been checked by get_path_from_fd().
+ */
+int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
+ const struct path *const path,
+ access_mask_t access_rights)
+{
+ int err;
+ struct landlock_object *object;
+
+ /* Files only get access rights that make sense. */
+ if (!d_is_dir(path->dentry) &&
+ (access_rights | ACCESS_FILE) != ACCESS_FILE)
+ return -EINVAL;
+ if (WARN_ON_ONCE(ruleset->num_layers != 1))
+ return -EINVAL;
+
+ /* Transforms relative access rights to absolute ones. */
+ access_rights |=
+ LANDLOCK_MASK_ACCESS_FS &
+ ~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
+ object = get_inode_object(d_backing_inode(path->dentry));
+ if (IS_ERR(object))
+ return PTR_ERR(object);
+ mutex_lock(&ruleset->lock);
+ err = landlock_insert_rule(ruleset, object, access_rights);
+ mutex_unlock(&ruleset->lock);
+ /*
+ * No need to check for an error because landlock_insert_rule()
+ * increments the refcount for the new object if needed.
+ */
+ landlock_put_object(object);
+ return err;
+}
+
+/* Access-control management */
+
+/*
+ * The lifetime of the returned rule is tied to @domain.
+ *
+ * Returns NULL if no rule is found or if @dentry is negative.
+ */
+static inline const struct landlock_rule *
+find_rule(const struct landlock_ruleset *const domain,
+ const struct dentry *const dentry)
+{
+ const struct landlock_rule *rule;
+ const struct inode *inode;
+
+	/* Ignores nonexistent leaves. */
+ if (d_is_negative(dentry))
+ return NULL;
+
+ inode = d_backing_inode(dentry);
+ rcu_read_lock();
+ rule = landlock_find_rule(
+ domain, rcu_dereference(landlock_inode(inode)->object));
+ rcu_read_unlock();
+ return rule;
+}
+
+/*
+ * @layer_masks is read and may be updated according to the access request and
+ * the matching rule.
+ *
+ * Returns true if the request is allowed (i.e. relevant layer masks for the
+ * request are empty).
+ */
+static inline bool
+unmask_layers(const struct landlock_rule *const rule,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+{
+ size_t layer_level;
+
+ if (!access_request || !layer_masks)
+ return true;
+ if (!rule)
+ return false;
+
+ /*
+ * An access is granted if, for each policy layer, at least one rule
+ * encountered on the pathwalk grants the requested access,
+ * regardless of its position in the layer stack. We must then check
+ * the remaining layers for each inode, from the first added layer to
+	 * the last one. When there are multiple requested accesses, for each
+ * policy layer, the full set of requested accesses may not be granted
+ * by only one rule, but by the union (binary OR) of multiple rules.
+ * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
+ */
+ for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
+ const struct landlock_layer *const layer =
+ &rule->layers[layer_level];
+ const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
+ const unsigned long access_req = access_request;
+ unsigned long access_bit;
+ bool is_empty;
+
+ /*
+ * Records in @layer_masks which layer grants access to each
+ * requested access.
+ */
+ is_empty = true;
+ for_each_set_bit(access_bit, &access_req,
+ ARRAY_SIZE(*layer_masks)) {
+ if (layer->access & BIT_ULL(access_bit))
+ (*layer_masks)[access_bit] &= ~layer_bit;
+ is_empty = is_empty && !(*layer_masks)[access_bit];
+ }
+ if (is_empty)
+ return true;
+ }
+ return false;
+}
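+
+/*
+ * Worked example: with two policy layers that both handle
+ * LANDLOCK_ACCESS_FS_READ_FILE, (*layer_masks)[READ_FILE] starts as 0b11.
+ * A matching rule from layer 1 clears bit 0 (the mask becomes 0b10); the
+ * request is only granted once a rule from layer 2 is also found along
+ * the path walk, clearing bit 1 and leaving the mask empty.
+ */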
+
+/*
+ * Allows access to pseudo filesystems that will never be mountable (e.g.
+ * sockfs, pipefs), but are still reachable through
+ * /proc/<pid>/fd/<file-descriptor>.
+ */
+static inline bool is_nouser_or_private(const struct dentry *dentry)
+{
+ return (dentry->d_sb->s_flags & SB_NOUSER) ||
+ (d_is_positive(dentry) &&
+ unlikely(IS_PRIVATE(d_backing_inode(dentry))));
+}
+
+static inline access_mask_t
+get_handled_accesses(const struct landlock_ruleset *const domain)
+{
+ access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
+ size_t layer_level;
+
+ for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
+ access_dom |= domain->fs_access_masks[layer_level];
+ return access_dom & LANDLOCK_MASK_ACCESS_FS;
+}
+
+static inline access_mask_t
+init_layer_masks(const struct landlock_ruleset *const domain,
+ const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+{
+ access_mask_t handled_accesses = 0;
+ size_t layer_level;
+
+ memset(layer_masks, 0, sizeof(*layer_masks));
+ /* An empty access request can happen because of O_WRONLY | O_RDWR. */
+ if (!access_request)
+ return 0;
+
+ /* Saves all handled accesses per layer. */
+ for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
+ const unsigned long access_req = access_request;
+ unsigned long access_bit;
+
+ for_each_set_bit(access_bit, &access_req,
+ ARRAY_SIZE(*layer_masks)) {
+ /*
+ * Artificially handles all initially denied by default
+ * access rights.
+ */
+ if (BIT_ULL(access_bit) &
+ (domain->fs_access_masks[layer_level] |
+ ACCESS_INITIALLY_DENIED)) {
+ (*layer_masks)[access_bit] |=
+ BIT_ULL(layer_level);
+ handled_accesses |= BIT_ULL(access_bit);
+ }
+ }
+ }
+ return handled_accesses;
+}
+
+/*
+ * Check that a destination file hierarchy has more restrictions than a source
+ * file hierarchy. This is only used for link and rename actions.
+ *
+ * @layer_masks_child2: Optional child masks.
+ */
+static inline bool no_more_access(
+ const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
+ const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
+ const bool child1_is_directory,
+ const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
+ const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
+ const bool child2_is_directory)
+{
+ unsigned long access_bit;
+
+ for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
+ access_bit++) {
+ /* Ignores accesses that only make sense for directories. */
+ const bool is_file_access =
+ !!(BIT_ULL(access_bit) & ACCESS_FILE);
+
+ if (child1_is_directory || is_file_access) {
+ /*
+ * Checks if the destination restrictions are a
+ * superset of the source ones (i.e. inherited access
+ * rights without child exceptions):
+ * restrictions(parent2) >= restrictions(child1)
+ */
+ if ((((*layer_masks_parent1)[access_bit] &
+ (*layer_masks_child1)[access_bit]) |
+ (*layer_masks_parent2)[access_bit]) !=
+ (*layer_masks_parent2)[access_bit])
+ return false;
+ }
+
+ if (!layer_masks_child2)
+ continue;
+ if (child2_is_directory || is_file_access) {
+ /*
+ * Checks inverted restrictions for RENAME_EXCHANGE:
+ * restrictions(parent1) >= restrictions(child2)
+ */
+ if ((((*layer_masks_parent2)[access_bit] &
+ (*layer_masks_child2)[access_bit]) |
+ (*layer_masks_parent1)[access_bit]) !=
+ (*layer_masks_parent1)[access_bit])
+ return false;
+ }
+ }
+ return true;
+}
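+
+/*
+ * Example: if a layer still denies LANDLOCK_ACCESS_FS_WRITE_FILE under the
+ * source parent (bit set in @layer_masks_parent1, with no child exception)
+ * but not under the destination parent (bit clear in @layer_masks_parent2),
+ * then moving the file would grant it a new access right, so this helper
+ * returns false and the rename or link is refused.
+ */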
+
+/*
+ * Removes @layer_masks accesses that are not requested.
+ *
+ * Returns true if the request is allowed, false otherwise.
+ */
+static inline bool
+scope_to_request(const access_mask_t access_request,
+ layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+{
+ const unsigned long access_req = access_request;
+ unsigned long access_bit;
+
+ if (WARN_ON_ONCE(!layer_masks))
+ return true;
+
+ for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
+ (*layer_masks)[access_bit] = 0;
+ return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
+}
+
+/*
+ * Returns true if there is at least one access right different than
+ * LANDLOCK_ACCESS_FS_REFER.
+ */
+static inline bool
+is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
+ const access_mask_t access_request)
+{
+ unsigned long access_bit;
+ /* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
+ const unsigned long access_check = access_request &
+ ~LANDLOCK_ACCESS_FS_REFER;
+
+ if (!layer_masks)
+ return false;
+
+ for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
+ if ((*layer_masks)[access_bit])
+ return true;
+ }
+ return false;
+}
+
+/**
+ * check_access_path_dual - Check accesses for requests with a common path
+ *
+ * @domain: Domain to check against.
+ * @path: File hierarchy to walk through.
+ * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
+ * equal to @layer_masks_parent2 (if any). This is tied to the unique
+ * requested path for most actions, or the source in case of a refer action
+ * (i.e. rename or link), or the source and destination in case of
+ * RENAME_EXCHANGE.
+ * @layer_masks_parent1: Pointer to a matrix of layer masks per access
+ * masks, identifying the layers that forbid a specific access. Bits from
+ * this matrix can be unset according to the @path walk. An empty matrix
+ * means that @domain allows all possible Landlock accesses (i.e. not only
+ * those identified by @access_request_parent1). This matrix can
+ * initially refer to domain layer masks and, when the accesses for the
+ * destination and source are the same, to requested layer masks.
+ * @dentry_child1: Dentry to the initial child of the parent1 path. This
+ * pointer must be NULL for non-refer actions (i.e. not link nor rename).
+ * @access_request_parent2: Similar to @access_request_parent1 but for a
+ * request involving a source and a destination. This refers to the
+ * destination, except in case of RENAME_EXCHANGE where it also refers to
+ * the source. Must be set to 0 when using a simple path request.
+ * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
+ * action. This must be NULL otherwise.
+ * @dentry_child2: Dentry to the initial child of the parent2 path. This
+ * pointer is only set for RENAME_EXCHANGE actions and must be NULL
+ * otherwise.
+ *
+ * This helper first checks that the destination has a superset of restrictions
+ * compared to the source (if any) for a common path. Because of
+ * RENAME_EXCHANGE actions, source and destinations may be swapped. It then
+ * checks that the collected accesses and the remaining ones are enough to
+ * allow the request.
+ *
+ * Returns:
+ * - 0 if the access request is granted;
+ * - -EACCES if it is denied because of access right other than
+ * LANDLOCK_ACCESS_FS_REFER;
+ * - -EXDEV if the renaming or linking would be a privilege escalation
+ *   (according to each layered policy), or if LANDLOCK_ACCESS_FS_REFER is
+ * not allowed by the source or the destination.
+ */
+static int check_access_path_dual(
+ const struct landlock_ruleset *const domain,
+ const struct path *const path,
+ const access_mask_t access_request_parent1,
+ layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
+ const struct dentry *const dentry_child1,
+ const access_mask_t access_request_parent2,
+ layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
+ const struct dentry *const dentry_child2)
+{
+ bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
+ child1_is_directory = true, child2_is_directory = true;
+ struct path walker_path;
+ access_mask_t access_masked_parent1, access_masked_parent2;
+ layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
+ _layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
+ layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
+ (*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;
+
+ if (!access_request_parent1 && !access_request_parent2)
+ return 0;
+ if (WARN_ON_ONCE(!domain || !path))
+ return 0;
+ if (is_nouser_or_private(path->dentry))
+ return 0;
+ if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
+ return -EACCES;
+
+ if (unlikely(layer_masks_parent2)) {
+ if (WARN_ON_ONCE(!dentry_child1))
+ return -EACCES;
+ /*
+ * For a double request, first check for potential privilege
+ * escalation by looking at domain handled accesses (which are
+ * a superset of the meaningful requested accesses).
+ */
+ access_masked_parent1 = access_masked_parent2 =
+ get_handled_accesses(domain);
+ is_dom_check = true;
+ } else {
+ if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
+ return -EACCES;
+ /* For a simple request, only check for requested accesses. */
+ access_masked_parent1 = access_request_parent1;
+ access_masked_parent2 = access_request_parent2;
+ is_dom_check = false;
+ }
+
+ if (unlikely(dentry_child1)) {
+ unmask_layers(find_rule(domain, dentry_child1),
+ init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
+ &_layer_masks_child1),
+ &_layer_masks_child1);
+ layer_masks_child1 = &_layer_masks_child1;
+ child1_is_directory = d_is_dir(dentry_child1);
+ }
+ if (unlikely(dentry_child2)) {
+ unmask_layers(find_rule(domain, dentry_child2),
+ init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
+ &_layer_masks_child2),
+ &_layer_masks_child2);
+ layer_masks_child2 = &_layer_masks_child2;
+ child2_is_directory = d_is_dir(dentry_child2);
+ }
+
+ walker_path = *path;
+ path_get(&walker_path);
+ /*
+ * We need to walk through all the hierarchy to not miss any relevant
+ * restriction.
+ */
+ while (true) {
+ struct dentry *parent_dentry;
+ const struct landlock_rule *rule;
+
+ /*
+		 * If all accesses allowed on the destination are already
+		 * allowed on the source, or equivalently, if the destination
+		 * carries at least as many restrictions as the source, then
+		 * we can safely refer files from the source to the
+		 * destination without risking a privilege escalation.  This
+		 * also applies in the case of RENAME_EXCHANGE, which implies
+		 * checks in both directions.  This is crucial for standalone
+		 * multilayered security policies.  Furthermore, it helps
+		 * policy writers avoid shooting themselves in the foot.
+ */
+ if (unlikely(is_dom_check &&
+ no_more_access(
+ layer_masks_parent1, layer_masks_child1,
+ child1_is_directory, layer_masks_parent2,
+ layer_masks_child2,
+ child2_is_directory))) {
+ allowed_parent1 = scope_to_request(
+ access_request_parent1, layer_masks_parent1);
+ allowed_parent2 = scope_to_request(
+ access_request_parent2, layer_masks_parent2);
+
+ /* Stops when all accesses are granted. */
+ if (allowed_parent1 && allowed_parent2)
+ break;
+
+ /*
+ * Now, downgrades the remaining checks from domain
+ * handled accesses to requested accesses.
+ */
+ is_dom_check = false;
+ access_masked_parent1 = access_request_parent1;
+ access_masked_parent2 = access_request_parent2;
+ }
+
+ rule = find_rule(domain, walker_path.dentry);
+ allowed_parent1 = unmask_layers(rule, access_masked_parent1,
+ layer_masks_parent1);
+ allowed_parent2 = unmask_layers(rule, access_masked_parent2,
+ layer_masks_parent2);
+
+ /* Stops when a rule from each layer grants access. */
+ if (allowed_parent1 && allowed_parent2)
+ break;
+
+jump_up:
+ if (walker_path.dentry == walker_path.mnt->mnt_root) {
+ if (follow_up(&walker_path)) {
+ /* Ignores hidden mount points. */
+ goto jump_up;
+ } else {
+ /*
+ * Stops at the real root. Denies access
+ * because not all layers have granted access.
+ */
+ break;
+ }
+ }
+ if (unlikely(IS_ROOT(walker_path.dentry))) {
+ /*
+ * Stops at disconnected root directories. Only allows
+ * access to internal filesystems (e.g. nsfs, which is
+ * reachable through /proc/<pid>/ns/<namespace>).
+ */
+ allowed_parent1 = allowed_parent2 =
+ !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
+ break;
+ }
+ parent_dentry = dget_parent(walker_path.dentry);
+ dput(walker_path.dentry);
+ walker_path.dentry = parent_dentry;
+ }
+ path_put(&walker_path);
+
+ if (allowed_parent1 && allowed_parent2)
+ return 0;
+
+ /*
+ * This prioritizes EACCES over EXDEV for all actions, including
+ * renames with RENAME_EXCHANGE.
+ */
+ if (likely(is_eacces(layer_masks_parent1, access_request_parent1) ||
+ is_eacces(layer_masks_parent2, access_request_parent2)))
+ return -EACCES;
+
+ /*
+ * Gracefully forbids reparenting if the destination directory
+ * hierarchy is not a superset of restrictions of the source directory
+ * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
+ * source or the destination.
+ */
+ return -EXDEV;
+}
+
+static inline int check_access_path(const struct landlock_ruleset *const domain,
+ const struct path *const path,
+ access_mask_t access_request)
+{
+ layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
+
+ access_request = init_layer_masks(domain, access_request, &layer_masks);
+ return check_access_path_dual(domain, path, access_request,
+ &layer_masks, NULL, 0, NULL, NULL);
+}
+
+static inline int current_check_access_path(const struct path *const path,
+ const access_mask_t access_request)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ return check_access_path(dom, path, access_request);
+}
+
+static inline access_mask_t get_mode_access(const umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFLNK:
+ return LANDLOCK_ACCESS_FS_MAKE_SYM;
+ case 0:
+ /* A zero mode translates to S_IFREG. */
+ case S_IFREG:
+ return LANDLOCK_ACCESS_FS_MAKE_REG;
+ case S_IFDIR:
+ return LANDLOCK_ACCESS_FS_MAKE_DIR;
+ case S_IFCHR:
+ return LANDLOCK_ACCESS_FS_MAKE_CHAR;
+ case S_IFBLK:
+ return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
+ case S_IFIFO:
+ return LANDLOCK_ACCESS_FS_MAKE_FIFO;
+ case S_IFSOCK:
+ return LANDLOCK_ACCESS_FS_MAKE_SOCK;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline access_mask_t maybe_remove(const struct dentry *const dentry)
+{
+ if (d_is_negative(dentry))
+ return 0;
+ return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
+ LANDLOCK_ACCESS_FS_REMOVE_FILE;
+}
+
+/**
+ * collect_domain_accesses - Walk through a file path and collect accesses
+ *
+ * @domain: Domain to check against.
+ * @mnt_root: Last directory to check.
+ * @dir: Directory to start the walk from.
+ * @layer_masks_dom: Where to store the collected accesses.
+ *
+ * This helper is useful to begin a path walk from the @dir directory to a
+ * @mnt_root directory used as a mount point. This mount point is the common
+ * ancestor between the source and the destination of a renamed or linked
+ * file. While walking from @dir to @mnt_root, we record all the domain's
+ * allowed accesses in @layer_masks_dom.
+ *
+ * This is similar to check_access_path_dual() but much simpler because it only
+ * handles walking on the same mount point and only checks one set of accesses.
+ *
+ * Returns:
+ * - true if all the domain access rights are allowed for @dir;
+ * - false if the walk reached @mnt_root.
+ */
+static bool collect_domain_accesses(
+ const struct landlock_ruleset *const domain,
+ const struct dentry *const mnt_root, struct dentry *dir,
+ layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
+{
+ unsigned long access_dom;
+ bool ret = false;
+
+ if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
+ return true;
+ if (is_nouser_or_private(dir))
+ return true;
+
+ access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
+ layer_masks_dom);
+
+ dget(dir);
+ while (true) {
+ struct dentry *parent_dentry;
+
+ /* Gets all layers allowing all domain accesses. */
+ if (unmask_layers(find_rule(domain, dir), access_dom,
+ layer_masks_dom)) {
+ /*
+ * Stops when all handled accesses are allowed by at
+ * least one rule in each layer.
+ */
+ ret = true;
+ break;
+ }
+
+ /* We should not reach a root other than @mnt_root. */
+ if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
+ break;
+
+ parent_dentry = dget_parent(dir);
+ dput(dir);
+ dir = parent_dentry;
+ }
+ dput(dir);
+ return ret;
+}
+
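+/*
+ * A sketch of the mask bookkeeping above, for a hypothetical two-layer
+ * domain: init_layer_masks() seeds each handled access in @layer_masks_dom
+ * with the bit of every layer handling it (e.g. 0b11), and each matching
+ * rule found while walking toward @mnt_root clears the bits of the layers
+ * it satisfies. The walk only stops early once all the masks are empty,
+ * i.e. every layer allowed every handled access.
+ */
+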
+/**
+ * current_check_refer_path - Check if a rename or link action is allowed
+ *
+ * @old_dentry: File or directory requested to be moved or linked.
+ * @new_dir: Destination parent directory.
+ * @new_dentry: Destination file or directory.
+ * @removable: Set to true if it is a rename operation.
+ * @exchange: Set to true if it is a rename operation with RENAME_EXCHANGE.
+ *
+ * Because of its unprivileged constraints, Landlock relies on file hierarchies
+ * (and not only inodes) to tie access rights to files. Being able to link or
+ * rename a file hierarchy brings some challenges. Indeed, moving or linking a
+ * file (i.e. creating a new reference to an inode) can have an impact on the
+ * actions allowed for a set of files if it would change its parent directory
+ * (i.e. reparenting).
+ *
+ * To avoid trivial access right bypasses, Landlock first checks if the file or
+ * directory requested to be moved would gain new access rights inherited from
+ * its new hierarchy. Before returning any error, Landlock then checks that
+ * the parent source hierarchy and the destination hierarchy would allow the
+ * link or rename action. If it is not the case, an error with EACCES is
+ * returned to inform user space that there is no way to remove or create the
+ * requested source file type. If it should be allowed but the new inherited
+ * access rights would be greater than the source access rights, then the
+ * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
+ * user space to abort the whole operation if there is no way to do it, or to
+ * manually copy the source to the destination if this remains allowed, e.g.
+ * because file creation is allowed on the destination directory but not direct
+ * linking.
+ *
+ * To achieve this goal, the kernel needs to compare two file hierarchies: the
+ * one identifying the source file or directory (including itself), and the
+ * destination one. This can be seen as a multilayer partial ordering problem.
+ * The kernel walks through these paths and collects in a matrix the access
+ * rights that are denied per layer. These matrices are then compared to see
+ * if the destination one has more (or the same) restrictions as the source
+ * one. If this is the case, the requested action will not return EXDEV, which
+ * doesn't mean the action is allowed. The parent hierarchy of the source
+ * (i.e. parent directory), and the destination hierarchy must also be checked
+ * to verify that they explicitly allow such action (i.e. referencing,
+ * creation and potentially removal rights). The kernel implementation is then
+ * required to rely on potentially four matrices of access rights: one for the
+ * source file or directory (i.e. the child), a potentially other one for the
+ * other source/destination (in case of RENAME_EXCHANGE), one for the source
+ * parent hierarchy and a last one for the destination hierarchy. These
+ * ephemeral matrices take some space on the stack, which limits the number of
+ * layers to a deemed reasonable number: 16.
+ *
+ * Returns:
+ * - 0 if access is allowed;
+ * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
+ * - -EACCES if file removal or creation is denied.
+ */
+static int current_check_refer_path(struct dentry *const old_dentry,
+ const struct path *const new_dir,
+ struct dentry *const new_dentry,
+ const bool removable, const bool exchange)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+ bool allow_parent1, allow_parent2;
+ access_mask_t access_request_parent1, access_request_parent2;
+ struct path mnt_dir;
+ layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
+ layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
+
+ if (!dom)
+ return 0;
+ if (WARN_ON_ONCE(dom->num_layers < 1))
+ return -EACCES;
+ if (unlikely(d_is_negative(old_dentry)))
+ return -ENOENT;
+ if (exchange) {
+ if (unlikely(d_is_negative(new_dentry)))
+ return -ENOENT;
+ access_request_parent1 =
+ get_mode_access(d_backing_inode(new_dentry)->i_mode);
+ } else {
+ access_request_parent1 = 0;
+ }
+ access_request_parent2 =
+ get_mode_access(d_backing_inode(old_dentry)->i_mode);
+ if (removable) {
+ access_request_parent1 |= maybe_remove(old_dentry);
+ access_request_parent2 |= maybe_remove(new_dentry);
+ }
+
+ /* The mount points are the same for old and new paths, cf. EXDEV. */
+ if (old_dentry->d_parent == new_dir->dentry) {
+ /*
+ * The LANDLOCK_ACCESS_FS_REFER access right is not required
+ * for same-directory referer (i.e. no reparenting).
+ */
+ access_request_parent1 = init_layer_masks(
+ dom, access_request_parent1 | access_request_parent2,
+ &layer_masks_parent1);
+ return check_access_path_dual(dom, new_dir,
+ access_request_parent1,
+ &layer_masks_parent1, NULL, 0,
+ NULL, NULL);
+ }
+
+ access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
+ access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
+
+ /* Saves the common mount point. */
+ mnt_dir.mnt = new_dir->mnt;
+ mnt_dir.dentry = new_dir->mnt->mnt_root;
+
+ /* new_dir->dentry is equal to new_dentry->d_parent */
+ allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
+ old_dentry->d_parent,
+ &layer_masks_parent1);
+ allow_parent2 = collect_domain_accesses(
+ dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
+
+ if (allow_parent1 && allow_parent2)
+ return 0;
+
+ /*
+ * To be able to compare source and destination domain access rights,
+ * take into account the @old_dentry access rights aggregated with its
+ * parent access rights. This will be useful to compare with the
+ * destination parent access rights.
+ */
+ return check_access_path_dual(dom, &mnt_dir, access_request_parent1,
+ &layer_masks_parent1, old_dentry,
+ access_request_parent2,
+ &layer_masks_parent2,
+ exchange ? new_dentry : NULL);
+}
+
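+/*
+ * A minimal user space sketch of the error priority implemented by
+ * current_check_refer_path(), with abort_operation() and copy_then_unlink()
+ * being hypothetical helpers:
+ *
+ *   if (rename("/tmp/a/file", "/tmp/b/file") < 0) {
+ *           if (errno == EACCES)
+ *                   abort_operation();  // No way to remove or create the file.
+ *           else if (errno == EXDEV)
+ *                   copy_then_unlink(); // Manual copy may still be allowed.
+ *   }
+ */
+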
+/* Inode hooks */
+
+static void hook_inode_free_security(struct inode *const inode)
+{
+ /*
+ * All inodes must already have been untied from their object by
+ * release_inode() or hook_sb_delete().
+ */
+ WARN_ON_ONCE(landlock_inode(inode)->object);
+}
+
+/* Super-block hooks */
+
+/*
+ * Release the inodes used in a security policy.
+ *
+ * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
+ */
+static void hook_sb_delete(struct super_block *const sb)
+{
+ struct inode *inode, *prev_inode = NULL;
+
+ if (!landlock_initialized)
+ return;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ struct landlock_object *object;
+
+ /* Only handles referenced inodes. */
+ if (!atomic_read(&inode->i_count))
+ continue;
+
+ /*
+ * Protects against concurrent modification of inode (e.g.
+ * from get_inode_object()).
+ */
+ spin_lock(&inode->i_lock);
+ /*
+ * Checks I_FREEING and I_WILL_FREE to protect against a race
+ * condition when release_inode() just called iput(), which
+ * could lead to a NULL dereference of inode->security or a
+ * second call to iput() for the same Landlock object. Also
+		 * checks I_NEW because such an inode cannot be tied to an object.
+ */
+ if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+
+ rcu_read_lock();
+ object = rcu_dereference(landlock_inode(inode)->object);
+ if (!object) {
+ rcu_read_unlock();
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ /* Keeps a reference to this inode until the next loop walk. */
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+
+ /*
+ * If there is no concurrent release_inode() ongoing, then we
+ * are in charge of calling iput() on this inode, otherwise we
+ * will just wait for it to finish.
+ */
+ spin_lock(&object->lock);
+ if (object->underobj == inode) {
+ object->underobj = NULL;
+ spin_unlock(&object->lock);
+ rcu_read_unlock();
+
+ /*
+ * Because object->underobj was not NULL,
+ * release_inode() and get_inode_object() guarantee
+ * that it is safe to reset
+ * landlock_inode(inode)->object while it is not NULL.
+ * It is therefore not necessary to lock inode->i_lock.
+ */
+ rcu_assign_pointer(landlock_inode(inode)->object, NULL);
+ /*
+ * At this point, we own the ihold() reference that was
+ * originally set up by get_inode_object() and the
+ * __iget() reference that we just set in this loop
+ * walk. Therefore the following call to iput() will
+ * not sleep nor drop the inode because there is now at
+ * least two references to it.
+ */
+ iput(inode);
+ } else {
+ spin_unlock(&object->lock);
+ rcu_read_unlock();
+ }
+
+ if (prev_inode) {
+ /*
+ * At this point, we still own the __iget() reference
+ * that we just set in this loop walk. Therefore we
+ * can drop the list lock and know that the inode won't
+ * disappear from under us until the next loop walk.
+ */
+ spin_unlock(&sb->s_inode_list_lock);
+ /*
+ * We can now actually put the inode reference from the
+ * previous loop walk, which is not needed anymore.
+ */
+ iput(prev_inode);
+ cond_resched();
+ spin_lock(&sb->s_inode_list_lock);
+ }
+ prev_inode = inode;
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+
+ /* Puts the inode reference from the last loop walk, if any. */
+ if (prev_inode)
+ iput(prev_inode);
+ /* Waits for pending iput() in release_inode(). */
+ wait_var_event(&landlock_superblock(sb)->inode_refs,
+ !atomic_long_read(&landlock_superblock(sb)->inode_refs));
+}
+
+/*
+ * Because a Landlock security policy is defined according to the filesystem
+ * topology (i.e. the mount namespace), changing it may grant access to files
+ * not previously allowed.
+ *
+ * To make it simple, deny any filesystem topology modification by landlocked
+ * processes. Non-landlocked processes may still change the namespace of a
+ * landlocked process, but this kind of threat must be handled by a system-wide
+ * access-control security policy.
+ *
+ * This could be lifted in the future if Landlock can safely handle mount
+ * namespace updates requested by a landlocked process. Indeed, we could
+ * update the current domain (which is currently read-only) by taking into
+ * account the accesses of the source and the destination of a new mount point.
+ * However, it would also require to make all the child domains dynamically
+ * inherit these new constraints. Anyway, for backward compatibility reasons,
+ * a dedicated user space option would be required (e.g. as a ruleset flag).
+ */
+static int hook_sb_mount(const char *const dev_name,
+ const struct path *const path, const char *const type,
+ const unsigned long flags, void *const data)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
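+/*
+ * In practice, this means that after a successful landlock_restrict_self(2)
+ * call, any mount(2) attempt fails, e.g.:
+ *
+ *   mount("none", "/mnt", "tmpfs", 0, NULL);  // -1 with errno == EPERM
+ */
+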
+static int hook_move_mount(const struct path *const from_path,
+ const struct path *const to_path)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/*
+ * Removing a mount point may reveal a previously hidden file hierarchy, which
+ * may then grant access to files, which may have previously been forbidden.
+ */
+static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/*
+ * pivot_root(2), like mount(2), changes the current mount namespace. It must
+ * then be forbidden for a landlocked process.
+ *
+ * However, chroot(2) may be allowed because it only changes the relative root
+ * directory of the current process. Moreover, it can be used to restrict the
+ * view of the filesystem.
+ */
+static int hook_sb_pivotroot(const struct path *const old_path,
+ const struct path *const new_path)
+{
+ if (!landlock_get_current_domain())
+ return 0;
+ return -EPERM;
+}
+
+/* Path hooks */
+
+static int hook_path_link(struct dentry *const old_dentry,
+ const struct path *const new_dir,
+ struct dentry *const new_dentry)
+{
+ return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
+ false);
+}
+
+static int hook_path_rename(const struct path *const old_dir,
+ struct dentry *const old_dentry,
+ const struct path *const new_dir,
+ struct dentry *const new_dentry,
+ const unsigned int flags)
+{
+ /* old_dir refers to old_dentry->d_parent and new_dir->mnt */
+ return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
+ !!(flags & RENAME_EXCHANGE));
+}
+
+static int hook_path_mkdir(const struct path *const dir,
+ struct dentry *const dentry, const umode_t mode)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
+}
+
+static int hook_path_mknod(const struct path *const dir,
+ struct dentry *const dentry, const umode_t mode,
+ const unsigned int dev)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ return check_access_path(dom, dir, get_mode_access(mode));
+}
+
+static int hook_path_symlink(const struct path *const dir,
+ struct dentry *const dentry,
+ const char *const old_name)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
+}
+
+static int hook_path_unlink(const struct path *const dir,
+ struct dentry *const dentry)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
+}
+
+static int hook_path_rmdir(const struct path *const dir,
+ struct dentry *const dentry)
+{
+ return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
+}
+
+/* File hooks */
+
+static inline access_mask_t get_file_access(const struct file *const file)
+{
+ access_mask_t access = 0;
+
+ if (file->f_mode & FMODE_READ) {
+ /* A directory can only be opened in read mode. */
+ if (S_ISDIR(file_inode(file)->i_mode))
+ return LANDLOCK_ACCESS_FS_READ_DIR;
+ access = LANDLOCK_ACCESS_FS_READ_FILE;
+ }
+ if (file->f_mode & FMODE_WRITE)
+ access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
+ /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
+ if (file->f_flags & __FMODE_EXEC)
+ access |= LANDLOCK_ACCESS_FS_EXECUTE;
+ return access;
+}
+
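+/*
+ * For instance, with the mapping above, open("file", O_RDWR) requests
+ * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, opening a
+ * directory with O_RDONLY requests LANDLOCK_ACCESS_FS_READ_DIR, and an
+ * execve(2) (which sets __FMODE_EXEC on the opened file) additionally
+ * requests LANDLOCK_ACCESS_FS_EXECUTE.
+ */
+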
+static int hook_file_open(struct file *const file)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+ /*
+ * Because a file may be opened with O_PATH, get_file_access() may
+	 * return 0. This case will be handled by a future Landlock
+ * evolution.
+ */
+ return check_access_path(dom, &file->f_path, get_file_access(file));
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),
+
+ LSM_HOOK_INIT(sb_delete, hook_sb_delete),
+ LSM_HOOK_INIT(sb_mount, hook_sb_mount),
+ LSM_HOOK_INIT(move_mount, hook_move_mount),
+ LSM_HOOK_INIT(sb_umount, hook_sb_umount),
+ LSM_HOOK_INIT(sb_remount, hook_sb_remount),
+ LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),
+
+ LSM_HOOK_INIT(path_link, hook_path_link),
+ LSM_HOOK_INIT(path_rename, hook_path_rename),
+ LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
+ LSM_HOOK_INIT(path_mknod, hook_path_mknod),
+ LSM_HOOK_INIT(path_symlink, hook_path_symlink),
+ LSM_HOOK_INIT(path_unlink, hook_path_unlink),
+ LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
+
+ LSM_HOOK_INIT(file_open, hook_file_open),
+};
+
+__init void landlock_add_fs_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
diff --git a/security/landlock/fs.h b/security/landlock/fs.h
new file mode 100644
index 000000000..8db7acf91
--- /dev/null
+++ b/security/landlock/fs.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Filesystem management and hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_FS_H
+#define _SECURITY_LANDLOCK_FS_H
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * struct landlock_inode_security - Inode security blob
+ *
+ * Enables referencing a &struct landlock_object tied to an inode (i.e. the
+ * underlying object).
+ */
+struct landlock_inode_security {
+ /**
+ * @object: Weak pointer to an allocated object. All assignments of a
+ * new object are protected by the underlying inode->i_lock. However,
+ * atomically disassociating @object from the inode is only protected
+ * by @object->lock, from the time @object's usage refcount drops to
+ * zero to the time this pointer is nulled out (cf. release_inode() and
+ * hook_sb_delete()). Indeed, such disassociation doesn't require
+ * inode->i_lock thanks to the careful rcu_access_pointer() check
+ * performed by get_inode_object().
+ */
+ struct landlock_object __rcu *object;
+};
+
+/**
+ * struct landlock_superblock_security - Superblock security blob
+ *
+ * Enables hook_sb_delete() to wait for concurrent calls to release_inode().
+ */
+struct landlock_superblock_security {
+ /**
+ * @inode_refs: Number of pending inodes (from this superblock) that
+ * are being released by release_inode().
+ * Cf. struct super_block->s_fsnotify_inode_refs .
+ */
+ atomic_long_t inode_refs;
+};
+
+static inline struct landlock_inode_security *
+landlock_inode(const struct inode *const inode)
+{
+ return inode->i_security + landlock_blob_sizes.lbs_inode;
+}
+
+static inline struct landlock_superblock_security *
+landlock_superblock(const struct super_block *const superblock)
+{
+ return superblock->s_security + landlock_blob_sizes.lbs_superblock;
+}
+
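+/*
+ * Note that lbs_inode and lbs_superblock end up being byte offsets: the LSM
+ * framework turns the sizes declared in landlock_blob_sizes into offsets
+ * within the shared security blobs when stacking modules, so these helpers
+ * return the Landlock-owned slice of inode->i_security and sb->s_security.
+ */
+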
+__init void landlock_add_fs_hooks(void);
+
+int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
+ const struct path *const path,
+ access_mask_t access_hierarchy);
+
+#endif /* _SECURITY_LANDLOCK_FS_H */
diff --git a/security/landlock/limits.h b/security/landlock/limits.h
new file mode 100644
index 000000000..b54184ab9
--- /dev/null
+++ b/security/landlock/limits.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Limits for different components
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_LIMITS_H
+#define _SECURITY_LANDLOCK_LIMITS_H
+
+#include <linux/bitops.h>
+#include <linux/limits.h>
+#include <uapi/linux/landlock.h>
+
+/* clang-format off */
+
+#define LANDLOCK_MAX_NUM_LAYERS 16
+#define LANDLOCK_MAX_NUM_RULES U32_MAX
+
+#define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_REFER
+#define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1)
+#define LANDLOCK_NUM_ACCESS_FS __const_hweight64(LANDLOCK_MASK_ACCESS_FS)
+
+/* clang-format on */
+
+#endif /* _SECURITY_LANDLOCK_LIMITS_H */
diff --git a/security/landlock/object.c b/security/landlock/object.c
new file mode 100644
index 000000000..1f50612f0
--- /dev/null
+++ b/security/landlock/object.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Object management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/bug.h>
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "object.h"
+
+struct landlock_object *
+landlock_create_object(const struct landlock_object_underops *const underops,
+ void *const underobj)
+{
+ struct landlock_object *new_object;
+
+ if (WARN_ON_ONCE(!underops || !underobj))
+ return ERR_PTR(-ENOENT);
+ new_object = kzalloc(sizeof(*new_object), GFP_KERNEL_ACCOUNT);
+ if (!new_object)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&new_object->usage, 1);
+ spin_lock_init(&new_object->lock);
+ new_object->underops = underops;
+ new_object->underobj = underobj;
+ return new_object;
+}
+
+/*
+ * The caller must own a reference to the object (i.e. counted in
+ * object->usage) to safely put it.
+ */
+void landlock_put_object(struct landlock_object *const object)
+{
+ /*
+ * The call to @object->underops->release(object) might sleep, e.g.
+ * because of iput().
+ */
+ might_sleep();
+ if (!object)
+ return;
+
+ /*
+ * If the @object's refcount cannot drop to zero, we can just decrement
+ * the refcount without holding a lock. Otherwise, the decrement must
+ * happen under @object->lock for synchronization with things like
+ * get_inode_object().
+ */
+ if (refcount_dec_and_lock(&object->usage, &object->lock)) {
+ __acquire(&object->lock);
+ /*
+ * With @object->lock initially held, remove the reference from
+ * @object->underobj to @object (if it still exists).
+ */
+ object->underops->release(object);
+ kfree_rcu(object, rcu_free);
+ }
+}
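+
+/*
+ * A sketch of the expected refcount lifecycle, with some_underops and
+ * some_obj being placeholders for a real underops implementation (e.g. the
+ * filesystem one) and its kernel object:
+ *
+ *   struct landlock_object *obj;
+ *
+ *   obj = landlock_create_object(&some_underops, some_obj); // usage == 1
+ *   landlock_get_object(obj);                               // usage == 2
+ *   landlock_put_object(obj);                               // usage == 1
+ *   landlock_put_object(obj); // usage == 0: release(), then kfree_rcu()
+ */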
diff --git a/security/landlock/object.h b/security/landlock/object.h
new file mode 100644
index 000000000..5f28c35e8
--- /dev/null
+++ b/security/landlock/object.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Object management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_OBJECT_H
+#define _SECURITY_LANDLOCK_OBJECT_H
+
+#include <linux/compiler_types.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+
+struct landlock_object;
+
+/**
+ * struct landlock_object_underops - Operations on an underlying object
+ */
+struct landlock_object_underops {
+ /**
+ * @release: Releases the underlying object (e.g. iput() for an inode).
+ */
+ void (*release)(struct landlock_object *const object)
+ __releases(object->lock);
+};
+
+/**
+ * struct landlock_object - Security blob tied to a kernel object
+ *
+ * The goal of this structure is to enable tying a set of ephemeral access
+ * rights (pertaining to different domains) to a kernel object (e.g. an
+ * inode) in a safe way. This implies handling concurrent use and
+ * modification.
+ *
+ * The lifetime of a &struct landlock_object depends on the rules referring to
+ * it.
+ */
+struct landlock_object {
+ /**
+ * @usage: This counter is used to tie an object to the rules matching
+ * it or to keep it alive while adding a new rule. If this counter
+ * reaches zero, this struct must not be modified, but this counter can
+ * still be read from within an RCU read-side critical section. When
+ * adding a new rule to an object with a usage counter of zero, we must
+ * wait until the pointer to this object is set to NULL (or recycled).
+ */
+ refcount_t usage;
+ /**
+ * @lock: Protects against concurrent modifications. This lock must be
+ * held from the time @usage drops to zero until any weak references
+ * from @underobj to this object have been cleaned up.
+ *
+ * Lock ordering: inode->i_lock nests inside this.
+ */
+ spinlock_t lock;
+ /**
+ * @underobj: Used when cleaning up an object and to mark an object as
+ * tied to its underlying kernel structure. This pointer is protected
+	 * by @lock. Cf. hook_sb_delete() and release_inode().
+ */
+ void *underobj;
+ union {
+ /**
+ * @rcu_free: Enables lockless use of @usage, @lock and
+ * @underobj from within an RCU read-side critical section.
+ * @rcu_free and @underops are only used by
+ * landlock_put_object().
+ */
+ struct rcu_head rcu_free;
+ /**
+ * @underops: Enables landlock_put_object() to release the
+ * underlying object (e.g. inode).
+ */
+ const struct landlock_object_underops *underops;
+ };
+};
+
+struct landlock_object *
+landlock_create_object(const struct landlock_object_underops *const underops,
+ void *const underobj);
+
+void landlock_put_object(struct landlock_object *const object);
+
+static inline void landlock_get_object(struct landlock_object *const object)
+{
+ if (object)
+ refcount_inc(&object->usage);
+}
+
+#endif /* _SECURITY_LANDLOCK_OBJECT_H */
diff --git a/security/landlock/ptrace.c b/security/landlock/ptrace.c
new file mode 100644
index 000000000..4c5b9cd71
--- /dev/null
+++ b/security/landlock/ptrace.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Ptrace hooks
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#include <asm/current.h>
+#include <linux/cred.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/lsm_hooks.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "cred.h"
+#include "ptrace.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * domain_scope_le - Checks domain ordering for scoped ptrace
+ *
+ * @parent: Parent domain.
+ * @child: Potential child of @parent.
+ *
+ * Checks if the @parent domain is less than or equal to (i.e. an ancestor,
+ * which means a subset of) the @child domain.
+ */
+static bool domain_scope_le(const struct landlock_ruleset *const parent,
+ const struct landlock_ruleset *const child)
+{
+ const struct landlock_hierarchy *walker;
+
+ if (!parent)
+ return true;
+ if (!child)
+ return false;
+ for (walker = child->hierarchy; walker; walker = walker->parent) {
+ if (walker == parent->hierarchy)
+ /* @parent is in the scoped hierarchy of @child. */
+ return true;
+ }
+ /* There is no relationship between @parent and @child. */
+ return false;
+}
+
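+/*
+ * For example, with nested domains A -> B -> C (A being the oldest
+ * ancestor), domain_scope_le(A, C) is true because the walk from C reaches
+ * A's hierarchy, whereas domain_scope_le(C, A) is false: a task restricted
+ * by C must not trace a less restricted task only confined by A.
+ */
+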
+static bool task_is_scoped(const struct task_struct *const parent,
+ const struct task_struct *const child)
+{
+ bool is_scoped;
+ const struct landlock_ruleset *dom_parent, *dom_child;
+
+ rcu_read_lock();
+ dom_parent = landlock_get_task_domain(parent);
+ dom_child = landlock_get_task_domain(child);
+ is_scoped = domain_scope_le(dom_parent, dom_child);
+ rcu_read_unlock();
+ return is_scoped;
+}
+
+static int task_ptrace(const struct task_struct *const parent,
+ const struct task_struct *const child)
+{
+ /* Quick return for non-landlocked tasks. */
+ if (!landlocked(parent))
+ return 0;
+ if (task_is_scoped(parent, child))
+ return 0;
+ return -EPERM;
+}
+
+/**
+ * hook_ptrace_access_check - Determines whether the current process may access
+ * another
+ *
+ * @child: Process to be accessed.
+ * @mode: Mode of attachment.
+ *
+ * If the current task has Landlock rules, then the child must have at least
+ * the same rules, otherwise access is denied.
+ *
+ * Determines whether a process may access another, returning 0 if permission
+ * granted, -errno if denied.
+ */
+static int hook_ptrace_access_check(struct task_struct *const child,
+ const unsigned int mode)
+{
+ return task_ptrace(current, child);
+}
+
+/**
+ * hook_ptrace_traceme - Determines whether another process may trace the
+ * current one
+ *
+ * @parent: Task proposed to be the tracer.
+ *
+ * If the parent has Landlock rules, then the current task must have the same
+ * or more rules, otherwise the request is denied.
+ *
+ * Determines whether the nominated task is permitted to trace the current
+ * process, returning 0 if permission is granted, -errno if denied.
+ */
+static int hook_ptrace_traceme(struct task_struct *const parent)
+{
+ return task_ptrace(parent, current);
+}
+
+static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(ptrace_access_check, hook_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, hook_ptrace_traceme),
+};
+
+__init void landlock_add_ptrace_hooks(void)
+{
+ security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
+ LANDLOCK_NAME);
+}
diff --git a/security/landlock/ptrace.h b/security/landlock/ptrace.h
new file mode 100644
index 000000000..265b220ae
--- /dev/null
+++ b/security/landlock/ptrace.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Ptrace hooks
+ *
+ * Copyright © 2017-2019 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_PTRACE_H
+#define _SECURITY_LANDLOCK_PTRACE_H
+
+__init void landlock_add_ptrace_hooks(void);
+
+#endif /* _SECURITY_LANDLOCK_PTRACE_H */
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
new file mode 100644
index 000000000..996484f98
--- /dev/null
+++ b/security/landlock/ruleset.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Ruleset management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/overflow.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "limits.h"
+#include "object.h"
+#include "ruleset.h"
+
+static struct landlock_ruleset *create_ruleset(const u32 num_layers)
+{
+ struct landlock_ruleset *new_ruleset;
+
+ new_ruleset =
+ kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
+ GFP_KERNEL_ACCOUNT);
+ if (!new_ruleset)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&new_ruleset->usage, 1);
+ mutex_init(&new_ruleset->lock);
+ new_ruleset->root = RB_ROOT;
+ new_ruleset->num_layers = num_layers;
+ /*
+ * hierarchy = NULL
+ * num_rules = 0
+ * fs_access_masks[] = 0
+ */
+ return new_ruleset;
+}
+
+struct landlock_ruleset *
+landlock_create_ruleset(const access_mask_t fs_access_mask)
+{
+ struct landlock_ruleset *new_ruleset;
+
+	/* Informs about a useless ruleset. */
+ if (!fs_access_mask)
+ return ERR_PTR(-ENOMSG);
+ new_ruleset = create_ruleset(1);
+ if (!IS_ERR(new_ruleset))
+ new_ruleset->fs_access_masks[0] = fs_access_mask;
+ return new_ruleset;
+}
+
+static void build_check_rule(void)
+{
+ const struct landlock_rule rule = {
+ .num_layers = ~0,
+ };
+
+ BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
+}
+
+static struct landlock_rule *
+create_rule(struct landlock_object *const object,
+ const struct landlock_layer (*const layers)[], const u32 num_layers,
+ const struct landlock_layer *const new_layer)
+{
+ struct landlock_rule *new_rule;
+ u32 new_num_layers;
+
+ build_check_rule();
+ if (new_layer) {
+ /* Should already be checked by landlock_merge_ruleset(). */
+ if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
+ return ERR_PTR(-E2BIG);
+ new_num_layers = num_layers + 1;
+ } else {
+ new_num_layers = num_layers;
+ }
+ new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
+ GFP_KERNEL_ACCOUNT);
+ if (!new_rule)
+ return ERR_PTR(-ENOMEM);
+ RB_CLEAR_NODE(&new_rule->node);
+ landlock_get_object(object);
+ new_rule->object = object;
+ new_rule->num_layers = new_num_layers;
+ /* Copies the original layer stack. */
+ memcpy(new_rule->layers, layers,
+ flex_array_size(new_rule, layers, num_layers));
+ if (new_layer)
+ /* Adds a copy of @new_layer on the layer stack. */
+ new_rule->layers[new_rule->num_layers - 1] = *new_layer;
+ return new_rule;
+}
+
+static void free_rule(struct landlock_rule *const rule)
+{
+ might_sleep();
+ if (!rule)
+ return;
+ landlock_put_object(rule->object);
+ kfree(rule);
+}
+
+static void build_check_ruleset(void)
+{
+ const struct landlock_ruleset ruleset = {
+ .num_rules = ~0,
+ .num_layers = ~0,
+ };
+ typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;
+
+ BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
+ BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
+ BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
+}
+
+/**
+ * insert_rule - Create and insert a rule in a ruleset
+ *
+ * @ruleset: The ruleset to be updated.
+ * @object: The object to build the new rule with. The underlying kernel
+ * object must be held by the caller.
+ * @layers: One or multiple layers to be copied into the new rule.
+ * @num_layers: The number of @layers entries.
+ *
+ * When user space requests to add a new rule to a ruleset, @layers only
+ * contains one entry and this entry is not assigned to any level. In this
+ * case, the new rule will extend @ruleset, similarly to a boolean OR between
+ * access rights.
+ *
+ * When merging a ruleset in a domain, or copying a domain, @layers will be
+ * added to @ruleset as new constraints, similarly to a boolean AND between
+ * access rights.
+ */
+static int insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object,
+ const struct landlock_layer (*const layers)[],
+ size_t num_layers)
+{
+ struct rb_node **walker_node;
+ struct rb_node *parent_node = NULL;
+ struct landlock_rule *new_rule;
+
+ might_sleep();
+ lockdep_assert_held(&ruleset->lock);
+ if (WARN_ON_ONCE(!object || !layers))
+ return -ENOENT;
+ walker_node = &(ruleset->root.rb_node);
+ while (*walker_node) {
+ struct landlock_rule *const this =
+ rb_entry(*walker_node, struct landlock_rule, node);
+
+ if (this->object != object) {
+ parent_node = *walker_node;
+ if (this->object < object)
+ walker_node = &((*walker_node)->rb_right);
+ else
+ walker_node = &((*walker_node)->rb_left);
+ continue;
+ }
+
+ /* Only a single-level layer should match an existing rule. */
+ if (WARN_ON_ONCE(num_layers != 1))
+ return -EINVAL;
+
+ /* If there is a matching rule, updates it. */
+ if ((*layers)[0].level == 0) {
+ /*
+ * Extends access rights when the request comes from
+ * landlock_add_rule(2), i.e. @ruleset is not a domain.
+ */
+ if (WARN_ON_ONCE(this->num_layers != 1))
+ return -EINVAL;
+ if (WARN_ON_ONCE(this->layers[0].level != 0))
+ return -EINVAL;
+ this->layers[0].access |= (*layers)[0].access;
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(this->layers[0].level == 0))
+ return -EINVAL;
+
+ /*
+ * Intersects access rights when it is a merge between a
+ * ruleset and a domain.
+ */
+ new_rule = create_rule(object, &this->layers, this->num_layers,
+ &(*layers)[0]);
+ if (IS_ERR(new_rule))
+ return PTR_ERR(new_rule);
+ rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
+ free_rule(this);
+ return 0;
+ }
+
+ /* There is no match for @object. */
+ build_check_ruleset();
+ if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
+ return -E2BIG;
+ new_rule = create_rule(object, layers, num_layers, NULL);
+ if (IS_ERR(new_rule))
+ return PTR_ERR(new_rule);
+ rb_link_node(&new_rule->node, parent_node, walker_node);
+ rb_insert_color(&new_rule->node, &ruleset->root);
+ ruleset->num_rules++;
+ return 0;
+}
+
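+/*
+ * A sketch of the two code paths above, READ and WRITE standing for any two
+ * access bits: inserting { .level = 0, .access = READ } and then
+ * { .level = 0, .access = WRITE } for the same object (the
+ * landlock_add_rule(2) case) yields a single one-layer rule with
+ * access == READ | WRITE, whereas merging a layer with a non-zero level
+ * stacks it on top of the existing ones, and an access is ultimately
+ * granted only if every stacked layer allows it.
+ */
+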
+static void build_check_layer(void)
+{
+ const struct landlock_layer layer = {
+ .level = ~0,
+ .access = ~0,
+ };
+
+ BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
+ BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
+}
+
+/* @ruleset must be locked by the caller. */
+int landlock_insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object,
+ const access_mask_t access)
+{
+ struct landlock_layer layers[] = { {
+ .access = access,
+ /* When @level is zero, insert_rule() extends @ruleset. */
+ .level = 0,
+ } };
+
+ build_check_layer();
+ return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
+}
+
+static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
+{
+ if (hierarchy)
+ refcount_inc(&hierarchy->usage);
+}
+
+static void put_hierarchy(struct landlock_hierarchy *hierarchy)
+{
+ while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
+ const struct landlock_hierarchy *const freeme = hierarchy;
+
+ hierarchy = hierarchy->parent;
+ kfree(freeme);
+ }
+}
+
+static int merge_ruleset(struct landlock_ruleset *const dst,
+ struct landlock_ruleset *const src)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ int err = 0;
+
+ might_sleep();
+ /* Should already be checked by landlock_merge_ruleset() */
+ if (WARN_ON_ONCE(!src))
+ return 0;
+ /* Only merge into a domain. */
+ if (WARN_ON_ONCE(!dst || !dst->hierarchy))
+ return -EINVAL;
+
+ /* Locks @dst first because we are its only owner. */
+ mutex_lock(&dst->lock);
+ mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
+
+ /* Stacks the new layer. */
+ if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];
+
+ /* Merges the @src tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
+ node) {
+ struct landlock_layer layers[] = { {
+ .level = dst->num_layers,
+ } };
+
+ if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ layers[0].access = walker_rule->layers[0].access;
+ err = insert_rule(dst, walker_rule->object, &layers,
+ ARRAY_SIZE(layers));
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&src->lock);
+ mutex_unlock(&dst->lock);
+ return err;
+}
+
+static int inherit_ruleset(struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const child)
+{
+ struct landlock_rule *walker_rule, *next_rule;
+ int err = 0;
+
+ might_sleep();
+ if (!parent)
+ return 0;
+
+ /* Locks @child first because we are its only owner. */
+ mutex_lock(&child->lock);
+ mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+
+ /* Copies the @parent tree. */
+ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
+ &parent->root, node) {
+ err = insert_rule(child, walker_rule->object,
+ &walker_rule->layers,
+ walker_rule->num_layers);
+ if (err)
+ goto out_unlock;
+ }
+
+ if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ /* Copies the parent layer stack and leaves a space for the new layer. */
+ memcpy(child->fs_access_masks, parent->fs_access_masks,
+ flex_array_size(parent, fs_access_masks, parent->num_layers));
+
+ if (WARN_ON_ONCE(!parent->hierarchy)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ get_hierarchy(parent->hierarchy);
+ child->hierarchy->parent = parent->hierarchy;
+
+out_unlock:
+ mutex_unlock(&parent->lock);
+ mutex_unlock(&child->lock);
+ return err;
+}
+
+static void free_ruleset(struct landlock_ruleset *const ruleset)
+{
+ struct landlock_rule *freeme, *next;
+
+ might_sleep();
+ rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
+ free_rule(freeme);
+ put_hierarchy(ruleset->hierarchy);
+ kfree(ruleset);
+}
+
+void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
+{
+ might_sleep();
+ if (ruleset && refcount_dec_and_test(&ruleset->usage))
+ free_ruleset(ruleset);
+}
+
+static void free_ruleset_work(struct work_struct *const work)
+{
+ struct landlock_ruleset *ruleset;
+
+ ruleset = container_of(work, struct landlock_ruleset, work_free);
+ free_ruleset(ruleset);
+}
+
+void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
+{
+ if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
+ INIT_WORK(&ruleset->work_free, free_ruleset_work);
+ schedule_work(&ruleset->work_free);
+ }
+}
+
+/**
+ * landlock_merge_ruleset - Merge a ruleset with a domain
+ *
+ * @parent: Parent domain.
+ * @ruleset: New ruleset to be merged.
+ *
+ * Returns the intersection of @parent and @ruleset, or returns @parent if
+ * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
+ */
+struct landlock_ruleset *
+landlock_merge_ruleset(struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const ruleset)
+{
+ struct landlock_ruleset *new_dom;
+ u32 num_layers;
+ int err;
+
+ might_sleep();
+ if (WARN_ON_ONCE(!ruleset || parent == ruleset))
+ return ERR_PTR(-EINVAL);
+
+ if (parent) {
+ if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
+ return ERR_PTR(-E2BIG);
+ num_layers = parent->num_layers + 1;
+ } else {
+ num_layers = 1;
+ }
+
+ /* Creates a new domain... */
+ new_dom = create_ruleset(num_layers);
+ if (IS_ERR(new_dom))
+ return new_dom;
+ new_dom->hierarchy =
+ kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
+ if (!new_dom->hierarchy) {
+ err = -ENOMEM;
+ goto out_put_dom;
+ }
+ refcount_set(&new_dom->hierarchy->usage, 1);
+
+ /* ...as a child of @parent... */
+ err = inherit_ruleset(parent, new_dom);
+ if (err)
+ goto out_put_dom;
+
+ /* ...and including @ruleset. */
+ err = merge_ruleset(new_dom, ruleset);
+ if (err)
+ goto out_put_dom;
+
+ return new_dom;
+
+out_put_dom:
+ landlock_put_ruleset(new_dom);
+ return ERR_PTR(err);
+}
+
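+/*
+ * For instance, a task that successfully enforces three rulesets one after
+ * the other runs in a domain built by three such merges, i.e. with
+ * num_layers == 3, bounded by LANDLOCK_MAX_NUM_LAYERS (16).
+ */
+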
+/*
+ * The returned rule has the same lifetime as @ruleset.
+ */
+const struct landlock_rule *
+landlock_find_rule(const struct landlock_ruleset *const ruleset,
+ const struct landlock_object *const object)
+{
+ const struct rb_node *node;
+
+ if (!object)
+ return NULL;
+ node = ruleset->root.rb_node;
+ while (node) {
+ struct landlock_rule *this =
+ rb_entry(node, struct landlock_rule, node);
+
+ if (this->object == object)
+ return this;
+ if (this->object < object)
+ node = node->rb_right;
+ else
+ node = node->rb_left;
+ }
+ return NULL;
+}
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
new file mode 100644
index 000000000..d43231b78
--- /dev/null
+++ b/security/landlock/ruleset.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Ruleset management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_RULESET_H
+#define _SECURITY_LANDLOCK_RULESET_H
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+
+#include "limits.h"
+#include "object.h"
+
+typedef u16 access_mask_t;
+/* Makes sure all filesystem access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
+/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
+static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
+
+typedef u16 layer_mask_t;
+/* Makes sure all layers can be checked. */
+static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
+
+/**
+ * struct landlock_layer - Access rights for a given layer
+ */
+struct landlock_layer {
+ /**
+ * @level: Position of this layer in the layer stack.
+ */
+ u16 level;
+ /**
+ * @access: Bitfield of allowed actions on the kernel object. They are
+ * relative to the object type (e.g. %LANDLOCK_ACTION_FS_READ).
+ */
+ access_mask_t access;
+};
+
+/**
+ * struct landlock_rule - Access rights tied to an object
+ */
+struct landlock_rule {
+ /**
+ * @node: Node in the ruleset's red-black tree.
+ */
+ struct rb_node node;
+ /**
+ * @object: Pointer to identify a kernel object (e.g. an inode). This
+ * is used as a key for this ruleset element. This pointer is set once
+ * and never modified. It always points to an allocated object because
+ * each rule increments the refcount of its object.
+ */
+ struct landlock_object *object;
+ /**
+ * @num_layers: Number of entries in @layers.
+ */
+ u32 num_layers;
+ /**
+	 * @layers: Stack of layers, from the oldest to the newest, implemented
+ * as a flexible array member (FAM).
+ */
+ struct landlock_layer layers[];
+};
+
+/**
+ * struct landlock_hierarchy - Node in a ruleset hierarchy
+ */
+struct landlock_hierarchy {
+ /**
+ * @parent: Pointer to the parent node, or NULL if it is a root
+ * Landlock domain.
+ */
+ struct landlock_hierarchy *parent;
+ /**
+	 * @usage: Number of potential child domains plus their parent
+ * domain.
+ */
+ refcount_t usage;
+};
+
+/**
+ * struct landlock_ruleset - Landlock ruleset
+ *
+ * This data structure must contain unique entries, be updatable, and quick to
+ * match an object.
+ */
+struct landlock_ruleset {
+ /**
+ * @root: Root of a red-black tree containing &struct landlock_rule
+ * nodes. Once a ruleset is tied to a process (i.e. as a domain), this
+ * tree is immutable until @usage reaches zero.
+ */
+ struct rb_root root;
+ /**
+ * @hierarchy: Enables hierarchy identification even when a parent
+ * domain vanishes. This is needed for the ptrace protection.
+ */
+ struct landlock_hierarchy *hierarchy;
+ union {
+ /**
+		 * @work_free: Enables freeing a ruleset within a lockless
+ * section. This is only used by
+ * landlock_put_ruleset_deferred() when @usage reaches zero.
+ * The fields @lock, @usage, @num_rules, @num_layers and
+ * @fs_access_masks are then unused.
+ */
+ struct work_struct work_free;
+ struct {
+ /**
+ * @lock: Protects against concurrent modifications of
+ * @root, if @usage is greater than zero.
+ */
+ struct mutex lock;
+ /**
+ * @usage: Number of processes (i.e. domains) or file
+ * descriptors referencing this ruleset.
+ */
+ refcount_t usage;
+ /**
+ * @num_rules: Number of non-overlapping (i.e. not for
+ * the same object) rules in this ruleset.
+ */
+ u32 num_rules;
+ /**
+ * @num_layers: Number of layers that are used in this
+			 * ruleset. This enables checking that all the layers
+ * allow an access request. A value of 0 identifies a
+ * non-merged ruleset (i.e. not a domain).
+ */
+ u32 num_layers;
+ /**
+ * @fs_access_masks: Contains the subset of filesystem
+ * actions that are restricted by a ruleset. A domain
+ * saves all layers of merged rulesets in a stack
+ * (FAM), starting from the first layer to the last
+ * one. These layers are used when merging rulesets,
+ * for user space backward compatibility (i.e.
+ * future-proof), and to properly handle merged
+ * rulesets without overlapping access rights. These
+ * layers are set once and never changed for the
+ * lifetime of the ruleset.
+ */
+ access_mask_t fs_access_masks[];
+ };
+ };
+};
+
+struct landlock_ruleset *
+landlock_create_ruleset(const access_mask_t fs_access_mask);
+
+void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
+void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
+
+int landlock_insert_rule(struct landlock_ruleset *const ruleset,
+ struct landlock_object *const object,
+ const access_mask_t access);
+
+struct landlock_ruleset *
+landlock_merge_ruleset(struct landlock_ruleset *const parent,
+ struct landlock_ruleset *const ruleset);
+
+const struct landlock_rule *
+landlock_find_rule(const struct landlock_ruleset *const ruleset,
+ const struct landlock_object *const object);
+
+static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset)
+{
+ if (ruleset)
+ refcount_inc(&ruleset->usage);
+}
+
+#endif /* _SECURITY_LANDLOCK_RULESET_H */
diff --git a/security/landlock/setup.c b/security/landlock/setup.c
new file mode 100644
index 000000000..f8e8e9804
--- /dev/null
+++ b/security/landlock/setup.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - Security framework setup
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <linux/init.h>
+#include <linux/lsm_hooks.h>
+
+#include "common.h"
+#include "cred.h"
+#include "fs.h"
+#include "ptrace.h"
+#include "setup.h"
+
+bool landlock_initialized __lsm_ro_after_init = false;
+
+struct lsm_blob_sizes landlock_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct landlock_cred_security),
+ .lbs_inode = sizeof(struct landlock_inode_security),
+ .lbs_superblock = sizeof(struct landlock_superblock_security),
+};
+
+static int __init landlock_init(void)
+{
+ landlock_add_cred_hooks();
+ landlock_add_ptrace_hooks();
+ landlock_add_fs_hooks();
+ landlock_initialized = true;
+ pr_info("Up and running.\n");
+ return 0;
+}
+
+DEFINE_LSM(LANDLOCK_NAME) = {
+ .name = LANDLOCK_NAME,
+ .init = landlock_init,
+ .blobs = &landlock_blob_sizes,
+};
diff --git a/security/landlock/setup.h b/security/landlock/setup.h
new file mode 100644
index 000000000..1daffab1a
--- /dev/null
+++ b/security/landlock/setup.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Security framework setup
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#ifndef _SECURITY_LANDLOCK_SETUP_H
+#define _SECURITY_LANDLOCK_SETUP_H
+
+#include <linux/lsm_hooks.h>
+
+extern bool landlock_initialized;
+
+extern struct lsm_blob_sizes landlock_blob_sizes;
+
+#endif /* _SECURITY_LANDLOCK_SETUP_H */
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
new file mode 100644
index 000000000..2ca0ccbd9
--- /dev/null
+++ b/security/landlock/syscalls.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock LSM - System call implementations and user space interfaces
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ */
+
+#include <asm/current.h>
+#include <linux/anon_inodes.h>
+#include <linux/build_bug.h>
+#include <linux/capability.h>
+#include <linux/compiler_types.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/mount.h>
+#include <linux/path.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/landlock.h>
+
+#include "cred.h"
+#include "fs.h"
+#include "limits.h"
+#include "ruleset.h"
+#include "setup.h"
+
+/**
+ * copy_min_struct_from_user - Safe future-proof argument copying
+ *
+ * Extends copy_struct_from_user() to check for a consistent user buffer.
+ *
+ * @dst: Kernel space pointer or NULL.
+ * @ksize: Actual size of the data pointed to by @dst.
+ * @ksize_min: Minimal required size to be copied.
+ * @src: User space pointer or NULL.
+ * @usize: (Alleged) size of the data pointed to by @src.
+ */
+static __always_inline int
+copy_min_struct_from_user(void *const dst, const size_t ksize,
+ const size_t ksize_min, const void __user *const src,
+ const size_t usize)
+{
+ /* Checks buffer inconsistencies. */
+ BUILD_BUG_ON(!dst);
+ if (!src)
+ return -EFAULT;
+
+ /* Checks size ranges. */
+ BUILD_BUG_ON(ksize <= 0);
+ BUILD_BUG_ON(ksize < ksize_min);
+ if (usize < ksize_min)
+ return -EINVAL;
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ /* Copies user buffer and fills with zeros. */
+ return copy_struct_from_user(dst, ksize, src, usize);
+}
+
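+/*
+ * For example, with &struct landlock_ruleset_attr (where @ksize and
+ * @ksize_min are both 8), the checks above give:
+ *
+ *   usize == 8:        plain copy;
+ *   usize == 4:        -EINVAL (too small for the required fields);
+ *   usize == 16:       copied, but -E2BIG if the trailing 8 bytes are not
+ *                      zero, per copy_struct_from_user();
+ *   usize > PAGE_SIZE: -E2BIG.
+ */
+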
+/*
+ * This function only contains arithmetic operations with constants, leading to
+ * BUILD_BUG_ON(). The related code is evaluated and checked at build time,
+ * but it is then ignored thanks to compiler optimizations.
+ */
+static void build_check_abi(void)
+{
+ struct landlock_ruleset_attr ruleset_attr;
+ struct landlock_path_beneath_attr path_beneath_attr;
+ size_t ruleset_size, path_beneath_size;
+
+ /*
+	 * For each user space ABI structure, first checks that there is no
+ * hole in them, then checks that all architectures have the same
+ * struct size.
+ */
+ ruleset_size = sizeof(ruleset_attr.handled_access_fs);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != 8);
+
+ path_beneath_size = sizeof(path_beneath_attr.allowed_access);
+ path_beneath_size += sizeof(path_beneath_attr.parent_fd);
+ BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size);
+ BUILD_BUG_ON(sizeof(path_beneath_attr) != 12);
+}
+
+/* Ruleset handling */
+
+static int fop_ruleset_release(struct inode *const inode,
+ struct file *const filp)
+{
+ struct landlock_ruleset *ruleset = filp->private_data;
+
+ landlock_put_ruleset(ruleset);
+ return 0;
+}
+
+static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf,
+ const size_t size, loff_t *const ppos)
+{
+ /* Dummy handler to enable FMODE_CAN_READ. */
+ return -EINVAL;
+}
+
+static ssize_t fop_dummy_write(struct file *const filp,
+ const char __user *const buf, const size_t size,
+ loff_t *const ppos)
+{
+ /* Dummy handler to enable FMODE_CAN_WRITE. */
+ return -EINVAL;
+}
+
+/*
+ * A ruleset file descriptor enables building a ruleset by adding (i.e.
+ * writing) rule after rule, without relying on the task's context. This
+ * reentrant design is also used in a read way to enforce the ruleset on the
+ * current task.
+ */
+static const struct file_operations ruleset_fops = {
+ .release = fop_ruleset_release,
+ .read = fop_dummy_read,
+ .write = fop_dummy_write,
+};
+
+#define LANDLOCK_ABI_VERSION 2
+
+/**
+ * sys_landlock_create_ruleset - Create a new ruleset
+ *
+ * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of
+ * the new ruleset.
+ * @size: Size of the pointed-to &struct landlock_ruleset_attr (needed for
+ * backward and forward compatibility).
+ * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
+ *
+ * This system call enables creating a new Landlock ruleset, and returns the
+ * related file descriptor on success.
+ *
+ * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
+ * 0, then the returned value is the highest supported Landlock ABI version
+ * (starting at 1).
+ *
+ * Possible returned errors are:
+ *
+ * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - %EINVAL: unknown @flags, or unknown access, or too small @size;
+ * - %E2BIG or %EFAULT: @attr or @size inconsistencies;
+ * - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
+ */
+SYSCALL_DEFINE3(landlock_create_ruleset,
+ const struct landlock_ruleset_attr __user *const, attr,
+ const size_t, size, const __u32, flags)
+{
+ struct landlock_ruleset_attr ruleset_attr;
+ struct landlock_ruleset *ruleset;
+ int err, ruleset_fd;
+
+ /* Build-time checks. */
+ build_check_abi();
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ if (flags) {
+ if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr &&
+ !size)
+ return LANDLOCK_ABI_VERSION;
+ return -EINVAL;
+ }
+
+ /* Copies raw user space buffer. */
+ err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr),
+ offsetofend(typeof(ruleset_attr),
+ handled_access_fs),
+ attr, size);
+ if (err)
+ return err;
+
+ /* Checks content (and 32-bits cast). */
+ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) !=
+ LANDLOCK_MASK_ACCESS_FS)
+ return -EINVAL;
+
+ /* Checks arguments and transforms to kernel struct. */
+ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* Creates anonymous FD referring to the ruleset. */
+ ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops,
+ ruleset, O_RDWR | O_CLOEXEC);
+ if (ruleset_fd < 0)
+ landlock_put_ruleset(ruleset);
+ return ruleset_fd;
+}
+
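+/*
+ * A minimal user space sketch, assuming <linux/landlock.h> and raw
+ * syscall(2) invocations (there is no libc wrapper):
+ *
+ *   struct landlock_ruleset_attr attr = {
+ *           .handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ *   };
+ *   int abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
+ *                     LANDLOCK_CREATE_RULESET_VERSION);
+ *   int ruleset_fd = syscall(__NR_landlock_create_ruleset, &attr,
+ *                            sizeof(attr), 0);
+ */
+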
+/*
+ * Returns an owned ruleset from a FD. The caller is thus responsible for
+ * calling landlock_put_ruleset() on the return value.
+ */
+static struct landlock_ruleset *get_ruleset_from_fd(const int fd,
+ const fmode_t mode)
+{
+ struct fd ruleset_f;
+ struct landlock_ruleset *ruleset;
+
+ ruleset_f = fdget(fd);
+ if (!ruleset_f.file)
+ return ERR_PTR(-EBADF);
+
+ /* Checks FD type and access right. */
+ if (ruleset_f.file->f_op != &ruleset_fops) {
+ ruleset = ERR_PTR(-EBADFD);
+ goto out_fdput;
+ }
+ if (!(ruleset_f.file->f_mode & mode)) {
+ ruleset = ERR_PTR(-EPERM);
+ goto out_fdput;
+ }
+ ruleset = ruleset_f.file->private_data;
+ if (WARN_ON_ONCE(ruleset->num_layers != 1)) {
+ ruleset = ERR_PTR(-EINVAL);
+ goto out_fdput;
+ }
+ landlock_get_ruleset(ruleset);
+
+out_fdput:
+ fdput(ruleset_f);
+ return ruleset;
+}
+
+/* Path handling */
+
+/*
+ * @path: The caller must call path_put(@path) after the call if it succeeded.
+ */
+static int get_path_from_fd(const s32 fd, struct path *const path)
+{
+ struct fd f;
+ int err = 0;
+
+ BUILD_BUG_ON(!__same_type(
+ fd, ((struct landlock_path_beneath_attr *)NULL)->parent_fd));
+
+ /* Handles O_PATH. */
+ f = fdget_raw(fd);
+ if (!f.file)
+ return -EBADF;
+ /*
+ * Forbids ruleset FDs, internal filesystems (e.g. nsfs), including
+ * pseudo filesystems that will never be mountable (e.g. sockfs,
+ * pipefs).
+ */
+ if ((f.file->f_op == &ruleset_fops) ||
+ (f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
+ (f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
+ d_is_negative(f.file->f_path.dentry) ||
+ IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) {
+ err = -EBADFD;
+ goto out_fdput;
+ }
+ *path = f.file->f_path;
+ path_get(path);
+
+out_fdput:
+ fdput(f);
+ return err;
+}
+
+/**
+ * sys_landlock_add_rule - Add a new rule to a ruleset
+ *
+ * @ruleset_fd: File descriptor tied to the ruleset that should be extended
+ * with the new rule.
+ * @rule_type: Identifies the structure type pointed to by @rule_attr (only
+ * %LANDLOCK_RULE_PATH_BENEATH for now).
+ * @rule_attr: Pointer to a rule (only of type &struct
+ * landlock_path_beneath_attr for now).
+ * @flags: Must be 0.
+ *
+ * This system call enables defining a new rule and adding it to an existing
+ * ruleset.
+ *
+ * Possible returned errors are:
+ *
+ * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - %EINVAL: @flags is not 0, or inconsistent access in the rule (i.e.
+ * &landlock_path_beneath_attr.allowed_access is not a subset of the
+ * ruleset handled accesses);
+ * - %ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access);
+ * - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a
+ * member of @rule_attr is not a file descriptor as expected;
+ * - %EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of
+ * @rule_attr is not the expected file descriptor type;
+ * - %EPERM: @ruleset_fd has no write access to the underlying ruleset;
+ * - %EFAULT: @rule_attr inconsistency.
+ */
+SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
+ const enum landlock_rule_type, rule_type,
+ const void __user *const, rule_attr, const __u32, flags)
+{
+ struct landlock_path_beneath_attr path_beneath_attr;
+ struct path path;
+ struct landlock_ruleset *ruleset;
+ int res, err;
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ /* No flag for now. */
+ if (flags)
+ return -EINVAL;
+
+ /* Gets and checks the ruleset. */
+ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ if (rule_type != LANDLOCK_RULE_PATH_BENEATH) {
+ err = -EINVAL;
+ goto out_put_ruleset;
+ }
+
+ /* Copies raw user space buffer, only one type for now. */
+ res = copy_from_user(&path_beneath_attr, rule_attr,
+ sizeof(path_beneath_attr));
+ if (res) {
+ err = -EFAULT;
+ goto out_put_ruleset;
+ }
+
+ /*
+ * Informs about useless rule: empty allowed_access (i.e. deny rules)
+ * are ignored in path walks.
+ */
+ if (!path_beneath_attr.allowed_access) {
+ err = -ENOMSG;
+ goto out_put_ruleset;
+ }
+ /*
+ * Checks that allowed_access matches the @ruleset constraints
+	 * (ruleset->fs_access_masks[0] is automatically upgraded to 64 bits).
+ */
+ if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) !=
+ ruleset->fs_access_masks[0]) {
+ err = -EINVAL;
+ goto out_put_ruleset;
+ }
+
+ /* Gets and checks the new rule. */
+ err = get_path_from_fd(path_beneath_attr.parent_fd, &path);
+ if (err)
+ goto out_put_ruleset;
+
+ /* Imports the new rule. */
+ err = landlock_append_fs_rule(ruleset, &path,
+ path_beneath_attr.allowed_access);
+ path_put(&path);
+
+out_put_ruleset:
+ landlock_put_ruleset(ruleset);
+ return err;
+}
+
+/* Enforcement */
+
+/**
+ * sys_landlock_restrict_self - Enforce a ruleset on the calling thread
+ *
+ * @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
+ * @flags: Must be 0.
+ *
+ * This system call enables enforcing a Landlock ruleset on the current
+ * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its
+ * namespace or is running with no_new_privs. This avoids scenarios where
+ * unprivileged tasks can affect the behavior of privileged children.
+ *
+ * Possible returned errors are:
+ *
+ * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+ * - %EINVAL: @flags is not 0;
+ * - %EBADF: @ruleset_fd is not a file descriptor for the current thread;
+ * - %EBADFD: @ruleset_fd is not a ruleset file descriptor;
+ * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the
+ * current thread is not running with no_new_privs, or it doesn't have
+ *   %CAP_SYS_ADMIN in its namespace;
+ * - %E2BIG: The maximum number of stacked rulesets is reached for the current
+ * thread.
+ */
+SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
+ flags)
+{
+ struct landlock_ruleset *new_dom, *ruleset;
+ struct cred *new_cred;
+ struct landlock_cred_security *new_llcred;
+ int err;
+
+ if (!landlock_initialized)
+ return -EOPNOTSUPP;
+
+ /*
+ * Similar checks as for seccomp(2), except that an -EPERM may be
+ * returned.
+ */
+ if (!task_no_new_privs(current) &&
+ !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* No flag for now. */
+ if (flags)
+ return -EINVAL;
+
+ /* Gets and checks the ruleset. */
+ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* Prepares new credentials. */
+ new_cred = prepare_creds();
+ if (!new_cred) {
+ err = -ENOMEM;
+ goto out_put_ruleset;
+ }
+ new_llcred = landlock_cred(new_cred);
+
+ /*
+ * There is no possible race condition while copying and manipulating
+ * the current credentials because they are dedicated per thread.
+ */
+ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset);
+ if (IS_ERR(new_dom)) {
+ err = PTR_ERR(new_dom);
+ goto out_put_creds;
+ }
+
+ /* Replaces the old (prepared) domain. */
+ landlock_put_ruleset(new_llcred->domain);
+ new_llcred->domain = new_dom;
+
+ landlock_put_ruleset(ruleset);
+ return commit_creds(new_cred);
+
+out_put_creds:
+ abort_creds(new_cred);
+
+out_put_ruleset:
+ landlock_put_ruleset(ruleset);
+ return err;
+}
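
Taken together, the three syscalls above form the whole Landlock user-space API: create a ruleset, add path rules to it, then self-restrict. A minimal sketch of that flow, assuming a libc that exposes the SYS_landlock_* numbers but no dedicated wrappers (the /usr path and the access masks are purely illustrative):

	#include <fcntl.h>
	#include <linux/landlock.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct landlock_ruleset_attr ruleset_attr = {
			/* Accesses that the ruleset will deny by default. */
			.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE |
					     LANDLOCK_ACCESS_FS_WRITE_FILE,
		};
		struct landlock_path_beneath_attr path_beneath = {
			.allowed_access = LANDLOCK_ACCESS_FS_EXECUTE,
		};
		long abi;
		int ruleset_fd;

		/* NULL attr + zero size + VERSION flag queries the ABI version. */
		abi = syscall(SYS_landlock_create_ruleset, NULL, 0,
			      LANDLOCK_CREATE_RULESET_VERSION);
		if (abi < 0)
			return 1;	/* Landlock not built in or disabled at boot. */

		ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
				     sizeof(ruleset_attr), 0);
		if (ruleset_fd < 0)
			return 1;

		/* Re-allow execution beneath /usr; writes stay denied everywhere. */
		path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
		if (path_beneath.parent_fd < 0 ||
		    syscall(SYS_landlock_add_rule, ruleset_fd,
			    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
			return 1;
		close(path_beneath.parent_fd);

		/* Satisfies the no_new_privs-or-CAP_SYS_ADMIN check above. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
		    syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
			return 1;
		close(ruleset_fd);
		return 0;
	}
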
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
new file mode 100644
index 000000000..6724eaba3
--- /dev/null
+++ b/security/loadpin/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_LOADPIN
+ bool "Pin load of kernel files (modules, fw, etc) to one filesystem"
+ depends on SECURITY && BLOCK
+ help
+ Any files read through the kernel file reading interface
+ (kernel modules, firmware, kexec images, security policy)
+ can be pinned to the first filesystem used for loading. When
+ enabled, any files that come from other filesystems will be
+ rejected. This is best used on systems without an initrd that
+ have a root filesystem backed by a read-only device such as
+ dm-verity or a CDROM.
+
+config SECURITY_LOADPIN_ENFORCE
+ bool "Enforce LoadPin at boot"
+ depends on SECURITY_LOADPIN
+ help
+ If selected, LoadPin will enforce pinning at boot. If not
+ selected, it can be enabled at boot with the kernel parameter
+ "loadpin.enforce=1".
+
+config SECURITY_LOADPIN_VERITY
+ bool "Allow reading files from certain other filesystems that use dm-verity"
+ depends on SECURITY_LOADPIN && DM_VERITY=y && SECURITYFS
+ help
+	  If selected, LoadPin can allow reading files from filesystems
+ that use dm-verity. LoadPin maintains a list of verity root
+ digests it considers trusted. A verity backed filesystem is
+ considered trusted if its root digest is found in the list
+ of trusted digests.
+
+	  The list of trusted verity root digests can be populated through
+	  an ioctl on the LoadPin securityfs entry 'dm-verity'. The ioctl
+ expects a file descriptor of a file with verity digests as
+ parameter. The file must be located on the pinned root and
+ start with the line:
+
+ # LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS
+
+ This is followed by the verity digests, with one digest per
+ line.
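
From user space, the ioctl handshake described above looks roughly like this (a hedged sketch: /etc/loadpin-digests is a hypothetical path on the pinned root, the digest value is a placeholder, and LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS comes from <linux/loadpin.h>):

	#include <fcntl.h>
	#include <linux/loadpin.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/*
	 * /etc/loadpin-digests (hypothetical; must live on the pinned root):
	 *
	 *   # LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS
	 *   4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
	 */
	int main(void)
	{
		int digests_fd, loadpin_fd, rc;

		digests_fd = open("/etc/loadpin-digests", O_RDONLY | O_CLOEXEC);
		loadpin_fd = open("/sys/kernel/security/loadpin/dm-verity",
				  O_WRONLY | O_CLOEXEC);
		if (digests_fd < 0 || loadpin_fd < 0)
			return 1;
		/* The ioctl argument is a pointer to the digest file's descriptor. */
		rc = ioctl(loadpin_fd, LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS,
			   &digests_fd);
		close(loadpin_fd);
		close(digests_fd);
		return rc ? 1 : 0;
	}
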
diff --git a/security/loadpin/Makefile b/security/loadpin/Makefile
new file mode 100644
index 000000000..0ead1c310
--- /dev/null
+++ b/security/loadpin/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SECURITY_LOADPIN) += loadpin.o
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
new file mode 100644
index 000000000..110a5ab2b
--- /dev/null
+++ b/security/loadpin/loadpin.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Module and Firmware Pinning Security Module
+ *
+ * Copyright 2011-2016 Google Inc.
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ */
+
+#define pr_fmt(fmt) "LoadPin: " fmt
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel_read_file.h>
+#include <linux/lsm_hooks.h>
+#include <linux/mount.h>
+#include <linux/blkdev.h>
+#include <linux/path.h>
+#include <linux/sched.h> /* current */
+#include <linux/string_helpers.h>
+#include <linux/dm-verity-loadpin.h>
+#include <uapi/linux/loadpin.h>
+
+#define VERITY_DIGEST_FILE_HEADER "# LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS"
+
+static void report_load(const char *origin, struct file *file, char *operation)
+{
+ char *cmdline, *pathname;
+
+ pathname = kstrdup_quotable_file(file, GFP_KERNEL);
+ cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL);
+
+ pr_notice("%s %s obj=%s%s%s pid=%d cmdline=%s%s%s\n",
+ origin, operation,
+ (pathname && pathname[0] != '<') ? "\"" : "",
+ pathname,
+ (pathname && pathname[0] != '<') ? "\"" : "",
+ task_pid_nr(current),
+ cmdline ? "\"" : "", cmdline, cmdline ? "\"" : "");
+
+ kfree(cmdline);
+ kfree(pathname);
+}
+
+static int enforce = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE);
+static char *exclude_read_files[READING_MAX_ID];
+static int ignore_read_file_id[READING_MAX_ID] __ro_after_init;
+static struct super_block *pinned_root;
+static DEFINE_SPINLOCK(pinned_root_spinlock);
+#ifdef CONFIG_SECURITY_LOADPIN_VERITY
+static bool deny_reading_verity_digests;
+#endif
+
+#ifdef CONFIG_SYSCTL
+
+static struct ctl_path loadpin_sysctl_path[] = {
+ { .procname = "kernel", },
+ { .procname = "loadpin", },
+ { }
+};
+
+static struct ctl_table loadpin_sysctl_table[] = {
+ {
+ .procname = "enforce",
+ .data = &enforce,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+/*
+ * This must be called after early kernel init, since the rootdev is not
+ * available until then.
+ */
+static void check_pinning_enforcement(struct super_block *mnt_sb)
+{
+ bool ro = false;
+
+ /*
+ * If load pinning is not enforced via a read-only block
+ * device, allow sysctl to change modes for testing.
+ */
+ if (mnt_sb->s_bdev) {
+ ro = bdev_read_only(mnt_sb->s_bdev);
+ pr_info("%pg (%u:%u): %s\n", mnt_sb->s_bdev,
+ MAJOR(mnt_sb->s_bdev->bd_dev),
+ MINOR(mnt_sb->s_bdev->bd_dev),
+ ro ? "read-only" : "writable");
+ } else
+ pr_info("mnt_sb lacks block device, treating as: writable\n");
+
+ if (!ro) {
+ if (!register_sysctl_paths(loadpin_sysctl_path,
+ loadpin_sysctl_table))
+ pr_notice("sysctl registration failed!\n");
+ else
+ pr_info("enforcement can be disabled.\n");
+ } else
+ pr_info("load pinning engaged.\n");
+}
+#else
+static void check_pinning_enforcement(struct super_block *mnt_sb)
+{
+ pr_info("load pinning engaged.\n");
+}
+#endif
+
+static void loadpin_sb_free_security(struct super_block *mnt_sb)
+{
+ /*
+ * When unmounting the filesystem we were using for load
+ * pinning, we acknowledge the superblock release, but make sure
+ * no other modules or firmware can be loaded.
+ */
+ if (!IS_ERR_OR_NULL(pinned_root) && mnt_sb == pinned_root) {
+ pinned_root = ERR_PTR(-EIO);
+ pr_info("umount pinned fs: refusing further loads\n");
+ }
+}
+
+static int loadpin_check(struct file *file, enum kernel_read_file_id id)
+{
+ struct super_block *load_root;
+ const char *origin = kernel_read_file_id_str(id);
+
+ /* If the file id is excluded, ignore the pinning. */
+ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) &&
+ ignore_read_file_id[id]) {
+ report_load(origin, file, "pinning-excluded");
+ return 0;
+ }
+
+ /* This handles the older init_module API that has a NULL file. */
+ if (!file) {
+ if (!enforce) {
+ report_load(origin, NULL, "old-api-pinning-ignored");
+ return 0;
+ }
+
+ report_load(origin, NULL, "old-api-denied");
+ return -EPERM;
+ }
+
+ load_root = file->f_path.mnt->mnt_sb;
+
+ /* First loaded module/firmware defines the root for all others. */
+ spin_lock(&pinned_root_spinlock);
+ /*
+ * pinned_root is only NULL at startup. Otherwise, it is either
+ * a valid reference, or an ERR_PTR.
+ */
+ if (!pinned_root) {
+ pinned_root = load_root;
+ /*
+ * Unlock now since it's only pinned_root we care about.
+ * In the worst case, we will (correctly) report pinning
+ * failures before we have announced that pinning is
+ * enforcing. This would be purely cosmetic.
+ */
+ spin_unlock(&pinned_root_spinlock);
+ check_pinning_enforcement(pinned_root);
+ report_load(origin, file, "pinned");
+ } else {
+ spin_unlock(&pinned_root_spinlock);
+ }
+
+ if (IS_ERR_OR_NULL(pinned_root) ||
+ ((load_root != pinned_root) && !dm_verity_loadpin_is_bdev_trusted(load_root->s_bdev))) {
+ if (unlikely(!enforce)) {
+ report_load(origin, file, "pinning-ignored");
+ return 0;
+ }
+
+ report_load(origin, file, "denied");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents)
+{
+ /*
+ * LoadPin only cares about the _origin_ of a file, not its
+ * contents, so we can ignore the "are full contents available"
+ * argument here.
+ */
+ return loadpin_check(file, id);
+}
+
+static int loadpin_load_data(enum kernel_load_data_id id, bool contents)
+{
+ /*
+ * LoadPin only cares about the _origin_ of a file, not its
+ * contents, so a NULL file is passed, and we can ignore the
+ * state of "contents".
+ */
+ return loadpin_check(NULL, (enum kernel_read_file_id) id);
+}
+
+static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(sb_free_security, loadpin_sb_free_security),
+ LSM_HOOK_INIT(kernel_read_file, loadpin_read_file),
+ LSM_HOOK_INIT(kernel_load_data, loadpin_load_data),
+};
+
+static void __init parse_exclude(void)
+{
+ int i, j;
+ char *cur;
+
+ /*
+ * Make sure all the arrays stay within expected sizes. This
+ * is slightly weird because kernel_read_file_str[] includes
+ * READING_MAX_ID, which isn't actually meaningful here.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(exclude_read_files) !=
+ ARRAY_SIZE(ignore_read_file_id));
+ BUILD_BUG_ON(ARRAY_SIZE(kernel_read_file_str) <
+ ARRAY_SIZE(ignore_read_file_id));
+
+ for (i = 0; i < ARRAY_SIZE(exclude_read_files); i++) {
+ cur = exclude_read_files[i];
+ if (!cur)
+ break;
+ if (*cur == '\0')
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(ignore_read_file_id); j++) {
+ if (strcmp(cur, kernel_read_file_str[j]) == 0) {
+ pr_info("excluding: %s\n",
+ kernel_read_file_str[j]);
+ ignore_read_file_id[j] = 1;
+ /*
+				 * Cannot break, because one read_file_str
+				 * may map to more than one read_file_id.
+ */
+ }
+ }
+ }
+}
+
+static int __init loadpin_init(void)
+{
+ pr_info("ready to pin (currently %senforcing)\n",
+ enforce ? "" : "not ");
+ parse_exclude();
+ security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
+
+ return 0;
+}
+
+DEFINE_LSM(loadpin) = {
+ .name = "loadpin",
+ .init = loadpin_init,
+};
+
+#ifdef CONFIG_SECURITY_LOADPIN_VERITY
+
+enum loadpin_securityfs_interface_index {
+ LOADPIN_DM_VERITY,
+};
+
+static int read_trusted_verity_root_digests(unsigned int fd)
+{
+ struct fd f;
+ void *data;
+ int rc;
+ char *p, *d;
+
+ if (deny_reading_verity_digests)
+ return -EPERM;
+
+ /* The list of trusted root digests can only be set up once */
+ if (!list_empty(&dm_verity_loadpin_trusted_root_digests))
+ return -EPERM;
+
+ f = fdget(fd);
+ if (!f.file)
+ return -EINVAL;
+
+ data = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = kernel_read_file(f.file, 0, (void **)&data, SZ_4K - 1, NULL, READING_POLICY);
+ if (rc < 0)
+ goto err;
+
+ p = data;
+ p[rc] = '\0';
+ p = strim(p);
+
+ while ((d = strsep(&p, "\n")) != NULL) {
+ int len;
+ struct dm_verity_loadpin_trusted_root_digest *trd;
+
+ if (d == data) {
+ /* first line, validate header */
+ if (strcmp(d, VERITY_DIGEST_FILE_HEADER)) {
+ rc = -EPROTO;
+ goto err;
+ }
+
+ continue;
+ }
+
+ len = strlen(d);
+
+ if (len % 2) {
+ rc = -EPROTO;
+ goto err;
+ }
+
+ len /= 2;
+
+ trd = kzalloc(struct_size(trd, data, len), GFP_KERNEL);
+ if (!trd) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (hex2bin(trd->data, d, len)) {
+ kfree(trd);
+ rc = -EPROTO;
+ goto err;
+ }
+
+ trd->len = len;
+
+ list_add_tail(&trd->node, &dm_verity_loadpin_trusted_root_digests);
+ }
+
+ if (list_empty(&dm_verity_loadpin_trusted_root_digests)) {
+ rc = -EPROTO;
+ goto err;
+ }
+
+ kfree(data);
+ fdput(f);
+
+ return 0;
+
+err:
+ kfree(data);
+
+ /* any failure in loading/parsing invalidates the entire list */
+ {
+ struct dm_verity_loadpin_trusted_root_digest *trd, *tmp;
+
+ list_for_each_entry_safe(trd, tmp, &dm_verity_loadpin_trusted_root_digests, node) {
+ list_del(&trd->node);
+ kfree(trd);
+ }
+ }
+
+ /* disallow further attempts after reading a corrupt/invalid file */
+ deny_reading_verity_digests = true;
+
+ fdput(f);
+
+ return rc;
+}
+
+/******************************** securityfs ********************************/
+
+static long dm_verity_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ void __user *uarg = (void __user *)arg;
+ unsigned int fd;
+
+ switch (cmd) {
+ case LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS:
+ if (copy_from_user(&fd, uarg, sizeof(fd)))
+ return -EFAULT;
+
+ return read_trusted_verity_root_digests(fd);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations loadpin_dm_verity_ops = {
+ .unlocked_ioctl = dm_verity_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+/**
+ * init_loadpin_securityfs - create the securityfs directory for LoadPin
+ *
+ * We cannot do this from the loadpin_init() code path, since the security
+ * subsystem gets initialized before the vfs caches.
+ *
+ * Returns 0 if the securityfs directory creation was successful.
+ */
+static int __init init_loadpin_securityfs(void)
+{
+ struct dentry *loadpin_dir, *dentry;
+
+ loadpin_dir = securityfs_create_dir("loadpin", NULL);
+ if (IS_ERR(loadpin_dir)) {
+ pr_err("LoadPin: could not create securityfs dir: %ld\n",
+ PTR_ERR(loadpin_dir));
+ return PTR_ERR(loadpin_dir);
+ }
+
+ dentry = securityfs_create_file("dm-verity", 0600, loadpin_dir,
+ (void *)LOADPIN_DM_VERITY, &loadpin_dm_verity_ops);
+ if (IS_ERR(dentry)) {
+ pr_err("LoadPin: could not create securityfs entry 'dm-verity': %ld\n",
+ PTR_ERR(dentry));
+ return PTR_ERR(dentry);
+ }
+
+ return 0;
+}
+
+fs_initcall(init_loadpin_securityfs);
+
+#endif /* CONFIG_SECURITY_LOADPIN_VERITY */
+
+/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
+module_param(enforce, int, 0);
+MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
+module_param_array_named(exclude, exclude_read_files, charp, NULL, 0);
+MODULE_PARM_DESC(exclude, "Exclude pinning specific read file types");
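
Because LoadPin is always built in (it cannot be a loadable module), these parameters are set on the kernel command line; for example, to enforce pinning while exempting module and firmware loads (the exclude names must match entries of kernel_read_file_str[]; "kernel-module" and "firmware" are assumed here from the enum naming):

	loadpin.enforce=1 loadpin.exclude=kernel-module,firmware
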
diff --git a/security/lockdown/Kconfig b/security/lockdown/Kconfig
new file mode 100644
index 000000000..e84ddf484
--- /dev/null
+++ b/security/lockdown/Kconfig
@@ -0,0 +1,47 @@
+config SECURITY_LOCKDOWN_LSM
+ bool "Basic module for enforcing kernel lockdown"
+ depends on SECURITY
+ select MODULE_SIG if MODULES
+ help
+ Build support for an LSM that enforces a coarse kernel lockdown
+ behaviour.
+
+config SECURITY_LOCKDOWN_LSM_EARLY
+ bool "Enable lockdown LSM early in init"
+ depends on SECURITY_LOCKDOWN_LSM
+ help
+ Enable the lockdown LSM early in boot. This is necessary in order
+ to ensure that lockdown enforcement can be carried out on kernel
+ boot parameters that are otherwise parsed before the security
+ subsystem is fully initialised. If enabled, lockdown will
+ unconditionally be called before any other LSMs.
+
+choice
+ prompt "Kernel default lockdown mode"
+ default LOCK_DOWN_KERNEL_FORCE_NONE
+ depends on SECURITY_LOCKDOWN_LSM
+ help
+ The kernel can be configured to default to differing levels of
+ lockdown.
+
+config LOCK_DOWN_KERNEL_FORCE_NONE
+ bool "None"
+ help
+ No lockdown functionality is enabled by default. Lockdown may be
+	  enabled via the kernel command line or /sys/kernel/security/lockdown.
+
+config LOCK_DOWN_KERNEL_FORCE_INTEGRITY
+ bool "Integrity"
+ help
+ The kernel runs in integrity mode by default. Features that allow
+ the kernel to be modified at runtime are disabled.
+
+config LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY
+ bool "Confidentiality"
+ help
+ The kernel runs in confidentiality mode by default. Features that
+ allow the kernel to be modified at runtime or that permit userland
+ code to read confidential material held inside the kernel are
+ disabled.
+
+endchoice
diff --git a/security/lockdown/Makefile b/security/lockdown/Makefile
new file mode 100644
index 000000000..e3634b901
--- /dev/null
+++ b/security/lockdown/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown.o
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
new file mode 100644
index 000000000..a79b985e9
--- /dev/null
+++ b/security/lockdown/lockdown.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Lock down the kernel
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/security.h>
+#include <linux/export.h>
+#include <linux/lsm_hooks.h>
+
+static enum lockdown_reason kernel_locked_down;
+
+static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_CONFIDENTIALITY_MAX};
+
+/*
+ * Put the kernel into lock-down mode.
+ */
+static int lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+ if (kernel_locked_down >= level)
+ return -EPERM;
+
+ kernel_locked_down = level;
+ pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n",
+ where);
+ return 0;
+}
+
+static int __init lockdown_param(char *level)
+{
+ if (!level)
+ return -EINVAL;
+
+ if (strcmp(level, "integrity") == 0)
+ lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX);
+ else if (strcmp(level, "confidentiality") == 0)
+ lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+early_param("lockdown", lockdown_param);
+
+/**
+ * lockdown_is_locked_down - Find out if the kernel is locked down
+ * @what: Tag to use in notice generated if lockdown is in effect
+ */
+static int lockdown_is_locked_down(enum lockdown_reason what)
+{
+ if (WARN(what >= LOCKDOWN_CONFIDENTIALITY_MAX,
+ "Invalid lockdown reason"))
+ return -EPERM;
+
+ if (kernel_locked_down >= what) {
+ if (lockdown_reasons[what])
+ pr_notice_ratelimited("Lockdown: %s: %s is restricted; see man kernel_lockdown.7\n",
+ current->comm, lockdown_reasons[what]);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static struct security_hook_list lockdown_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
+};
+
+static int __init lockdown_lsm_init(void)
+{
+#if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY)
+ lock_kernel_down("Kernel configuration", LOCKDOWN_INTEGRITY_MAX);
+#elif defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY)
+ lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX);
+#endif
+ security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks),
+ "lockdown");
+ return 0;
+}
+
+static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ char temp[80];
+ int i, offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
+ enum lockdown_reason level = lockdown_levels[i];
+
+ if (lockdown_reasons[level]) {
+ const char *label = lockdown_reasons[level];
+
+ if (kernel_locked_down == level)
+ offset += sprintf(temp+offset, "[%s] ", label);
+ else
+ offset += sprintf(temp+offset, "%s ", label);
+ }
+ }
+
+ /* Convert the last space to a newline if needed. */
+ if (offset > 0)
+ temp[offset-1] = '\n';
+
+ return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+}
+
+static ssize_t lockdown_write(struct file *file, const char __user *buf,
+ size_t n, loff_t *ppos)
+{
+ char *state;
+ int i, len, err = -EINVAL;
+
+ state = memdup_user_nul(buf, n);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ len = strlen(state);
+ if (len && state[len-1] == '\n') {
+ state[len-1] = '\0';
+ len--;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
+ enum lockdown_reason level = lockdown_levels[i];
+ const char *label = lockdown_reasons[level];
+
+ if (label && !strcmp(state, label))
+ err = lock_kernel_down("securityfs", level);
+ }
+
+ kfree(state);
+ return err ? err : n;
+}
+
+static const struct file_operations lockdown_ops = {
+ .read = lockdown_read,
+ .write = lockdown_write,
+};
+
+static int __init lockdown_secfs_init(void)
+{
+ struct dentry *dentry;
+
+ dentry = securityfs_create_file("lockdown", 0644, NULL, NULL,
+ &lockdown_ops);
+ return PTR_ERR_OR_ZERO(dentry);
+}
+
+core_initcall(lockdown_secfs_init);
+
+#ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY
+DEFINE_EARLY_LSM(lockdown) = {
+#else
+DEFINE_LSM(lockdown) = {
+#endif
+ .name = "lockdown",
+ .init = lockdown_lsm_init,
+};
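
For reference, the securityfs interface above can be driven like this from user space (a minimal sketch; the write needs an appropriately privileged caller, and since lock_kernel_down() rejects transitions to an equal or lower level, the level can only ever be raised):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[80] = "";
		int fd = open("/sys/kernel/security/lockdown", O_RDWR);

		if (fd < 0)
			return 1;
		/* Prints e.g. "[none] integrity confidentiality" (current level bracketed). */
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			fputs(buf, stdout);
		/* Raising the level succeeds once; lowering it fails with EPERM. */
		if (write(fd, "integrity", strlen("integrity")) < 0)
			perror("lockdown");
		close(fd);
		return 0;
	}
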
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
new file mode 100644
index 000000000..75cc3f8d2
--- /dev/null
+++ b/security/lsm_audit.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * common LSM auditing functions
+ *
+ * Based on code written for SELinux by :
+ * Stephen Smalley, <sds@tycho.nsa.gov>
+ * James Morris <jmorris@redhat.com>
+ * Author : Etienne Basset, <etienne.basset@ensta.org>
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <net/sock.h>
+#include <linux/un.h>
+#include <net/af_unix.h>
+#include <linux/audit.h>
+#include <linux/ipv6.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/sctp.h>
+#include <linux/lsm_audit.h>
+#include <linux/security.h>
+
+/**
+ * ipv4_skb_to_auditdata - fill auditdata from skb
+ * @skb: the skb
+ * @ad: the audit data to fill
+ * @proto: the layer 4 protocol
+ *
+ * Return: 0 on success
+ */
+int ipv4_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto)
+{
+ int ret = 0;
+ struct iphdr *ih;
+
+ ih = ip_hdr(skb);
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
+
+ if (proto)
+ *proto = ih->protocol;
+ /* non initial fragment */
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ return 0;
+
+ switch (ih->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr *uh = udp_hdr(skb);
+
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
+ break;
+ }
+ case IPPROTO_DCCP: {
+ struct dccp_hdr *dh = dccp_hdr(skb);
+
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
+ break;
+ }
+ case IPPROTO_SCTP: {
+ struct sctphdr *sh = sctp_hdr(skb);
+
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * ipv6_skb_to_auditdata - fill auditdata from skb
+ * @skb: the skb
+ * @ad: the audit data to fill
+ * @proto: the layer 4 protocol
+ *
+ * Return: 0 on success
+ */
+int ipv6_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto)
+{
+ int offset, ret = 0;
+ struct ipv6hdr *ip6;
+ u8 nexthdr;
+ __be16 frag_off;
+
+ ip6 = ipv6_hdr(skb);
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
+	/* IPv6 can have several extension headers before the transport
+	 * header; skip them. */
+ offset = skb_network_offset(skb);
+ offset += sizeof(*ip6);
+ nexthdr = ip6->nexthdr;
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+ if (offset < 0)
+ return 0;
+ if (proto)
+ *proto = nexthdr;
+ switch (nexthdr) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph, *th;
+
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ break;
+
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr _udph, *uh;
+
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ break;
+
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
+ break;
+ }
+ case IPPROTO_DCCP: {
+ struct dccp_hdr _dccph, *dh;
+
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh == NULL)
+ break;
+
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
+ break;
+ }
+ case IPPROTO_SCTP: {
+ struct sctphdr _sctph, *sh;
+
+ sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ break;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+#endif
+
+
+static inline void print_ipv6_addr(struct audit_buffer *ab,
+ const struct in6_addr *addr, __be16 port,
+ char *name1, char *name2)
+{
+ if (!ipv6_addr_any(addr))
+ audit_log_format(ab, " %s=%pI6c", name1, addr);
+ if (port)
+ audit_log_format(ab, " %s=%d", name2, ntohs(port));
+}
+
+static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
+ __be16 port, char *name1, char *name2)
+{
+ if (addr)
+ audit_log_format(ab, " %s=%pI4", name1, &addr);
+ if (port)
+ audit_log_format(ab, " %s=%d", name2, ntohs(port));
+}
+
+/**
+ * dump_common_audit_data - helper to dump common audit data
+ * @ab: the audit buffer
+ * @a: common audit data
+ */
+static void dump_common_audit_data(struct audit_buffer *ab,
+ struct common_audit_data *a)
+{
+ char comm[sizeof(current->comm)];
+
+ /*
+	 * To keep stack sizes in check, force programmers to notice if they
+ * start making this union too large! See struct lsm_network_audit
+ * as an example of how to deal with large data.
+ */
+ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
+
+ audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
+ audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
+
+ switch (a->type) {
+ case LSM_AUDIT_DATA_NONE:
+ return;
+ case LSM_AUDIT_DATA_IPC:
+ audit_log_format(ab, " ipc_key=%d ", a->u.ipc_id);
+ break;
+ case LSM_AUDIT_DATA_CAP:
+ audit_log_format(ab, " capability=%d ", a->u.cap);
+ break;
+ case LSM_AUDIT_DATA_PATH: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.path);
+
+ inode = d_backing_inode(a->u.path.dentry);
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ break;
+ }
+ case LSM_AUDIT_DATA_FILE: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.file->f_path);
+
+ inode = file_inode(a->u.file);
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ break;
+ }
+ case LSM_AUDIT_DATA_IOCTL_OP: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.op->path);
+
+ inode = a->u.op->path.dentry->d_inode;
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+
+ audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
+ break;
+ }
+ case LSM_AUDIT_DATA_DENTRY: {
+ struct inode *inode;
+
+ audit_log_format(ab, " name=");
+ spin_lock(&a->u.dentry->d_lock);
+ audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+ spin_unlock(&a->u.dentry->d_lock);
+
+ inode = d_backing_inode(a->u.dentry);
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ break;
+ }
+ case LSM_AUDIT_DATA_INODE: {
+ struct dentry *dentry;
+ struct inode *inode;
+
+ rcu_read_lock();
+ inode = a->u.inode;
+ dentry = d_find_alias_rcu(inode);
+ if (dentry) {
+ audit_log_format(ab, " name=");
+ spin_lock(&dentry->d_lock);
+ audit_log_untrustedstring(ab, dentry->d_name.name);
+ spin_unlock(&dentry->d_lock);
+ }
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ rcu_read_unlock();
+ break;
+ }
+ case LSM_AUDIT_DATA_TASK: {
+ struct task_struct *tsk = a->u.tsk;
+ if (tsk) {
+ pid_t pid = task_tgid_nr(tsk);
+ if (pid) {
+ char comm[sizeof(tsk->comm)];
+ audit_log_format(ab, " opid=%d ocomm=", pid);
+ audit_log_untrustedstring(ab,
+ memcpy(comm, tsk->comm, sizeof(comm)));
+ }
+ }
+ break;
+ }
+ case LSM_AUDIT_DATA_NET:
+ if (a->u.net->sk) {
+ const struct sock *sk = a->u.net->sk;
+ struct unix_sock *u;
+ struct unix_address *addr;
+ int len = 0;
+ char *p = NULL;
+
+ switch (sk->sk_family) {
+ case AF_INET: {
+ struct inet_sock *inet = inet_sk(sk);
+
+ print_ipv4_addr(ab, inet->inet_rcv_saddr,
+ inet->inet_sport,
+ "laddr", "lport");
+ print_ipv4_addr(ab, inet->inet_daddr,
+ inet->inet_dport,
+ "faddr", "fport");
+ break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ struct inet_sock *inet = inet_sk(sk);
+
+ print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
+ inet->inet_sport,
+ "laddr", "lport");
+ print_ipv6_addr(ab, &sk->sk_v6_daddr,
+ inet->inet_dport,
+ "faddr", "fport");
+ break;
+ }
+#endif
+ case AF_UNIX:
+ u = unix_sk(sk);
+ addr = smp_load_acquire(&u->addr);
+ if (!addr)
+ break;
+ if (u->path.dentry) {
+ audit_log_d_path(ab, " path=", &u->path);
+ break;
+ }
+			len = addr->len - sizeof(short);
+ p = &addr->name->sun_path[0];
+ audit_log_format(ab, " path=");
+ if (*p)
+ audit_log_untrustedstring(ab, p);
+ else
+ audit_log_n_hex(ab, p, len);
+ break;
+ }
+ }
+
+ switch (a->u.net->family) {
+ case AF_INET:
+ print_ipv4_addr(ab, a->u.net->v4info.saddr,
+ a->u.net->sport,
+ "saddr", "src");
+ print_ipv4_addr(ab, a->u.net->v4info.daddr,
+ a->u.net->dport,
+ "daddr", "dest");
+ break;
+ case AF_INET6:
+ print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+ a->u.net->sport,
+ "saddr", "src");
+ print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+ a->u.net->dport,
+ "daddr", "dest");
+ break;
+ }
+ if (a->u.net->netif > 0) {
+ struct net_device *dev;
+
+ /* NOTE: we always use init's namespace */
+ dev = dev_get_by_index(&init_net, a->u.net->netif);
+ if (dev) {
+ audit_log_format(ab, " netif=%s", dev->name);
+ dev_put(dev);
+ }
+ }
+ break;
+#ifdef CONFIG_KEYS
+ case LSM_AUDIT_DATA_KEY:
+ audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
+ if (a->u.key_struct.key_desc) {
+ audit_log_format(ab, " key_desc=");
+ audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
+ }
+ break;
+#endif
+ case LSM_AUDIT_DATA_KMOD:
+ audit_log_format(ab, " kmod=");
+ audit_log_untrustedstring(ab, a->u.kmod_name);
+ break;
+ case LSM_AUDIT_DATA_IBPKEY: {
+ struct in6_addr sbn_pfx;
+
+ memset(&sbn_pfx.s6_addr, 0,
+ sizeof(sbn_pfx.s6_addr));
+ memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
+ sizeof(a->u.ibpkey->subnet_prefix));
+ audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
+ a->u.ibpkey->pkey, &sbn_pfx);
+ break;
+ }
+ case LSM_AUDIT_DATA_IBENDPORT:
+ audit_log_format(ab, " device=%s port_num=%u",
+ a->u.ibendport->dev_name,
+ a->u.ibendport->port);
+ break;
+ case LSM_AUDIT_DATA_LOCKDOWN:
+ audit_log_format(ab, " lockdown_reason=\"%s\"",
+ lockdown_reasons[a->u.reason]);
+ break;
+ case LSM_AUDIT_DATA_ANONINODE:
+ audit_log_format(ab, " anonclass=%s", a->u.anonclass);
+ break;
+ } /* switch (a->type) */
+}
+
+/**
+ * common_lsm_audit - generic LSM auditing function
+ * @a: auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
+ *
+ * Set up the audit buffer for common security information and use the
+ * callbacks to print LSM-specific information.
+ */
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
+{
+ struct audit_buffer *ab;
+
+ if (a == NULL)
+ return;
+ /* we use GFP_ATOMIC so we won't sleep */
+ ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_AVC);
+
+ if (ab == NULL)
+ return;
+
+ if (pre_audit)
+ pre_audit(ab, a);
+
+ dump_common_audit_data(ab, a);
+
+ if (post_audit)
+ post_audit(ab, a);
+
+ audit_log_end(ab);
+}
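
The calling convention for common_lsm_audit() is easiest to see from the LSM side; a kernel-style sketch (the example_* functions are hypothetical, not part of any in-tree LSM):

	#include <linux/lsm_audit.h>

	static void example_pre_audit(struct audit_buffer *ab, void *a)
	{
		/* Runs before dump_common_audit_data(); opens the record. */
		audit_log_format(ab, "avc:  denied  { example_perm } for ");
	}

	static void example_post_audit(struct audit_buffer *ab, void *a)
	{
		/* Runs after the common fields have been dumped. */
		audit_log_format(ab, " result=denied");
	}

	static void example_report_file(struct file *file)
	{
		struct common_audit_data ad;

		ad.type = LSM_AUDIT_DATA_FILE;	/* selects which union member is dumped */
		ad.u.file = file;
		common_lsm_audit(&ad, example_pre_audit, example_post_audit);
	}
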
diff --git a/security/min_addr.c b/security/min_addr.c
new file mode 100644
index 000000000..88c9a6a21
--- /dev/null
+++ b/security/min_addr.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/security.h>
+#include <linux/sysctl.h>
+
+/* amount of vm to protect from userspace access by both DAC and the LSM */
+unsigned long mmap_min_addr;
+/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
+unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+/* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
+
+/*
+ * Update mmap_min_addr = max(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR)
+ */
+static void update_mmap_min_addr(void)
+{
+#ifdef CONFIG_LSM_MMAP_MIN_ADDR
+ if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+ mmap_min_addr = dac_mmap_min_addr;
+ else
+ mmap_min_addr = CONFIG_LSM_MMAP_MIN_ADDR;
+#else
+ mmap_min_addr = dac_mmap_min_addr;
+#endif
+}
+
+/*
+ * sysctl handler which just sets dac_mmap_min_addr = the new value and then
+ * calls update_mmap_min_addr() so non-MAP_FIXED hints get rounded properly
+ */
+int mmap_min_addr_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ if (write && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+
+ update_mmap_min_addr();
+
+ return ret;
+}
+
+static int __init init_mmap_min_addr(void)
+{
+ update_mmap_min_addr();
+
+ return 0;
+}
+pure_initcall(init_mmap_min_addr);
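
To make the max() interaction concrete: with CONFIG_LSM_MMAP_MIN_ADDR=65536 (a common distribution choice, assumed here for illustration), `sysctl -w vm.mmap_min_addr=4096` sets dac_mmap_min_addr to 4096 but leaves the effective mmap_min_addr at 65536, since the LSM floor dominates; writing 131072 instead raises the effective value to 131072.
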
diff --git a/security/safesetid/Kconfig b/security/safesetid/Kconfig
new file mode 100644
index 000000000..18b5fb904
--- /dev/null
+++ b/security/safesetid/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_SAFESETID
+ bool "Gate setid transitions to limit CAP_SET{U/G}ID capabilities"
+ depends on SECURITY
+ select SECURITYFS
+ default n
+ help
+ SafeSetID is an LSM module that gates the setid family of syscalls to
+ restrict UID/GID transitions from a given UID/GID to only those
+	  approved by a system-wide allowlist. These restrictions also prohibit
+ the given UIDs/GIDs from obtaining auxiliary privileges associated
+ with CAP_SET{U/G}ID, such as allowing a user to set up user namespace
+ UID mappings.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/safesetid/Makefile b/security/safesetid/Makefile
new file mode 100644
index 000000000..6b0660321
--- /dev/null
+++ b/security/safesetid/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the safesetid LSM.
+#
+
+obj-$(CONFIG_SECURITY_SAFESETID) := safesetid.o
+safesetid-y := lsm.o securityfs.o
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
new file mode 100644
index 000000000..e806739f7
--- /dev/null
+++ b/security/safesetid/lsm.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "SafeSetID: " fmt
+
+#include <linux/lsm_hooks.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include "lsm.h"
+
+/* Flag indicating whether initialization completed */
+int safesetid_initialized __initdata;
+
+struct setid_ruleset __rcu *safesetid_setuid_rules;
+struct setid_ruleset __rcu *safesetid_setgid_rules;
+
+
+/* Compute a decision for a transition from @src to @dst under @policy. */
+enum sid_policy_type _setid_policy_lookup(struct setid_ruleset *policy,
+ kid_t src, kid_t dst)
+{
+ struct setid_rule *rule;
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+
+ if (policy->type == UID) {
+ hash_for_each_possible(policy->rules, rule, next, __kuid_val(src.uid)) {
+ if (!uid_eq(rule->src_id.uid, src.uid))
+ continue;
+ if (uid_eq(rule->dst_id.uid, dst.uid))
+ return SIDPOL_ALLOWED;
+ result = SIDPOL_CONSTRAINED;
+ }
+ } else if (policy->type == GID) {
+ hash_for_each_possible(policy->rules, rule, next, __kgid_val(src.gid)) {
+ if (!gid_eq(rule->src_id.gid, src.gid))
+ continue;
+ if (gid_eq(rule->dst_id.gid, dst.gid)){
+ return SIDPOL_ALLOWED;
+ }
+ result = SIDPOL_CONSTRAINED;
+ }
+ } else {
+		/* Should not reach here; report the ID as constrained. */
+ result = SIDPOL_CONSTRAINED;
+ }
+ return result;
+}
+
+/*
+ * Compute a decision for a transition from @src to @dst under the active
+ * policy.
+ */
+static enum sid_policy_type setid_policy_lookup(kid_t src, kid_t dst, enum setid_type new_type)
+{
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+ struct setid_ruleset *pol;
+
+ rcu_read_lock();
+ if (new_type == UID)
+ pol = rcu_dereference(safesetid_setuid_rules);
+ else if (new_type == GID)
+ pol = rcu_dereference(safesetid_setgid_rules);
+ else { /* Should not reach here */
+ result = SIDPOL_CONSTRAINED;
+ rcu_read_unlock();
+ return result;
+ }
+
+ if (pol) {
+ pol->type = new_type;
+ result = _setid_policy_lookup(pol, src, dst);
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+static int safesetid_security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
+{
+ /* We're only interested in CAP_SETUID and CAP_SETGID. */
+ if (cap != CAP_SETUID && cap != CAP_SETGID)
+ return 0;
+
+ /*
+ * If CAP_SET{U/G}ID is currently used for a setid or setgroups syscall, we
+ * want to let it go through here; the real security check happens later, in
+ * the task_fix_set{u/g}id or task_fix_setgroups hooks.
+ */
+ if ((opts & CAP_OPT_INSETID) != 0)
+ return 0;
+
+ switch (cap) {
+ case CAP_SETUID:
+ /*
+ * If no policy applies to this task, allow the use of CAP_SETUID for
+ * other purposes.
+ */
+ if (setid_policy_lookup((kid_t){.uid = cred->uid}, INVALID_ID, UID) == SIDPOL_DEFAULT)
+ return 0;
+ /*
+ * Reject use of CAP_SETUID for functionality other than calling
+ * set*uid() (e.g. setting up userns uid mappings).
+ */
+ pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n",
+ __kuid_val(cred->uid));
+ return -EPERM;
+ case CAP_SETGID:
+ /*
+ * If no policy applies to this task, allow the use of CAP_SETGID for
+ * other purposes.
+ */
+ if (setid_policy_lookup((kid_t){.gid = cred->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
+ return 0;
+ /*
+		 * Reject use of CAP_SETGID for functionality other than calling
+ * set*gid() (e.g. setting up userns gid mappings).
+ */
+ pr_warn("Operation requires CAP_SETGID, which is not available to GID %u for operations besides approved set*gid transitions\n",
+			__kgid_val(cred->gid));
+ return -EPERM;
+ default:
+		/* Error: the only capabilities we're checking for are CAP_SETUID/GID */
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Check whether a caller with old credentials @old is allowed to switch to
+ * credentials that contain @new_id.
+ */
+static bool id_permitted_for_cred(const struct cred *old, kid_t new_id, enum setid_type new_type)
+{
+ bool permitted;
+
+ /* If our old creds already had this ID in it, it's fine. */
+ if (new_type == UID) {
+ if (uid_eq(new_id.uid, old->uid) || uid_eq(new_id.uid, old->euid) ||
+ uid_eq(new_id.uid, old->suid))
+ return true;
+ } else if (new_type == GID){
+ if (gid_eq(new_id.gid, old->gid) || gid_eq(new_id.gid, old->egid) ||
+ gid_eq(new_id.gid, old->sgid))
+ return true;
+ } else /* Error, new_type is an invalid type */
+ return false;
+
+ /*
+ * Transitions to new UIDs require a check against the policy of the old
+ * RUID.
+ */
+ permitted =
+ setid_policy_lookup((kid_t){.uid = old->uid}, new_id, new_type) != SIDPOL_CONSTRAINED;
+
+ if (!permitted) {
+ if (new_type == UID) {
+ pr_warn("UID transition ((%d,%d,%d) -> %d) blocked\n",
+ __kuid_val(old->uid), __kuid_val(old->euid),
+ __kuid_val(old->suid), __kuid_val(new_id.uid));
+ } else if (new_type == GID) {
+ pr_warn("GID transition ((%d,%d,%d) -> %d) blocked\n",
+ __kgid_val(old->gid), __kgid_val(old->egid),
+ __kgid_val(old->sgid), __kgid_val(new_id.gid));
+ } else /* Error, new_type is an invalid type */
+ return false;
+ }
+ return permitted;
+}
+
+/*
+ * Check whether there is either an exception for user under old cred struct to
+ * set*uid to user under new cred struct, or the UID transition is allowed (by
+ * Linux set*uid rules) even without CAP_SETUID.
+ */
+static int safesetid_task_fix_setuid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+
+ /* Do nothing if there are no setuid restrictions for our old RUID. */
+ if (setid_policy_lookup((kid_t){.uid = old->uid}, INVALID_ID, UID) == SIDPOL_DEFAULT)
+ return 0;
+
+ if (id_permitted_for_cred(old, (kid_t){.uid = new->uid}, UID) &&
+ id_permitted_for_cred(old, (kid_t){.uid = new->euid}, UID) &&
+ id_permitted_for_cred(old, (kid_t){.uid = new->suid}, UID) &&
+ id_permitted_for_cred(old, (kid_t){.uid = new->fsuid}, UID))
+ return 0;
+
+ /*
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing allowlist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
+ */
+ force_sig(SIGKILL);
+ return -EACCES;
+}
+
+static int safesetid_task_fix_setgid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+
+ /* Do nothing if there are no setgid restrictions for our old RGID. */
+ if (setid_policy_lookup((kid_t){.gid = old->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
+ return 0;
+
+ if (id_permitted_for_cred(old, (kid_t){.gid = new->gid}, GID) &&
+ id_permitted_for_cred(old, (kid_t){.gid = new->egid}, GID) &&
+ id_permitted_for_cred(old, (kid_t){.gid = new->sgid}, GID) &&
+ id_permitted_for_cred(old, (kid_t){.gid = new->fsgid}, GID))
+ return 0;
+
+ /*
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing allowlist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
+ */
+ force_sig(SIGKILL);
+ return -EACCES;
+}
+
+static int safesetid_task_fix_setgroups(struct cred *new, const struct cred *old)
+{
+ int i;
+
+ /* Do nothing if there are no setgid restrictions for our old RGID. */
+ if (setid_policy_lookup((kid_t){.gid = old->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
+ return 0;
+
+ get_group_info(new->group_info);
+ for (i = 0; i < new->group_info->ngroups; i++) {
+ if (!id_permitted_for_cred(old, (kid_t){.gid = new->group_info->gid[i]}, GID)) {
+ put_group_info(new->group_info);
+ /*
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing allowlist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
+ */
+ force_sig(SIGKILL);
+ return -EACCES;
+ }
+ }
+
+ put_group_info(new->group_info);
+ return 0;
+}
+
+static struct security_hook_list safesetid_security_hooks[] = {
+ LSM_HOOK_INIT(task_fix_setuid, safesetid_task_fix_setuid),
+ LSM_HOOK_INIT(task_fix_setgid, safesetid_task_fix_setgid),
+ LSM_HOOK_INIT(task_fix_setgroups, safesetid_task_fix_setgroups),
+ LSM_HOOK_INIT(capable, safesetid_security_capable)
+};
+
+static int __init safesetid_security_init(void)
+{
+ security_add_hooks(safesetid_security_hooks,
+ ARRAY_SIZE(safesetid_security_hooks), "safesetid");
+
+ /* Report that SafeSetID successfully initialized */
+ safesetid_initialized = 1;
+
+ return 0;
+}
+
+DEFINE_LSM(safesetid_security_init) = {
+ .init = safesetid_security_init,
+ .name = "safesetid",
+};
diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h
new file mode 100644
index 000000000..d346f4849
--- /dev/null
+++ b/security/safesetid/lsm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _SAFESETID_H
+#define _SAFESETID_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <linux/hashtable.h>
+
+/* Flag indicating whether initialization completed */
+extern int safesetid_initialized __initdata;
+
+enum sid_policy_type {
+ SIDPOL_DEFAULT, /* source ID is unaffected by policy */
+ SIDPOL_CONSTRAINED, /* source ID is affected by policy */
+ SIDPOL_ALLOWED /* target ID explicitly allowed */
+};
+
+typedef union {
+ kuid_t uid;
+ kgid_t gid;
+} kid_t;
+
+enum setid_type {
+ UID,
+ GID
+};
+
+/*
+ * Hash table entry to store safesetid policy signifying that 'src_id'
+ * can set*id to 'dst_id'.
+ */
+struct setid_rule {
+ struct hlist_node next;
+ kid_t src_id;
+ kid_t dst_id;
+
+	/* Flag to signal if rule is for UIDs or GIDs */
+ enum setid_type type;
+};
+
+#define SETID_HASH_BITS 8 /* 256 buckets in hash table */
+
+/* Extension of INVALID_UID/INVALID_GID for kid_t type */
+#define INVALID_ID (kid_t){.uid = INVALID_UID}
+
+struct setid_ruleset {
+ DECLARE_HASHTABLE(rules, SETID_HASH_BITS);
+ char *policy_str;
+ struct rcu_head rcu;
+
+	/* Flag to signal if ruleset is for UIDs or GIDs */
+ enum setid_type type;
+};
+
+enum sid_policy_type _setid_policy_lookup(struct setid_ruleset *policy,
+ kid_t src, kid_t dst);
+
+extern struct setid_ruleset __rcu *safesetid_setuid_rules;
+extern struct setid_ruleset __rcu *safesetid_setgid_rules;
+
+#endif /* _SAFESETID_H */
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
new file mode 100644
index 000000000..25310468b
--- /dev/null
+++ b/security/safesetid/securityfs.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "SafeSetID: " fmt
+
+#include <linux/security.h>
+#include <linux/cred.h>
+
+#include "lsm.h"
+
+static DEFINE_MUTEX(uid_policy_update_lock);
+static DEFINE_MUTEX(gid_policy_update_lock);
+
+/*
+ * If the input buffer contains one or more invalid IDs, the IDs stored in
+ * @rule may still get updated, but this function will return an error.
+ * Contents of @buf may be modified.
+ */
+static int parse_policy_line(struct file *file, char *buf,
+ struct setid_rule *rule)
+{
+ char *child_str;
+ int ret;
+ u32 parsed_parent, parsed_child;
+
+ /* Format of |buf| string should be <UID>:<UID> or <GID>:<GID> */
+ child_str = strchr(buf, ':');
+ if (child_str == NULL)
+ return -EINVAL;
+ *child_str = '\0';
+ child_str++;
+
+ ret = kstrtou32(buf, 0, &parsed_parent);
+ if (ret)
+ return ret;
+
+ ret = kstrtou32(child_str, 0, &parsed_child);
+ if (ret)
+ return ret;
+
+ if (rule->type == UID){
+ rule->src_id.uid = make_kuid(file->f_cred->user_ns, parsed_parent);
+ rule->dst_id.uid = make_kuid(file->f_cred->user_ns, parsed_child);
+ if (!uid_valid(rule->src_id.uid) || !uid_valid(rule->dst_id.uid))
+ return -EINVAL;
+ } else if (rule->type == GID){
+ rule->src_id.gid = make_kgid(file->f_cred->user_ns, parsed_parent);
+ rule->dst_id.gid = make_kgid(file->f_cred->user_ns, parsed_child);
+ if (!gid_valid(rule->src_id.gid) || !gid_valid(rule->dst_id.gid))
+ return -EINVAL;
+ } else {
+ /* Error, rule->type is an invalid type */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void __release_ruleset(struct rcu_head *rcu)
+{
+ struct setid_ruleset *pol =
+ container_of(rcu, struct setid_ruleset, rcu);
+ int bucket;
+ struct setid_rule *rule;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(pol->rules, bucket, tmp, rule, next)
+ kfree(rule);
+ kfree(pol->policy_str);
+ kfree(pol);
+}
+
+static void release_ruleset(struct setid_ruleset *pol){
+ call_rcu(&pol->rcu, __release_ruleset);
+}
+
+static void insert_rule(struct setid_ruleset *pol, struct setid_rule *rule)
+{
+ if (pol->type == UID)
+ hash_add(pol->rules, &rule->next, __kuid_val(rule->src_id.uid));
+ else if (pol->type == GID)
+ hash_add(pol->rules, &rule->next, __kgid_val(rule->src_id.gid));
+	else /* Error, pol->type is neither UID nor GID */
+ return;
+}
+
+static int verify_ruleset(struct setid_ruleset *pol)
+{
+ int bucket;
+ struct setid_rule *rule, *nrule;
+ int res = 0;
+
+ hash_for_each(pol->rules, bucket, rule, next) {
+ if (_setid_policy_lookup(pol, rule->dst_id, INVALID_ID) == SIDPOL_DEFAULT) {
+ if (pol->type == UID) {
+ pr_warn("insecure policy detected: uid %d is constrained but transitively unconstrained through uid %d\n",
+ __kuid_val(rule->src_id.uid),
+ __kuid_val(rule->dst_id.uid));
+ } else if (pol->type == GID) {
+ pr_warn("insecure policy detected: gid %d is constrained but transitively unconstrained through gid %d\n",
+ __kgid_val(rule->src_id.gid),
+ __kgid_val(rule->dst_id.gid));
+ } else { /* pol->type is an invalid type */
+ res = -EINVAL;
+ return res;
+ }
+ res = -EINVAL;
+
+ /* fix it up */
+ nrule = kmalloc(sizeof(struct setid_rule), GFP_KERNEL);
+ if (!nrule)
+ return -ENOMEM;
+ if (pol->type == UID){
+ nrule->src_id.uid = rule->dst_id.uid;
+ nrule->dst_id.uid = rule->dst_id.uid;
+ nrule->type = UID;
+ } else { /* pol->type must be GID if we've made it to here */
+ nrule->src_id.gid = rule->dst_id.gid;
+ nrule->dst_id.gid = rule->dst_id.gid;
+ nrule->type = GID;
+ }
+ insert_rule(pol, nrule);
+ }
+ }
+ return res;
+}
+
+static ssize_t handle_policy_update(struct file *file,
+ const char __user *ubuf, size_t len, enum setid_type policy_type)
+{
+ struct setid_ruleset *pol;
+ char *buf, *p, *end;
+ int err;
+
+ pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL);
+ if (!pol)
+ return -ENOMEM;
+ pol->policy_str = NULL;
+ pol->type = policy_type;
+ hash_init(pol->rules);
+
+ p = buf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(buf)) {
+ err = PTR_ERR(buf);
+ goto out_free_pol;
+ }
+ pol->policy_str = kstrdup(buf, GFP_KERNEL);
+ if (pol->policy_str == NULL) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
+
+ /* policy lines, including the last one, end with \n */
+ while (*p != '\0') {
+ struct setid_rule *rule;
+
+ end = strchr(p, '\n');
+ if (end == NULL) {
+ err = -EINVAL;
+ goto out_free_buf;
+ }
+ *end = '\0';
+
+ rule = kmalloc(sizeof(struct setid_rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
+
+ rule->type = policy_type;
+ err = parse_policy_line(file, p, rule);
+ if (err)
+ goto out_free_rule;
+
+ if (_setid_policy_lookup(pol, rule->src_id, rule->dst_id) == SIDPOL_ALLOWED) {
+ pr_warn("bad policy: duplicate entry\n");
+ err = -EEXIST;
+ goto out_free_rule;
+ }
+
+ insert_rule(pol, rule);
+ p = end + 1;
+ continue;
+
+out_free_rule:
+ kfree(rule);
+ goto out_free_buf;
+ }
+
+ err = verify_ruleset(pol);
+ /* bogus policy falls through after fixing it up */
+ if (err && err != -EINVAL)
+ goto out_free_buf;
+
+ /*
+ * Everything looks good, apply the policy and release the old one.
+ * What we really want here is an xchg() wrapper for RCU, but since that
+ * doesn't currently exist, just use a spinlock for now.
+ */
+ if (policy_type == UID) {
+ mutex_lock(&uid_policy_update_lock);
+ pol = rcu_replace_pointer(safesetid_setuid_rules, pol,
+ lockdep_is_held(&uid_policy_update_lock));
+ mutex_unlock(&uid_policy_update_lock);
+ } else if (policy_type == GID) {
+ mutex_lock(&gid_policy_update_lock);
+ pol = rcu_replace_pointer(safesetid_setgid_rules, pol,
+ lockdep_is_held(&gid_policy_update_lock));
+ mutex_unlock(&gid_policy_update_lock);
+ } else {
+		/* Error, policy type is neither UID nor GID */
+		pr_warn("error: bad policy type\n");
+ }
+ err = len;
+
+out_free_buf:
+ kfree(buf);
+out_free_pol:
+ if (pol)
+ release_ruleset(pol);
+ return err;
+}
+
+static ssize_t safesetid_uid_file_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ return handle_policy_update(file, buf, len, UID);
+}
+
+static ssize_t safesetid_gid_file_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ return handle_policy_update(file, buf, len, GID);
+}
+
+static ssize_t safesetid_file_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos, struct mutex *policy_update_lock, struct setid_ruleset __rcu *ruleset)
+{
+ ssize_t res = 0;
+ struct setid_ruleset *pol;
+ const char *kbuf;
+
+ mutex_lock(policy_update_lock);
+ pol = rcu_dereference_protected(ruleset, lockdep_is_held(policy_update_lock));
+ if (pol) {
+ kbuf = pol->policy_str;
+ res = simple_read_from_buffer(buf, len, ppos,
+ kbuf, strlen(kbuf));
+ }
+ mutex_unlock(policy_update_lock);
+
+ return res;
+}
+
+static ssize_t safesetid_uid_file_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return safesetid_file_read(file, buf, len, ppos,
+ &uid_policy_update_lock, safesetid_setuid_rules);
+}
+
+static ssize_t safesetid_gid_file_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return safesetid_file_read(file, buf, len, ppos,
+ &gid_policy_update_lock, safesetid_setgid_rules);
+}
+
+
+static const struct file_operations safesetid_uid_file_fops = {
+ .read = safesetid_uid_file_read,
+ .write = safesetid_uid_file_write,
+};
+
+static const struct file_operations safesetid_gid_file_fops = {
+ .read = safesetid_gid_file_read,
+ .write = safesetid_gid_file_write,
+};
+
+static int __init safesetid_init_securityfs(void)
+{
+ int ret;
+ struct dentry *policy_dir;
+ struct dentry *uid_policy_file;
+ struct dentry *gid_policy_file;
+
+ if (!safesetid_initialized)
+ return 0;
+
+ policy_dir = securityfs_create_dir("safesetid", NULL);
+ if (IS_ERR(policy_dir)) {
+ ret = PTR_ERR(policy_dir);
+ goto error;
+ }
+
+ uid_policy_file = securityfs_create_file("uid_allowlist_policy", 0600,
+ policy_dir, NULL, &safesetid_uid_file_fops);
+ if (IS_ERR(uid_policy_file)) {
+ ret = PTR_ERR(uid_policy_file);
+ goto error;
+ }
+
+ gid_policy_file = securityfs_create_file("gid_allowlist_policy", 0600,
+ policy_dir, NULL, &safesetid_gid_file_fops);
+ if (IS_ERR(gid_policy_file)) {
+ ret = PTR_ERR(gid_policy_file);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ securityfs_remove(policy_dir);
+ return ret;
+}
+fs_initcall(safesetid_init_securityfs);
diff --git a/security/security.c b/security/security.c
new file mode 100644
index 000000000..fc15b963e
--- /dev/null
+++ b/security/security.c
@@ -0,0 +1,2707 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Security plug functions
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ */
+
+#define pr_fmt(fmt) "LSM: " fmt
+
+#include <linux/bpf.h>
+#include <linux/capability.h>
+#include <linux/dcache.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_read_file.h>
+#include <linux/lsm_hooks.h>
+#include <linux/integrity.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
+#include <linux/backing-dev.h>
+#include <linux/string.h>
+#include <linux/msg.h>
+#include <net/flow.h>
+
+#define MAX_LSM_EVM_XATTR 2
+
+/* How many LSMs were built into the kernel? */
+#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
+
+/*
+ * These are descriptions of the reasons that can be passed to the
+ * security_locked_down() LSM hook. Placing this array here allows
+ * all security modules to use the same descriptions for auditing
+ * purposes.
+ */
+const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ [LOCKDOWN_NONE] = "none",
+ [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
+ [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+ [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
+ [LOCKDOWN_KEXEC] = "kexec of unsigned images",
+ [LOCKDOWN_HIBERNATION] = "hibernation",
+ [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
+ [LOCKDOWN_IOPORT] = "raw io port access",
+ [LOCKDOWN_MSR] = "raw MSR access",
+ [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
+ [LOCKDOWN_DEVICE_TREE] = "modifying device tree contents",
+ [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
+ [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
+ [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
+ [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+ [LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
+ [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
+ [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
+ [LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection",
+ [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ [LOCKDOWN_KCORE] = "/proc/kcore access",
+ [LOCKDOWN_KPROBES] = "use of kprobes",
+ [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
+ [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
+ [LOCKDOWN_PERF] = "unsafe use of perf",
+ [LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",
+ [LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
+ [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
+};
+
+struct security_hook_heads security_hook_heads __lsm_ro_after_init;
+static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
+
+static struct kmem_cache *lsm_file_cache;
+static struct kmem_cache *lsm_inode_cache;
+
+char *lsm_names;
+static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;
+
+/* Boot-time LSM user choice */
+static __initdata const char *chosen_lsm_order;
+static __initdata const char *chosen_major_lsm;
+
+static __initconst const char * const builtin_lsm_order = CONFIG_LSM;
+
+/* Ordered list of LSMs to initialize. */
+static __initdata struct lsm_info **ordered_lsms;
+static __initdata struct lsm_info *exclusive;
+
+static __initdata bool debug;
+#define init_debug(...) \
+ do { \
+ if (debug) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+
+static bool __init is_enabled(struct lsm_info *lsm)
+{
+ if (!lsm->enabled)
+ return false;
+
+ return *lsm->enabled;
+}
+
+/* Mark an LSM's enabled flag. */
+static int lsm_enabled_true __initdata = 1;
+static int lsm_enabled_false __initdata = 0;
+static void __init set_enabled(struct lsm_info *lsm, bool enabled)
+{
+ /*
+ * When an LSM hasn't configured an enable variable, we can use
+ * a hard-coded location for storing the default enabled state.
+ */
+ if (!lsm->enabled) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ else
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_true) {
+ if (!enabled)
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_false) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ } else {
+ *lsm->enabled = enabled;
+ }
+}
+
+/* Is an LSM already listed in the ordered LSMs list? */
+static bool __init exists_ordered_lsm(struct lsm_info *lsm)
+{
+ struct lsm_info **check;
+
+ for (check = ordered_lsms; *check; check++)
+ if (*check == lsm)
+ return true;
+
+ return false;
+}
+
+/* Append an LSM to the list of ordered LSMs to initialize. */
+static int last_lsm __initdata;
+static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
+{
+ /* Ignore duplicate selections. */
+ if (exists_ordered_lsm(lsm))
+ return;
+
+ if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
+ return;
+
+ /* Enable this LSM, if it is not already set. */
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ ordered_lsms[last_lsm++] = lsm;
+
+ init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
+ is_enabled(lsm) ? "en" : "dis");
+}
+
+/* Is an LSM allowed to be initialized? */
+static bool __init lsm_allowed(struct lsm_info *lsm)
+{
+ /* Skip if the LSM is disabled. */
+ if (!is_enabled(lsm))
+ return false;
+
+ /* Not allowed if another exclusive LSM already initialized. */
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
+ init_debug("exclusive disabled: %s\n", lsm->name);
+ return false;
+ }
+
+ return true;
+}
+
+static void __init lsm_set_blob_size(int *need, int *lbs)
+{
+ int offset;
+
+ if (*need > 0) {
+ offset = *lbs;
+ *lbs += *need;
+ *need = offset;
+ }
+}
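+
+/*
+ * Worked example of the offset bookkeeping above: if three LSMs request 8,
+ * 0 and 16 cred bytes in that order, the first is assigned offset 0, the
+ * second is skipped (no blob), and the third is assigned offset 8, leaving
+ * blob_sizes.lbs_cred == 24.
+ */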
+
+static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
+{
+ if (!needed)
+ return;
+
+ lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
+ lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
+ /*
+ * The inode blob gets an rcu_head in addition to
+ * what the modules might need.
+ */
+ if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
+ blob_sizes.lbs_inode = sizeof(struct rcu_head);
+ lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
+ lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
+ lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
+ lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
+ lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
+}
+
+/* Prepare LSM for initialization. */
+static void __init prepare_lsm(struct lsm_info *lsm)
+{
+ int enabled = lsm_allowed(lsm);
+
+ /* Record enablement (to handle any following exclusive LSMs). */
+ set_enabled(lsm, enabled);
+
+ /* If enabled, do pre-initialization work. */
+ if (enabled) {
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
+ exclusive = lsm;
+ init_debug("exclusive chosen: %s\n", lsm->name);
+ }
+
+ lsm_set_blob_sizes(lsm->blobs);
+ }
+}
+
+/* Initialize a given LSM, if it is enabled. */
+static void __init initialize_lsm(struct lsm_info *lsm)
+{
+ if (is_enabled(lsm)) {
+ int ret;
+
+ init_debug("initializing %s\n", lsm->name);
+ ret = lsm->init();
+ WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
+ }
+}
+
+/* Populate ordered LSMs list from comma-separated LSM name list. */
+static void __init ordered_lsm_parse(const char *order, const char *origin)
+{
+ struct lsm_info *lsm;
+ char *sep, *name, *next;
+
+ /* LSM_ORDER_FIRST is always first. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_FIRST)
+ append_ordered_lsm(lsm, "first");
+ }
+
+ /* Process "security=", if given. */
+ if (chosen_major_lsm) {
+ struct lsm_info *major;
+
+ /*
+ * To match the original "security=" behavior, this
+ * explicitly does NOT fall back to another Legacy Major
+ * if the selected one was separately disabled: disable
+ * all non-matching Legacy Major LSMs.
+ */
+ for (major = __start_lsm_info; major < __end_lsm_info;
+ major++) {
+ if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
+ strcmp(major->name, chosen_major_lsm) != 0) {
+ set_enabled(major, false);
+ init_debug("security=%s disabled: %s\n",
+ chosen_major_lsm, major->name);
+ }
+ }
+ }
+
+ sep = kstrdup(order, GFP_KERNEL);
+ next = sep;
+ /* Walk the list, looking for matching LSMs. */
+ while ((name = strsep(&next, ",")) != NULL) {
+ bool found = false;
+
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_MUTABLE &&
+ strcmp(lsm->name, name) == 0) {
+ append_ordered_lsm(lsm, origin);
+ found = true;
+ }
+ }
+
+ if (!found)
+ init_debug("%s ignored: %s\n", origin, name);
+ }
+
+ /* Append the "security=" choice, if given, when not already ordered. */
+ if (chosen_major_lsm) {
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ if (strcmp(lsm->name, chosen_major_lsm) == 0)
+ append_ordered_lsm(lsm, "security=");
+ }
+ }
+
+ /* Disable all LSMs not in the ordered list. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ set_enabled(lsm, false);
+ init_debug("%s disabled: %s\n", origin, lsm->name);
+ }
+
+ kfree(sep);
+}
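+
+/*
+ * Sketch of the resulting order (hypothetical configuration): with
+ * CONFIG_LSM="lockdown,yama,apparmor" and "security=selinux" on the command
+ * line, the non-matching legacy major (apparmor here) is disabled, the
+ * builtin mutable LSMs are still walked in list order, and selinux is
+ * appended at the end by the second "security=" pass above.
+ */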
+
+static void __init lsm_early_cred(struct cred *cred);
+static void __init lsm_early_task(struct task_struct *task);
+
+static int lsm_append(const char *new, char **result);
+
+static void __init ordered_lsm_init(void)
+{
+ struct lsm_info **lsm;
+
+ ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
+ GFP_KERNEL);
+
+ if (chosen_lsm_order) {
+ if (chosen_major_lsm) {
+ pr_info("security= is ignored because it is superseded by lsm=\n");
+ chosen_major_lsm = NULL;
+ }
+ ordered_lsm_parse(chosen_lsm_order, "cmdline");
+ } else
+ ordered_lsm_parse(builtin_lsm_order, "builtin");
+
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ prepare_lsm(*lsm);
+
+ init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
+ init_debug("file blob size = %d\n", blob_sizes.lbs_file);
+ init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
+ init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
+ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
+ init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
+ init_debug("task blob size = %d\n", blob_sizes.lbs_task);
+
+ /*
+ * Create any kmem_caches needed for blobs
+ */
+ if (blob_sizes.lbs_file)
+ lsm_file_cache = kmem_cache_create("lsm_file_cache",
+ blob_sizes.lbs_file, 0,
+ SLAB_PANIC, NULL);
+ if (blob_sizes.lbs_inode)
+ lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
+ blob_sizes.lbs_inode, 0,
+ SLAB_PANIC, NULL);
+
+ lsm_early_cred((struct cred *) current->cred);
+ lsm_early_task(current);
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ initialize_lsm(*lsm);
+
+ kfree(ordered_lsms);
+}
+
+int __init early_security_init(void)
+{
+ struct lsm_info *lsm;
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ INIT_HLIST_HEAD(&security_hook_heads.NAME);
+#include "linux/lsm_hook_defs.h"
+#undef LSM_HOOK
+
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ prepare_lsm(lsm);
+ initialize_lsm(lsm);
+ }
+
+ return 0;
+}
+
+/**
+ * security_init - initializes the security framework
+ *
+ * This should be called early in the kernel initialization sequence.
+ */
+int __init security_init(void)
+{
+ struct lsm_info *lsm;
+
+ pr_info("Security Framework initializing\n");
+
+ /*
+ * Append the names of the early LSM modules now that kmalloc() is
+ * available
+ */
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (lsm->enabled)
+ lsm_append(lsm->name, &lsm_names);
+ }
+
+ /* Load LSMs in specified order. */
+ ordered_lsm_init();
+
+ return 0;
+}
+
+/* Save user chosen LSM */
+static int __init choose_major_lsm(char *str)
+{
+ chosen_major_lsm = str;
+ return 1;
+}
+__setup("security=", choose_major_lsm);
+
+/* Explicitly choose LSM initialization order. */
+static int __init choose_lsm_order(char *str)
+{
+ chosen_lsm_order = str;
+ return 1;
+}
+__setup("lsm=", choose_lsm_order);
+
+/* Enable LSM order debugging. */
+static int __init enable_debug(char *str)
+{
+ debug = true;
+ return 1;
+}
+__setup("lsm.debug", enable_debug);
+
+static bool match_last_lsm(const char *list, const char *lsm)
+{
+ const char *last;
+
+ if (WARN_ON(!list || !lsm))
+ return false;
+ last = strrchr(list, ',');
+ if (last)
+ /* Pass the comma, strcmp() will check for '\0' */
+ last++;
+ else
+ last = list;
+ return !strcmp(last, lsm);
+}
+
+static int lsm_append(const char *new, char **result)
+{
+ char *cp;
+
+ if (*result == NULL) {
+ *result = kstrdup(new, GFP_KERNEL);
+ if (*result == NULL)
+ return -ENOMEM;
+ } else {
+ /* Check if it is the last registered name */
+ if (match_last_lsm(*result, new))
+ return 0;
+ cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
+ if (cp == NULL)
+ return -ENOMEM;
+ kfree(*result);
+ *result = cp;
+ }
+ return 0;
+}
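+
+/*
+ * For example, appending "capability" and then "apparmor" yields the heap
+ * string "capability,apparmor"; appending "apparmor" again is a no-op
+ * because match_last_lsm() sees it is already the last entry.
+ */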
+
+/**
+ * security_add_hooks - Add a module's hooks to the hook lists.
+ * @hooks: the hooks to add
+ * @count: the number of hooks to add
+ * @lsm: the name of the security module
+ *
+ * Each LSM has to register its hooks with the infrastructure.
+ */
+void __init security_add_hooks(struct security_hook_list *hooks, int count,
+ const char *lsm)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ hooks[i].lsm = lsm;
+ hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
+ }
+
+ /*
+ * Don't try to append during early_security_init(), we'll come back
+ * and fix this up afterwards.
+ */
+ if (slab_is_available()) {
+ if (lsm_append(lsm, &lsm_names) < 0)
+ panic("%s - Cannot get early memory.\n", __func__);
+ }
+}
+
+int call_blocking_lsm_notifier(enum lsm_event event, void *data)
+{
+ return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
+ event, data);
+}
+EXPORT_SYMBOL(call_blocking_lsm_notifier);
+
+int register_blocking_lsm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
+ nb);
+}
+EXPORT_SYMBOL(register_blocking_lsm_notifier);
+
+int unregister_blocking_lsm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
+ nb);
+}
+EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
+
+/**
+ * lsm_cred_alloc - allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ * @gfp: allocation type
+ *
+ * Allocate the cred blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
+{
+ if (blob_sizes.lbs_cred == 0) {
+ cred->security = NULL;
+ return 0;
+ }
+
+ cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
+ if (cred->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_cred - during initialization allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ *
+ * Allocate the cred blob for all the modules
+ */
+static void __init lsm_early_cred(struct cred *cred)
+{
+ int rc = lsm_cred_alloc(cred, GFP_KERNEL);
+
+ if (rc)
+ panic("%s: Early cred alloc failed.\n", __func__);
+}
+
+/**
+ * lsm_file_alloc - allocate a composite file blob
+ * @file: the file that needs a blob
+ *
+ * Allocate the file blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_file_alloc(struct file *file)
+{
+ if (!lsm_file_cache) {
+ file->f_security = NULL;
+ return 0;
+ }
+
+ file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
+ if (file->f_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_inode_alloc - allocate a composite inode blob
+ * @inode: the inode that needs a blob
+ *
+ * Allocate the inode blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+int lsm_inode_alloc(struct inode *inode)
+{
+ if (!lsm_inode_cache) {
+ inode->i_security = NULL;
+ return 0;
+ }
+
+ inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
+ if (inode->i_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_task_alloc - allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_task_alloc(struct task_struct *task)
+{
+ if (blob_sizes.lbs_task == 0) {
+ task->security = NULL;
+ return 0;
+ }
+
+ task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
+ if (task->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_ipc_alloc - allocate a composite ipc blob
+ * @kip: the ipc that needs a blob
+ *
+ * Allocate the ipc blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
+{
+ if (blob_sizes.lbs_ipc == 0) {
+ kip->security = NULL;
+ return 0;
+ }
+
+ kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
+ if (kip->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_msg_msg_alloc - allocate a composite msg_msg blob
+ * @mp: the msg_msg that needs a blob
+ *
+ * Allocate the msg_msg blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_msg_msg_alloc(struct msg_msg *mp)
+{
+ if (blob_sizes.lbs_msg_msg == 0) {
+ mp->security = NULL;
+ return 0;
+ }
+
+ mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
+ if (mp->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_task - during initialization allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ */
+static void __init lsm_early_task(struct task_struct *task)
+{
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ panic("%s: Early task alloc failed.\n", __func__);
+}
+
+/**
+ * lsm_superblock_alloc - allocate a composite superblock blob
+ * @sb: the superblock that needs a blob
+ *
+ * Allocate the superblock blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_superblock_alloc(struct super_block *sb)
+{
+ if (blob_sizes.lbs_superblock == 0) {
+ sb->s_security = NULL;
+ return 0;
+ }
+
+ sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
+ if (sb->s_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
+ * can be accessed with:
+ *
+ * LSM_RET_DEFAULT(<hook_name>)
+ *
+ * The macros below define static constants for the default value of each
+ * LSM hook.
+ */
+#define LSM_RET_DEFAULT(NAME) (NAME##_default)
+#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
+#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
+ static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT);
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)
+
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
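+/*
+ * For instance, an entry like
+ *   LSM_HOOK(int, 0, file_open, ...)
+ * expands above to
+ *   static const int __maybe_unused file_open_default = 0;
+ * so call sites can compare results against LSM_RET_DEFAULT(file_open).
+ */
+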
+/*
+ * Hook list operation macros.
+ *
+ * call_void_hook:
+ * This is a hook that does not return a value.
+ *
+ * call_int_hook:
+ * This is a hook that returns a value.
+ */
+
+#define call_void_hook(FUNC, ...) \
+ do { \
+ struct security_hook_list *P; \
+ \
+ hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
+ P->hook.FUNC(__VA_ARGS__); \
+ } while (0)
+
+#define call_int_hook(FUNC, IRC, ...) ({ \
+ int RC = IRC; \
+ do { \
+ struct security_hook_list *P; \
+ \
+ hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
+ RC = P->hook.FUNC(__VA_ARGS__); \
+ if (RC != 0) \
+ break; \
+ } \
+ } while (0); \
+ RC; \
+})
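+
+/*
+ * Expansion sketch: call_int_hook(quota_on, 0, dentry) walks
+ * security_hook_heads.quota_on and returns the first nonzero value produced
+ * by a registered hook, or the IRC argument (0 here) when every hook
+ * returns 0 or none are registered.
+ */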
+
+/* Security operations */
+
+int security_binder_set_context_mgr(const struct cred *mgr)
+{
+ return call_int_hook(binder_set_context_mgr, 0, mgr);
+}
+
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
+{
+ return call_int_hook(binder_transaction, 0, from, to);
+}
+
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
+{
+ return call_int_hook(binder_transfer_binder, 0, from, to);
+}
+
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, struct file *file)
+{
+ return call_int_hook(binder_transfer_file, 0, from, to, file);
+}
+
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
+{
+ return call_int_hook(ptrace_access_check, 0, child, mode);
+}
+
+int security_ptrace_traceme(struct task_struct *parent)
+{
+ return call_int_hook(ptrace_traceme, 0, parent);
+}
+
+int security_capget(struct task_struct *target,
+ kernel_cap_t *effective,
+ kernel_cap_t *inheritable,
+ kernel_cap_t *permitted)
+{
+ return call_int_hook(capget, 0, target,
+ effective, inheritable, permitted);
+}
+
+int security_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+{
+ return call_int_hook(capset, 0, new, old,
+ effective, inheritable, permitted);
+}
+
+int security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
+{
+ return call_int_hook(capable, 0, cred, ns, cap, opts);
+}
+
+int security_quotactl(int cmds, int type, int id, struct super_block *sb)
+{
+ return call_int_hook(quotactl, 0, cmds, type, id, sb);
+}
+
+int security_quota_on(struct dentry *dentry)
+{
+ return call_int_hook(quota_on, 0, dentry);
+}
+
+int security_syslog(int type)
+{
+ return call_int_hook(syslog, 0, type);
+}
+
+int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
+{
+ return call_int_hook(settime, 0, ts, tz);
+}
+
+int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
+{
+ struct security_hook_list *hp;
+ int cap_sys_admin = 1;
+ int rc;
+
+ /*
+ * The module will respond with a positive value if
+ * it thinks the __vm_enough_memory() call should be
+ * made with cap_sys_admin set. If all of the modules
+ * agree that it should be set, it will be. If any module
+ * thinks it should not be set, it won't be.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
+ rc = hp->hook.vm_enough_memory(mm, pages);
+ if (rc <= 0) {
+ cap_sys_admin = 0;
+ break;
+ }
+ }
+ return __vm_enough_memory(mm, pages, cap_sys_admin);
+}
+
+int security_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ return call_int_hook(bprm_creds_for_exec, 0, bprm);
+}
+
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
+{
+ return call_int_hook(bprm_creds_from_file, 0, bprm, file);
+}
+
+int security_bprm_check(struct linux_binprm *bprm)
+{
+ int ret;
+
+ ret = call_int_hook(bprm_check_security, 0, bprm);
+ if (ret)
+ return ret;
+ return ima_bprm_check(bprm);
+}
+
+void security_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ call_void_hook(bprm_committing_creds, bprm);
+}
+
+void security_bprm_committed_creds(struct linux_binprm *bprm)
+{
+ call_void_hook(bprm_committed_creds, bprm);
+}
+
+/**
+ * security_fs_context_submount() - Initialise fc->security
+ * @fc: new filesystem context
+ * @reference: dentry reference for submount/remount
+ *
+ * Fill out the ->security field for a new fs_context.
+ *
+ * Return: Returns 0 on success or negative error code on failure.
+ */
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
+{
+ return call_int_hook(fs_context_submount, 0, fc, reference);
+}
+
+int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ return call_int_hook(fs_context_dup, 0, fc, src_fc);
+}
+
+int security_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct security_hook_list *hp;
+ int trc;
+ int rc = -ENOPARAM;
+
+ hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param,
+ list) {
+ trc = hp->hook.fs_context_parse_param(fc, param);
+ if (trc == 0)
+ rc = 0;
+ else if (trc != -ENOPARAM)
+ return trc;
+ }
+ return rc;
+}
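+
+/*
+ * Aggregation example: if one LSM consumes the parameter (returns 0) and
+ * every other LSM returns -ENOPARAM, the result is 0; any other error from
+ * any LSM is returned immediately.
+ */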
+
+int security_sb_alloc(struct super_block *sb)
+{
+ int rc = lsm_superblock_alloc(sb);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sb_alloc_security, 0, sb);
+ if (unlikely(rc))
+ security_sb_free(sb);
+ return rc;
+}
+
+void security_sb_delete(struct super_block *sb)
+{
+ call_void_hook(sb_delete, sb);
+}
+
+void security_sb_free(struct super_block *sb)
+{
+ call_void_hook(sb_free_security, sb);
+ kfree(sb->s_security);
+ sb->s_security = NULL;
+}
+
+void security_free_mnt_opts(void **mnt_opts)
+{
+ if (!*mnt_opts)
+ return;
+ call_void_hook(sb_free_mnt_opts, *mnt_opts);
+ *mnt_opts = NULL;
+}
+EXPORT_SYMBOL(security_free_mnt_opts);
+
+int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
+{
+ return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
+}
+EXPORT_SYMBOL(security_sb_eat_lsm_opts);
+
+int security_sb_mnt_opts_compat(struct super_block *sb,
+ void *mnt_opts)
+{
+ return call_int_hook(sb_mnt_opts_compat, 0, sb, mnt_opts);
+}
+EXPORT_SYMBOL(security_sb_mnt_opts_compat);
+
+int security_sb_remount(struct super_block *sb,
+ void *mnt_opts)
+{
+ return call_int_hook(sb_remount, 0, sb, mnt_opts);
+}
+EXPORT_SYMBOL(security_sb_remount);
+
+int security_sb_kern_mount(struct super_block *sb)
+{
+ return call_int_hook(sb_kern_mount, 0, sb);
+}
+
+int security_sb_show_options(struct seq_file *m, struct super_block *sb)
+{
+ return call_int_hook(sb_show_options, 0, m, sb);
+}
+
+int security_sb_statfs(struct dentry *dentry)
+{
+ return call_int_hook(sb_statfs, 0, dentry);
+}
+
+int security_sb_mount(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
+}
+
+int security_sb_umount(struct vfsmount *mnt, int flags)
+{
+ return call_int_hook(sb_umount, 0, mnt, flags);
+}
+
+int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
+{
+ return call_int_hook(sb_pivotroot, 0, old_path, new_path);
+}
+
+int security_sb_set_mnt_opts(struct super_block *sb,
+ void *mnt_opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ return call_int_hook(sb_set_mnt_opts,
+ mnt_opts ? -EOPNOTSUPP : 0, sb,
+ mnt_opts, kern_flags, set_kern_flags);
+}
+EXPORT_SYMBOL(security_sb_set_mnt_opts);
+
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
+ struct super_block *newsb,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
+ kern_flags, set_kern_flags);
+}
+EXPORT_SYMBOL(security_sb_clone_mnt_opts);
+
+int security_move_mount(const struct path *from_path, const struct path *to_path)
+{
+ return call_int_hook(move_mount, 0, from_path, to_path);
+}
+
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ return call_int_hook(path_notify, 0, path, mask, obj_type);
+}
+
+int security_inode_alloc(struct inode *inode)
+{
+ int rc = lsm_inode_alloc(inode);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(inode_alloc_security, 0, inode);
+ if (unlikely(rc))
+ security_inode_free(inode);
+ return rc;
+}
+
+static void inode_free_by_rcu(struct rcu_head *head)
+{
+ /*
+ * The rcu head is at the start of the inode blob
+ */
+ kmem_cache_free(lsm_inode_cache, head);
+}
+
+void security_inode_free(struct inode *inode)
+{
+ integrity_inode_free(inode);
+ call_void_hook(inode_free_security, inode);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to security_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ if (inode->i_security)
+ call_rcu((struct rcu_head *)inode->i_security,
+ inode_free_by_rcu);
+}
+
+int security_dentry_init_security(struct dentry *dentry, int mode,
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen)
+{
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * Only one module will provide a security context.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.dentry_init_security, list) {
+ rc = hp->hook.dentry_init_security(dentry, mode, name,
+ xattr_name, ctx, ctxlen);
+ if (rc != LSM_RET_DEFAULT(dentry_init_security))
+ return rc;
+ }
+ return LSM_RET_DEFAULT(dentry_init_security);
+}
+EXPORT_SYMBOL(security_dentry_init_security);
+
+int security_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old, struct cred *new)
+{
+ return call_int_hook(dentry_create_files_as, 0, dentry, mode,
+ name, old, new);
+}
+EXPORT_SYMBOL(security_dentry_create_files_as);
+
+int security_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ const initxattrs initxattrs, void *fs_data)
+{
+ struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+ struct xattr *lsm_xattr, *evm_xattr, *xattr;
+ int ret;
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ if (!initxattrs)
+ return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
+ dir, qstr, NULL, NULL, NULL);
+ memset(new_xattrs, 0, sizeof(new_xattrs));
+ lsm_xattr = new_xattrs;
+ ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
+ &lsm_xattr->name,
+ &lsm_xattr->value,
+ &lsm_xattr->value_len);
+ if (ret)
+ goto out;
+
+ evm_xattr = lsm_xattr + 1;
+ ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+ if (ret)
+ goto out;
+ ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+ for (xattr = new_xattrs; xattr->value != NULL; xattr++)
+ kfree(xattr->value);
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+}
+EXPORT_SYMBOL(security_inode_init_security);
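+
+/*
+ * Layout note (illustrative): new_xattrs above holds at most one LSM xattr
+ * plus one EVM xattr (MAX_LSM_EVM_XATTR == 2) and a zeroed terminator whose
+ * NULL value bounds the kfree() loop.
+ */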
+
+int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
+{
+ return call_int_hook(inode_init_security_anon, 0, inode, name,
+ context_inode);
+}
+
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return -EOPNOTSUPP;
+ return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
+ qstr, name, value, len);
+}
+EXPORT_SYMBOL(security_old_inode_init_security);
+
+#ifdef CONFIG_SECURITY_PATH
+int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
+ unsigned int dev)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+ return 0;
+ return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
+}
+EXPORT_SYMBOL(security_path_mknod);
+
+int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+ return 0;
+ return call_int_hook(path_mkdir, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL(security_path_mkdir);
+
+int security_path_rmdir(const struct path *dir, struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+ return 0;
+ return call_int_hook(path_rmdir, 0, dir, dentry);
+}
+
+int security_path_unlink(const struct path *dir, struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+ return 0;
+ return call_int_hook(path_unlink, 0, dir, dentry);
+}
+EXPORT_SYMBOL(security_path_unlink);
+
+int security_path_symlink(const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
+ return 0;
+ return call_int_hook(path_symlink, 0, dir, dentry, old_name);
+}
+
+int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
+ return 0;
+ return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
+}
+
+int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+ (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
+ return 0;
+
+ return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+EXPORT_SYMBOL(security_path_rename);
+
+int security_path_truncate(const struct path *path)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+ return 0;
+ return call_int_hook(path_truncate, 0, path);
+}
+
+int security_path_chmod(const struct path *path, umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+ return 0;
+ return call_int_hook(path_chmod, 0, path, mode);
+}
+
+int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+ return 0;
+ return call_int_hook(path_chown, 0, path, uid, gid);
+}
+
+int security_path_chroot(const struct path *path)
+{
+ return call_int_hook(path_chroot, 0, path);
+}
+#endif
+
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_create, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL_GPL(security_inode_create);
+
+int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
+ return 0;
+ return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
+}
+
+int security_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ return call_int_hook(inode_unlink, 0, dir, dentry);
+}
+
+int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+ const char *old_name)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
+}
+
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
+}
+EXPORT_SYMBOL_GPL(security_inode_mkdir);
+
+int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ return call_int_hook(inode_rmdir, 0, dir, dentry);
+}
+
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
+}
+
+int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+ (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
+ return 0;
+
+ if (flags & RENAME_EXCHANGE) {
+ int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
+ old_dir, old_dentry);
+ if (err)
+ return err;
+ }
+
+ return call_int_hook(inode_rename, 0, old_dir, old_dentry,
+ new_dir, new_dentry);
+}
+
+int security_inode_readlink(struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ return call_int_hook(inode_readlink, 0, dentry);
+}
+
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+ return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
+}
+
+int security_inode_permission(struct inode *inode, int mask)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+ return call_int_hook(inode_permission, 0, inode, mask);
+}
+
+int security_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
+{
+ int ret;
+
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ ret = call_int_hook(inode_setattr, 0, dentry, attr);
+ if (ret)
+ return ret;
+ return evm_inode_setattr(mnt_userns, dentry, attr);
+}
+EXPORT_SYMBOL_GPL(security_inode_setattr);
+
+int security_inode_getattr(const struct path *path)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
+ return 0;
+ return call_int_hook(inode_getattr, 0, path);
+}
+
+int security_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ int ret;
+
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ /*
+ * SELinux and Smack integrate the cap call,
+ * so assume that all LSMs supplying this call do so.
+ */
+ ret = call_int_hook(inode_setxattr, 1, mnt_userns, dentry, name, value,
+ size, flags);
+
+ if (ret == 1)
+ ret = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (ret)
+ return ret;
+ ret = ima_inode_setxattr(dentry, name, value, size);
+ if (ret)
+ return ret;
+ return evm_inode_setxattr(mnt_userns, dentry, name, value, size);
+}
+
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return;
+ call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
+ evm_inode_post_setxattr(dentry, name, value, size);
+}
+
+int security_inode_getxattr(struct dentry *dentry, const char *name)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ return call_int_hook(inode_getxattr, 0, dentry, name);
+}
+
+int security_inode_listxattr(struct dentry *dentry)
+{
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ return call_int_hook(inode_listxattr, 0, dentry);
+}
+
+int security_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
+{
+ int ret;
+
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+ /*
+ * SELinux and Smack integrate the cap call,
+ * so assume that all LSMs supplying this call do so.
+ */
+ ret = call_int_hook(inode_removexattr, 1, mnt_userns, dentry, name);
+ if (ret == 1)
+ ret = cap_inode_removexattr(mnt_userns, dentry, name);
+ if (ret)
+ return ret;
+ ret = ima_inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ return evm_inode_removexattr(mnt_userns, dentry, name);
+}
+
+int security_inode_need_killpriv(struct dentry *dentry)
+{
+ return call_int_hook(inode_need_killpriv, 0, dentry);
+}
+
+int security_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
+{
+ return call_int_hook(inode_killpriv, 0, mnt_userns, dentry);
+}
+
+int security_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc)
+{
+ struct security_hook_list *hp;
+ int rc;
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return LSM_RET_DEFAULT(inode_getsecurity);
+ /*
+ * Only one module will provide an attribute with a given name.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
+ rc = hp->hook.inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
+ if (rc != LSM_RET_DEFAULT(inode_getsecurity))
+ return rc;
+ }
+ return LSM_RET_DEFAULT(inode_getsecurity);
+}
+
+int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
+{
+ struct security_hook_list *hp;
+ int rc;
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return LSM_RET_DEFAULT(inode_setsecurity);
+ /*
+ * Only one module will provide an attribute with a given name.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
+ rc = hp->hook.inode_setsecurity(inode, name, value, size,
+ flags);
+ if (rc != LSM_RET_DEFAULT(inode_setsecurity))
+ return rc;
+ }
+ return LSM_RET_DEFAULT(inode_setsecurity);
+}
+
+int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+ return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
+}
+EXPORT_SYMBOL(security_inode_listsecurity);
+
+void security_inode_getsecid(struct inode *inode, u32 *secid)
+{
+ call_void_hook(inode_getsecid, inode, secid);
+}
+
+int security_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ return call_int_hook(inode_copy_up, 0, src, new);
+}
+EXPORT_SYMBOL(security_inode_copy_up);
+
+int security_inode_copy_up_xattr(const char *name)
+{
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * The implementation can return 0 (accept the xattr), 1 (discard the
+ * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
+ * any other error code in case of an error.
+ */
+ hlist_for_each_entry(hp,
+ &security_hook_heads.inode_copy_up_xattr, list) {
+ rc = hp->hook.inode_copy_up_xattr(name);
+ if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
+ return rc;
+ }
+
+ return LSM_RET_DEFAULT(inode_copy_up_xattr);
+}
+EXPORT_SYMBOL(security_inode_copy_up_xattr);
+
+int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+{
+ return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
+}
+
+int security_file_permission(struct file *file, int mask)
+{
+ int ret;
+
+ ret = call_int_hook(file_permission, 0, file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, mask);
+}
+
+int security_file_alloc(struct file *file)
+{
+ int rc = lsm_file_alloc(file);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(file_alloc_security, 0, file);
+ if (unlikely(rc))
+ security_file_free(file);
+ return rc;
+}
+
+void security_file_free(struct file *file)
+{
+ void *blob;
+
+ call_void_hook(file_free_security, file);
+
+ blob = file->f_security;
+ if (blob) {
+ file->f_security = NULL;
+ kmem_cache_free(lsm_file_cache, blob);
+ }
+}
+
+int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return call_int_hook(file_ioctl, 0, file, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(security_file_ioctl);
+
+/**
+ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
+ * @file: associated file
+ * @cmd: ioctl cmd
+ * @arg: ioctl arguments
+ *
+ * Compat version of security_file_ioctl() that correctly handles 32-bit
+ * processes running on 64-bit kernels.
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
+
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+{
+ /*
+ * Do we have PROT_READ, and does the application expect
+ * it to imply PROT_EXEC? If not, there's nothing to talk about...
+ */
+ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+ return prot;
+ if (!(current->personality & READ_IMPLIES_EXEC))
+ return prot;
+ /*
+ * if that's an anonymous mapping, let it.
+ */
+ if (!file)
+ return prot | PROT_EXEC;
+ /*
+ * ditto if it's not on noexec mount, except that on !MMU we need
+ * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
+ */
+ if (!path_noexec(&file->f_path)) {
+#ifndef CONFIG_MMU
+ if (file->f_op->mmap_capabilities) {
+ unsigned caps = file->f_op->mmap_capabilities(file);
+ if (!(caps & NOMMU_MAP_EXEC))
+ return prot;
+ }
+#endif
+ return prot | PROT_EXEC;
+ }
+ /* anything on noexec mount won't get PROT_EXEC */
+ return prot;
+}
+
+int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+{
+ unsigned long prot_adj = mmap_prot(file, prot);
+ int ret;
+
+ ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags);
+ if (ret)
+ return ret;
+ return ima_file_mmap(file, prot, prot_adj, flags);
+}
+
+int security_mmap_addr(unsigned long addr)
+{
+ return call_int_hook(mmap_addr, 0, addr);
+}
+
+int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
+ unsigned long prot)
+{
+ int ret;
+
+ ret = call_int_hook(file_mprotect, 0, vma, reqprot, prot);
+ if (ret)
+ return ret;
+ return ima_file_mprotect(vma, prot);
+}
+
+int security_file_lock(struct file *file, unsigned int cmd)
+{
+ return call_int_hook(file_lock, 0, file, cmd);
+}
+
+int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return call_int_hook(file_fcntl, 0, file, cmd, arg);
+}
+
+void security_file_set_fowner(struct file *file)
+{
+ call_void_hook(file_set_fowner, file);
+}
+
+int security_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int sig)
+{
+ return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
+}
+
+int security_file_receive(struct file *file)
+{
+ return call_int_hook(file_receive, 0, file);
+}
+
+int security_file_open(struct file *file)
+{
+ int ret;
+
+ ret = call_int_hook(file_open, 0, file);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, MAY_OPEN);
+}
+
+int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
+{
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(task_alloc, 0, task, clone_flags);
+ if (unlikely(rc))
+ security_task_free(task);
+ return rc;
+}
+
+void security_task_free(struct task_struct *task)
+{
+ call_void_hook(task_free, task);
+
+ kfree(task->security);
+ task->security = NULL;
+}
+
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ int rc = lsm_cred_alloc(cred, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
+ if (unlikely(rc))
+ security_cred_free(cred);
+ return rc;
+}
+
+void security_cred_free(struct cred *cred)
+{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
+ call_void_hook(cred_free, cred);
+
+ kfree(cred->security);
+ cred->security = NULL;
+}
+
+int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
+{
+ int rc = lsm_cred_alloc(new, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_prepare, 0, new, old, gfp);
+ if (unlikely(rc))
+ security_cred_free(new);
+ return rc;
+}
+
+void security_transfer_creds(struct cred *new, const struct cred *old)
+{
+ call_void_hook(cred_transfer, new, old);
+}
+
+void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+ call_void_hook(cred_getsecid, c, secid);
+}
+EXPORT_SYMBOL(security_cred_getsecid);
+
+int security_kernel_act_as(struct cred *new, u32 secid)
+{
+ return call_int_hook(kernel_act_as, 0, new, secid);
+}
+
+int security_kernel_create_files_as(struct cred *new, struct inode *inode)
+{
+ return call_int_hook(kernel_create_files_as, 0, new, inode);
+}
+
+int security_kernel_module_request(char *kmod_name)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_module_request, 0, kmod_name);
+ if (ret)
+ return ret;
+ return integrity_kernel_module_request(kmod_name);
+}
+
+int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_read_file, 0, file, id, contents);
+ if (ret)
+ return ret;
+ return ima_read_file(file, id, contents);
+}
+EXPORT_SYMBOL_GPL(security_kernel_read_file);
+
+int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
+ enum kernel_read_file_id id)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
+ if (ret)
+ return ret;
+ return ima_post_read_file(file, buf, size, id);
+}
+EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
+
+int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_load_data, 0, id, contents);
+ if (ret)
+ return ret;
+ return ima_load_data(id, contents);
+}
+EXPORT_SYMBOL_GPL(security_kernel_load_data);
+
+int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_post_load_data, 0, buf, size, id,
+ description);
+ if (ret)
+ return ret;
+ return ima_post_load_data(buf, size, id, description);
+}
+EXPORT_SYMBOL_GPL(security_kernel_post_load_data);
+
+int security_task_fix_setuid(struct cred *new, const struct cred *old,
+ int flags)
+{
+ return call_int_hook(task_fix_setuid, 0, new, old, flags);
+}
+
+int security_task_fix_setgid(struct cred *new, const struct cred *old,
+ int flags)
+{
+ return call_int_hook(task_fix_setgid, 0, new, old, flags);
+}
+
+int security_task_fix_setgroups(struct cred *new, const struct cred *old)
+{
+ return call_int_hook(task_fix_setgroups, 0, new, old);
+}
+
+int security_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+ return call_int_hook(task_setpgid, 0, p, pgid);
+}
+
+int security_task_getpgid(struct task_struct *p)
+{
+ return call_int_hook(task_getpgid, 0, p);
+}
+
+int security_task_getsid(struct task_struct *p)
+{
+ return call_int_hook(task_getsid, 0, p);
+}
+
+void security_current_getsecid_subj(u32 *secid)
+{
+ *secid = 0;
+ call_void_hook(current_getsecid_subj, secid);
+}
+EXPORT_SYMBOL(security_current_getsecid_subj);
+
+void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
+{
+ *secid = 0;
+ call_void_hook(task_getsecid_obj, p, secid);
+}
+EXPORT_SYMBOL(security_task_getsecid_obj);
+
+int security_task_setnice(struct task_struct *p, int nice)
+{
+ return call_int_hook(task_setnice, 0, p, nice);
+}
+
+int security_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return call_int_hook(task_setioprio, 0, p, ioprio);
+}
+
+int security_task_getioprio(struct task_struct *p)
+{
+ return call_int_hook(task_getioprio, 0, p);
+}
+
+int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags)
+{
+ return call_int_hook(task_prlimit, 0, cred, tcred, flags);
+}
+
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+{
+ return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
+}
+
+int security_task_setscheduler(struct task_struct *p)
+{
+ return call_int_hook(task_setscheduler, 0, p);
+}
+
+int security_task_getscheduler(struct task_struct *p)
+{
+ return call_int_hook(task_getscheduler, 0, p);
+}
+
+int security_task_movememory(struct task_struct *p)
+{
+ return call_int_hook(task_movememory, 0, p);
+}
+
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+{
+ return call_int_hook(task_kill, 0, p, info, sig, cred);
+}
+
+int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ int thisrc;
+ int rc = LSM_RET_DEFAULT(task_prctl);
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
+ thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
+ if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
+ rc = thisrc;
+ if (thisrc != 0)
+ break;
+ }
+ }
+ return rc;
+}
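+
+/*
+ * Return-convention example (assuming -ENOSYS is the declared task_prctl
+ * default): with two LSMs hooked, if the first returns -ENOSYS and the
+ * second returns 0, the result is 0; any nonzero non-default result
+ * short-circuits the walk.
+ */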
+
+void security_task_to_inode(struct task_struct *p, struct inode *inode)
+{
+ call_void_hook(task_to_inode, p, inode);
+}
+
+int security_create_user_ns(const struct cred *cred)
+{
+ return call_int_hook(userns_create, 0, cred);
+}
+
+int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
+{
+ return call_int_hook(ipc_permission, 0, ipcp, flag);
+}
+
+void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+{
+ *secid = 0;
+ call_void_hook(ipc_getsecid, ipcp, secid);
+}
+
+int security_msg_msg_alloc(struct msg_msg *msg)
+{
+ int rc = lsm_msg_msg_alloc(msg);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_msg_alloc_security, 0, msg);
+ if (unlikely(rc))
+ security_msg_msg_free(msg);
+ return rc;
+}
+
+void security_msg_msg_free(struct msg_msg *msg)
+{
+ call_void_hook(msg_msg_free_security, msg);
+ kfree(msg->security);
+ msg->security = NULL;
+}
+
+int security_msg_queue_alloc(struct kern_ipc_perm *msq)
+{
+ int rc = lsm_ipc_alloc(msq);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_queue_alloc_security, 0, msq);
+ if (unlikely(rc))
+ security_msg_queue_free(msq);
+ return rc;
+}
+
+void security_msg_queue_free(struct kern_ipc_perm *msq)
+{
+ call_void_hook(msg_queue_free_security, msq);
+ kfree(msq->security);
+ msq->security = NULL;
+}
+
+int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
+{
+ return call_int_hook(msg_queue_associate, 0, msq, msqflg);
+}
+
+int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
+{
+ return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
+}
+
+int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
+ struct msg_msg *msg, int msqflg)
+{
+ return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
+}
+
+int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ struct task_struct *target, long type, int mode)
+{
+ return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
+}
+
+int security_shm_alloc(struct kern_ipc_perm *shp)
+{
+ int rc = lsm_ipc_alloc(shp);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(shm_alloc_security, 0, shp);
+ if (unlikely(rc))
+ security_shm_free(shp);
+ return rc;
+}
+
+void security_shm_free(struct kern_ipc_perm *shp)
+{
+ call_void_hook(shm_free_security, shp);
+ kfree(shp->security);
+ shp->security = NULL;
+}
+
+int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
+{
+ return call_int_hook(shm_associate, 0, shp, shmflg);
+}
+
+int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
+{
+ return call_int_hook(shm_shmctl, 0, shp, cmd);
+}
+
+int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg)
+{
+ return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
+}
+
+int security_sem_alloc(struct kern_ipc_perm *sma)
+{
+ int rc = lsm_ipc_alloc(sma);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sem_alloc_security, 0, sma);
+ if (unlikely(rc))
+ security_sem_free(sma);
+ return rc;
+}
+
+void security_sem_free(struct kern_ipc_perm *sma)
+{
+ call_void_hook(sem_free_security, sma);
+ kfree(sma->security);
+ sma->security = NULL;
+}
+
+int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
+{
+ return call_int_hook(sem_associate, 0, sma, semflg);
+}
+
+int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
+{
+ return call_int_hook(sem_semctl, 0, sma, cmd);
+}
+
+int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
+ unsigned nsops, int alter)
+{
+ return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
+}
+
+void security_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
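+ /* inodes marked S_PRIVATE are fs-internal and exempt from LSM checks */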
+ if (unlikely(inode && IS_PRIVATE(inode)))
+ return;
+ call_void_hook(d_instantiate, dentry, inode);
+}
+EXPORT_SYMBOL(security_d_instantiate);
+
+int security_getprocattr(struct task_struct *p, const char *lsm,
+ const char *name, char **value)
+{
+ struct security_hook_list *hp;
+
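+ /*
+ * If @lsm names a specific module, hooks registered by other modules
+ * are skipped; otherwise the first registered hook wins, since only
+ * one value can be returned.
+ */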
+ hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.getprocattr(p, name, value);
+ }
+ return LSM_RET_DEFAULT(getprocattr);
+}
+
+int security_setprocattr(const char *lsm, const char *name, void *value,
+ size_t size)
+{
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.setprocattr(name, value, size);
+ }
+ return LSM_RET_DEFAULT(setprocattr);
+}
+
+int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return call_int_hook(netlink_send, 0, sk, skb);
+}
+
+int security_ismaclabel(const char *name)
+{
+ return call_int_hook(ismaclabel, 0, name);
+}
+EXPORT_SYMBOL(security_ismaclabel);
+
+int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * Currently, only one LSM can implement secid_to_secctx (i.e. this
+ * LSM hook is not "stackable").
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
+ rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
+ if (rc != LSM_RET_DEFAULT(secid_to_secctx))
+ return rc;
+ }
+
+ return LSM_RET_DEFAULT(secid_to_secctx);
+}
+EXPORT_SYMBOL(security_secid_to_secctx);
+
+int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+ *secid = 0;
+ return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
+}
+EXPORT_SYMBOL(security_secctx_to_secid);
+
+void security_release_secctx(char *secdata, u32 seclen)
+{
+ call_void_hook(release_secctx, secdata, seclen);
+}
+EXPORT_SYMBOL(security_release_secctx);
+
+void security_inode_invalidate_secctx(struct inode *inode)
+{
+ call_void_hook(inode_invalidate_secctx, inode);
+}
+EXPORT_SYMBOL(security_inode_invalidate_secctx);
+
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_notifysecctx);
+
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_setsecctx);
+
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_getsecctx);
+
+#ifdef CONFIG_WATCH_QUEUE
+int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ return call_int_hook(post_notification, 0, w_cred, cred, n);
+}
+#endif /* CONFIG_WATCH_QUEUE */
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+int security_watch_key(struct key *key)
+{
+ return call_int_hook(watch_key, 0, key);
+}
+#endif
+
+#ifdef CONFIG_SECURITY_NETWORK
+
+int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
+{
+ return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
+}
+EXPORT_SYMBOL(security_unix_stream_connect);
+
+int security_unix_may_send(struct socket *sock, struct socket *other)
+{
+ return call_int_hook(unix_may_send, 0, sock, other);
+}
+EXPORT_SYMBOL(security_unix_may_send);
+
+int security_socket_create(int family, int type, int protocol, int kern)
+{
+ return call_int_hook(socket_create, 0, family, type, protocol, kern);
+}
+
+int security_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ return call_int_hook(socket_post_create, 0, sock, family, type,
+ protocol, kern);
+}
+
+int security_socket_socketpair(struct socket *socka, struct socket *sockb)
+{
+ return call_int_hook(socket_socketpair, 0, socka, sockb);
+}
+EXPORT_SYMBOL(security_socket_socketpair);
+
+int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+ return call_int_hook(socket_bind, 0, sock, address, addrlen);
+}
+
+int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+ return call_int_hook(socket_connect, 0, sock, address, addrlen);
+}
+
+int security_socket_listen(struct socket *sock, int backlog)
+{
+ return call_int_hook(socket_listen, 0, sock, backlog);
+}
+
+int security_socket_accept(struct socket *sock, struct socket *newsock)
+{
+ return call_int_hook(socket_accept, 0, sock, newsock);
+}
+
+int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
+{
+ return call_int_hook(socket_sendmsg, 0, sock, msg, size);
+}
+
+int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+{
+ return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
+}
+
+int security_socket_getsockname(struct socket *sock)
+{
+ return call_int_hook(socket_getsockname, 0, sock);
+}
+
+int security_socket_getpeername(struct socket *sock)
+{
+ return call_int_hook(socket_getpeername, 0, sock);
+}
+
+int security_socket_getsockopt(struct socket *sock, int level, int optname)
+{
+ return call_int_hook(socket_getsockopt, 0, sock, level, optname);
+}
+
+int security_socket_setsockopt(struct socket *sock, int level, int optname)
+{
+ return call_int_hook(socket_setsockopt, 0, sock, level, optname);
+}
+
+int security_socket_shutdown(struct socket *sock, int how)
+{
+ return call_int_hook(socket_shutdown, 0, sock, how);
+}
+
+int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
+}
+EXPORT_SYMBOL(security_sock_rcv_skb);
+
+int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+ int __user *optlen, unsigned len)
+{
+ return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
+ optval, optlen, len);
+}
+
+int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+{
+ return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+ skb, secid);
+}
+EXPORT_SYMBOL(security_socket_getpeersec_dgram);
+
+int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
+{
+ return call_int_hook(sk_alloc_security, 0, sk, family, priority);
+}
+
+void security_sk_free(struct sock *sk)
+{
+ call_void_hook(sk_free_security, sk);
+}
+
+void security_sk_clone(const struct sock *sk, struct sock *newsk)
+{
+ call_void_hook(sk_clone_security, sk, newsk);
+}
+EXPORT_SYMBOL(security_sk_clone);
+
+void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
+{
+ call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
+}
+EXPORT_SYMBOL(security_sk_classify_flow);
+
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
+{
+ call_void_hook(req_classify_flow, req, flic);
+}
+EXPORT_SYMBOL(security_req_classify_flow);
+
+void security_sock_graft(struct sock *sk, struct socket *parent)
+{
+ call_void_hook(sock_graft, sk, parent);
+}
+EXPORT_SYMBOL(security_sock_graft);
+
+int security_inet_conn_request(const struct sock *sk,
+ struct sk_buff *skb, struct request_sock *req)
+{
+ return call_int_hook(inet_conn_request, 0, sk, skb, req);
+}
+EXPORT_SYMBOL(security_inet_conn_request);
+
+void security_inet_csk_clone(struct sock *newsk,
+ const struct request_sock *req)
+{
+ call_void_hook(inet_csk_clone, newsk, req);
+}
+
+void security_inet_conn_established(struct sock *sk,
+ struct sk_buff *skb)
+{
+ call_void_hook(inet_conn_established, sk, skb);
+}
+EXPORT_SYMBOL(security_inet_conn_established);
+
+int security_secmark_relabel_packet(u32 secid)
+{
+ return call_int_hook(secmark_relabel_packet, 0, secid);
+}
+EXPORT_SYMBOL(security_secmark_relabel_packet);
+
+void security_secmark_refcount_inc(void)
+{
+ call_void_hook(secmark_refcount_inc);
+}
+EXPORT_SYMBOL(security_secmark_refcount_inc);
+
+void security_secmark_refcount_dec(void)
+{
+ call_void_hook(secmark_refcount_dec);
+}
+EXPORT_SYMBOL(security_secmark_refcount_dec);
+
+int security_tun_dev_alloc_security(void **security)
+{
+ return call_int_hook(tun_dev_alloc_security, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_alloc_security);
+
+void security_tun_dev_free_security(void *security)
+{
+ call_void_hook(tun_dev_free_security, security);
+}
+EXPORT_SYMBOL(security_tun_dev_free_security);
+
+int security_tun_dev_create(void)
+{
+ return call_int_hook(tun_dev_create, 0);
+}
+EXPORT_SYMBOL(security_tun_dev_create);
+
+int security_tun_dev_attach_queue(void *security)
+{
+ return call_int_hook(tun_dev_attach_queue, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_attach_queue);
+
+int security_tun_dev_attach(struct sock *sk, void *security)
+{
+ return call_int_hook(tun_dev_attach, 0, sk, security);
+}
+EXPORT_SYMBOL(security_tun_dev_attach);
+
+int security_tun_dev_open(void *security)
+{
+ return call_int_hook(tun_dev_open, 0, security);
+}
+EXPORT_SYMBOL(security_tun_dev_open);
+
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb)
+{
+ return call_int_hook(sctp_assoc_request, 0, asoc, skb);
+}
+EXPORT_SYMBOL(security_sctp_assoc_request);
+
+int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen)
+{
+ return call_int_hook(sctp_bind_connect, 0, sk, optname,
+ address, addrlen);
+}
+EXPORT_SYMBOL(security_sctp_bind_connect);
+
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
+ struct sock *newsk)
+{
+ call_void_hook(sctp_sk_clone, asoc, sk, newsk);
+}
+EXPORT_SYMBOL(security_sctp_sk_clone);
+
+int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return call_int_hook(sctp_assoc_established, 0, asoc, skb);
+}
+EXPORT_SYMBOL(security_sctp_assoc_established);
+
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+
+int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
+{
+ return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
+}
+EXPORT_SYMBOL(security_ib_pkey_access);
+
+int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
+{
+ return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
+}
+EXPORT_SYMBOL(security_ib_endport_manage_subnet);
+
+int security_ib_alloc_security(void **sec)
+{
+ return call_int_hook(ib_alloc_security, 0, sec);
+}
+EXPORT_SYMBOL(security_ib_alloc_security);
+
+void security_ib_free_security(void *sec)
+{
+ call_void_hook(ib_free_security, sec);
+}
+EXPORT_SYMBOL(security_ib_free_security);
+#endif /* CONFIG_SECURITY_INFINIBAND */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
+{
+ return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
+}
+EXPORT_SYMBOL(security_xfrm_policy_alloc);
+
+int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctxp)
+{
+ return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
+}
+
+void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
+{
+ call_void_hook(xfrm_policy_free_security, ctx);
+}
+EXPORT_SYMBOL(security_xfrm_policy_free);
+
+int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
+{
+ return call_int_hook(xfrm_policy_delete_security, 0, ctx);
+}
+
+int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
+}
+EXPORT_SYMBOL(security_xfrm_state_alloc);
+
+int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+{
+ return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
+}
+
+int security_xfrm_state_delete(struct xfrm_state *x)
+{
+ return call_int_hook(xfrm_state_delete_security, 0, x);
+}
+EXPORT_SYMBOL(security_xfrm_state_delete);
+
+void security_xfrm_state_free(struct xfrm_state *x)
+{
+ call_void_hook(xfrm_state_free_security, x);
+}
+
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
+{
+ return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid);
+}
+
+int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
+{
+ struct security_hook_list *hp;
+ int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
+
+ /*
+ * Since this function is expected to return 0 or 1, the judgment
+ * becomes difficult if multiple LSMs supply this call. Fortunately,
+ * we can use the first LSM's judgment because currently only SELinux
+ * supplies this call.
+ *
+ * For speed optimization, we explicitly break the loop rather than
+ * using the macro.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
+ list) {
+ rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
+ break;
+ }
+ return rc;
+}
+
+int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
+{
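+ /* the trailing 1 is the hook's 'ckall' argument: check all xfrms in the skb */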
+ return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
+}
+
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
+{
+ int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
+ 0);
+
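+ /* a decode with 'ckall' clear is not expected to fail, hence the BUG_ON() */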
+ BUG_ON(rc);
+}
+EXPORT_SYMBOL(security_skb_classify_flow);
+
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+#ifdef CONFIG_KEYS
+
+int security_key_alloc(struct key *key, const struct cred *cred,
+ unsigned long flags)
+{
+ return call_int_hook(key_alloc, 0, key, cred, flags);
+}
+
+void security_key_free(struct key *key)
+{
+ call_void_hook(key_free, key);
+}
+
+int security_key_permission(key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+{
+ return call_int_hook(key_permission, 0, key_ref, cred, need_perm);
+}
+
+int security_key_getsecurity(struct key *key, char **_buffer)
+{
+ *_buffer = NULL;
+ return call_int_hook(key_getsecurity, 0, key, _buffer);
+}
+
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
+{
+ return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
+}
+
+int security_audit_rule_known(struct audit_krule *krule)
+{
+ return call_int_hook(audit_rule_known, 0, krule);
+}
+
+void security_audit_rule_free(void *lsmrule)
+{
+ call_void_hook(audit_rule_free, lsmrule);
+}
+
+int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
+{
+ return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
+}
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_BPF_SYSCALL
+int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+{
+ return call_int_hook(bpf, 0, cmd, attr, size);
+}
+int security_bpf_map(struct bpf_map *map, fmode_t fmode)
+{
+ return call_int_hook(bpf_map, 0, map, fmode);
+}
+int security_bpf_prog(struct bpf_prog *prog)
+{
+ return call_int_hook(bpf_prog, 0, prog);
+}
+int security_bpf_map_alloc(struct bpf_map *map)
+{
+ return call_int_hook(bpf_map_alloc_security, 0, map);
+}
+int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+{
+ return call_int_hook(bpf_prog_alloc_security, 0, aux);
+}
+void security_bpf_map_free(struct bpf_map *map)
+{
+ call_void_hook(bpf_map_free_security, map);
+}
+void security_bpf_prog_free(struct bpf_prog_aux *aux)
+{
+ call_void_hook(bpf_prog_free_security, aux);
+}
+#endif /* CONFIG_BPF_SYSCALL */
+
+int security_locked_down(enum lockdown_reason what)
+{
+ return call_int_hook(locked_down, 0, what);
+}
+EXPORT_SYMBOL(security_locked_down);
+
+#ifdef CONFIG_PERF_EVENTS
+int security_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ return call_int_hook(perf_event_open, 0, attr, type);
+}
+
+int security_perf_event_alloc(struct perf_event *event)
+{
+ return call_int_hook(perf_event_alloc, 0, event);
+}
+
+void security_perf_event_free(struct perf_event *event)
+{
+ call_void_hook(perf_event_free, event);
+}
+
+int security_perf_event_read(struct perf_event *event)
+{
+ return call_int_hook(perf_event_read, 0, event);
+}
+
+int security_perf_event_write(struct perf_event *event)
+{
+ return call_int_hook(perf_event_write, 0, event);
+}
+#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+int security_uring_override_creds(const struct cred *new)
+{
+ return call_int_hook(uring_override_creds, 0, new);
+}
+
+int security_uring_sqpoll(void)
+{
+ return call_int_hook(uring_sqpoll, 0);
+}
+int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return call_int_hook(uring_cmd, 0, ioucmd);
+}
+#endif /* CONFIG_IO_URING */
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore
new file mode 100644
index 000000000..168fae13c
--- /dev/null
+++ b/security/selinux/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+av_permissions.h
+flask.h
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
new file mode 100644
index 000000000..9e921fc72
--- /dev/null
+++ b/security/selinux/Kconfig
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_SELINUX
+ bool "NSA SELinux Support"
+ depends on SECURITY_NETWORK && AUDIT && NET && INET
+ select NETWORK_SECMARK
+ default n
+ help
+ This selects NSA Security-Enhanced Linux (SELinux).
+ You will also need a policy configuration and a labeled filesystem.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_BOOTPARAM
+ bool "NSA SELinux boot parameter"
+ depends on SECURITY_SELINUX
+ default n
+ help
+ This option adds a kernel parameter 'selinux', which allows SELinux
+ to be disabled at boot. If this option is selected, SELinux
+ functionality can be disabled with selinux=0 on the kernel
+ command line. The purpose of this option is to allow a single
+ kernel image to be distributed with SELinux built in, but not
+ necessarily enabled.
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_DISABLE
+ bool "NSA SELinux runtime disable"
+ depends on SECURITY_SELINUX
+ select SECURITY_WRITABLE_HOOKS
+ default n
+ help
+ This option enables writing to a selinuxfs node 'disable', which
+ allows SELinux to be disabled at runtime prior to the policy load.
+ SELinux will then remain disabled until the next boot.
+ This option is similar to the selinux=0 boot parameter, but is
+ intended to support runtime disabling of SELinux, e.g. from
+ /sbin/init, for portability across platforms where boot parameters
+ are difficult to employ.
+
+ NOTE: selecting this option will disable the '__ro_after_init'
+ kernel hardening feature for security hooks. Please consider
+ using the selinux=0 boot parameter instead of enabling this
+ option.
+
+ WARNING: this option is deprecated and will be removed in a future
+ kernel release.
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SELINUX_DEVELOP
+ bool "NSA SELinux Development Support"
+ depends on SECURITY_SELINUX
+ default y
+ help
+ This enables the development support option of NSA SELinux,
+ which is useful for experimenting with SELinux and developing
+ policies. If unsure, say Y. With this option enabled, the
+ kernel will start in permissive mode (log everything, deny nothing)
+ unless you specify enforcing=1 on the kernel command line. You
+ can interactively toggle the kernel between enforcing mode and
+ permissive mode (if permitted by the policy) via
+ /sys/fs/selinux/enforce.
+
+config SECURITY_SELINUX_AVC_STATS
+ bool "NSA SELinux AVC Statistics"
+ depends on SECURITY_SELINUX
+ default y
+ help
+ This option collects access vector cache statistics, which are
+ exported via /sys/fs/selinux/avc/cache_stats and may be monitored
+ using tools such as avcstat.
+
+config SECURITY_SELINUX_CHECKREQPROT_VALUE
+ int "NSA SELinux checkreqprot default value"
+ depends on SECURITY_SELINUX
+ range 0 1
+ default 0
+ help
+ This option sets the default value for the 'checkreqprot' flag
+ that determines whether SELinux checks the protection requested
+ by the application or the protection that will be applied by the
+ kernel (including any implied execute for read-implies-exec) for
+ mmap and mprotect calls. If this option is set to 0 (zero),
+ SELinux will default to checking the protection that will be applied
+ by the kernel. If this option is set to 1 (one), SELinux will
+ default to checking the protection requested by the application.
+ The checkreqprot flag may be changed from the default via the
+ 'checkreqprot=' boot parameter. It may also be changed at runtime
+ via /sys/fs/selinux/checkreqprot if authorized by policy.
+
+ WARNING: this option is deprecated and will be removed in a future
+ kernel release.
+
+ If you are unsure how to answer this question, answer 0.
+
+config SECURITY_SELINUX_SIDTAB_HASH_BITS
+ int "NSA SELinux sidtab hashtable size"
+ depends on SECURITY_SELINUX
+ range 8 13
+ default 9
+ help
+ This option sets the number of buckets used in the sidtab hashtable
+ to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS. The number of hash collisions
+ may be viewed at /sys/fs/selinux/ss/sidtab_hash_stats. If chain
+ lengths are high (e.g. > 20) then selecting a higher value here will
+ ensure that lookup times are short and stable.
+
+config SECURITY_SELINUX_SID2STR_CACHE_SIZE
+ int "NSA SELinux SID to context string translation cache size"
+ depends on SECURITY_SELINUX
+ default 256
+ help
+ This option defines the size of the internal SID -> context string
+ cache, which improves the performance of SID to context string
+ conversion. Setting this option to 0 disables the cache completely.
+
+ If unsure, keep the default value.
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
new file mode 100644
index 000000000..8b21520bd
--- /dev/null
+++ b/security/selinux/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for building the SELinux module as part of the kernel tree.
+#
+
+obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
+
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
+ netnode.o netport.o status.o \
+ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
+ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/context.o
+
+selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
+
+selinux-$(CONFIG_NETLABEL) += netlabel.o
+
+selinux-$(CONFIG_SECURITY_INFINIBAND) += ibpkey.o
+
+selinux-$(CONFIG_IMA) += ima.o
+
+ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
+
+$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
+
+quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
+ cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
+
+targets += flask.h av_permissions.h
+# once make >= 4.3 is required, we can use grouped targets in the rule below,
+# which basically involves adding both headers and a '&' before the colon;
+# see the example below:
+# $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/...
+$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE
+ $(call if_changed,flask)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
new file mode 100644
index 000000000..9a43af0eb
--- /dev/null
+++ b/security/selinux/avc.c
@@ -0,0 +1,1222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the kernel access vector cache (AVC).
+ *
+ * Authors: Stephen Smalley, <sds@tycho.nsa.gov>
+ * James Morris <jmorris@redhat.com>
+ *
+ * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com>
+ * Replaced the avc_lock spinlock by RCU.
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <linux/un.h>
+#include <net/af_unix.h>
+#include <linux/ip.h>
+#include <linux/audit.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include "avc.h"
+#include "avc_ss.h"
+#include "classmap.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/avc.h>
+
+#define AVC_CACHE_SLOTS 512
+#define AVC_DEF_CACHE_THRESHOLD 512
+#define AVC_CACHE_RECLAIM 16
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+#define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field)
+#else
+#define avc_cache_stats_incr(field) do {} while (0)
+#endif
+
+struct avc_entry {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ struct av_decision avd;
+ struct avc_xperms_node *xp_node;
+};
+
+struct avc_node {
+ struct avc_entry ae;
+ struct hlist_node list; /* anchored in avc_cache->slots[i] */
+ struct rcu_head rhead;
+};
+
+struct avc_xperms_decision_node {
+ struct extended_perms_decision xpd;
+ struct list_head xpd_list; /* list of extended_perms_decision */
+};
+
+struct avc_xperms_node {
+ struct extended_perms xp;
+ struct list_head xpd_head; /* list head of extended_perms_decision */
+};
+
+struct avc_cache {
+ struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
+ atomic_t lru_hint; /* LRU hint for reclaim scan */
+ atomic_t active_nodes;
+ u32 latest_notif; /* latest revocation notification */
+};
+
+struct avc_callback_node {
+ int (*callback) (u32 event);
+ u32 events;
+ struct avc_callback_node *next;
+};
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
+#endif
+
+struct selinux_avc {
+ unsigned int avc_cache_threshold;
+ struct avc_cache avc_cache;
+};
+
+static struct selinux_avc selinux_avc;
+
+void selinux_avc_init(struct selinux_avc **avc)
+{
+ int i;
+
+ selinux_avc.avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
+ for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+ INIT_HLIST_HEAD(&selinux_avc.avc_cache.slots[i]);
+ spin_lock_init(&selinux_avc.avc_cache.slots_lock[i]);
+ }
+ atomic_set(&selinux_avc.avc_cache.active_nodes, 0);
+ atomic_set(&selinux_avc.avc_cache.lru_hint, 0);
+ *avc = &selinux_avc;
+}
+
+unsigned int avc_get_cache_threshold(struct selinux_avc *avc)
+{
+ return avc->avc_cache_threshold;
+}
+
+void avc_set_cache_threshold(struct selinux_avc *avc,
+ unsigned int cache_threshold)
+{
+ avc->avc_cache_threshold = cache_threshold;
+}
+
+static struct avc_callback_node *avc_callbacks __ro_after_init;
+static struct kmem_cache *avc_node_cachep __ro_after_init;
+static struct kmem_cache *avc_xperms_data_cachep __ro_after_init;
+static struct kmem_cache *avc_xperms_decision_cachep __ro_after_init;
+static struct kmem_cache *avc_xperms_cachep __ro_after_init;
+
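+/*
+ * Map a (ssid, tsid, tclass) triple to a cache slot; AVC_CACHE_SLOTS must
+ * remain a power of two for the mask below to yield a valid slot index.
+ */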
+static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
+{
+ return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
+}
+
+/**
+ * avc_init - Initialize the AVC.
+ *
+ * Initialize the access vector cache.
+ */
+void __init avc_init(void)
+{
+ avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
+ 0, SLAB_PANIC, NULL);
+ avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
+ sizeof(struct avc_xperms_node),
+ 0, SLAB_PANIC, NULL);
+ avc_xperms_decision_cachep = kmem_cache_create(
+ "avc_xperms_decision_node",
+ sizeof(struct avc_xperms_decision_node),
+ 0, SLAB_PANIC, NULL);
+ avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
+ sizeof(struct extended_perms_data),
+ 0, SLAB_PANIC, NULL);
+}
+
+int avc_get_hash_stats(struct selinux_avc *avc, char *page)
+{
+ int i, chain_len, max_chain_len, slots_used;
+ struct avc_node *node;
+ struct hlist_head *head;
+
+ rcu_read_lock();
+
+ slots_used = 0;
+ max_chain_len = 0;
+ for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+ head = &avc->avc_cache.slots[i];
+ if (!hlist_empty(head)) {
+ slots_used++;
+ chain_len = 0;
+ hlist_for_each_entry_rcu(node, head, list)
+ chain_len++;
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+ "longest chain: %d\n",
+ atomic_read(&avc->avc_cache.active_nodes),
+ slots_used, AVC_CACHE_SLOTS, max_chain_len);
+}
+
+/*
+ * A linked list is used for extended_perms_decision lookup because the
+ * list is always small, i.e. fewer than 5 entries and typically just 1.
+ */
+static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
+ struct avc_xperms_node *xp_node)
+{
+ struct avc_xperms_decision_node *xpd_node;
+
+ list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
+ if (xpd_node->xpd.driver == driver)
+ return &xpd_node->xpd;
+ }
+ return NULL;
+}
+
+static inline unsigned int
+avc_xperms_has_perm(struct extended_perms_decision *xpd,
+ u8 perm, u8 which)
+{
+ unsigned int rc = 0;
+
+ if ((which == XPERMS_ALLOWED) &&
+ (xpd->used & XPERMS_ALLOWED))
+ rc = security_xperm_test(xpd->allowed->p, perm);
+ else if ((which == XPERMS_AUDITALLOW) &&
+ (xpd->used & XPERMS_AUDITALLOW))
+ rc = security_xperm_test(xpd->auditallow->p, perm);
+ else if ((which == XPERMS_DONTAUDIT) &&
+ (xpd->used & XPERMS_DONTAUDIT))
+ rc = security_xperm_test(xpd->dontaudit->p, perm);
+ return rc;
+}
+
+static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
+ u8 driver, u8 perm)
+{
+ struct extended_perms_decision *xpd;
+ security_xperm_set(xp_node->xp.drivers.p, driver);
+ xpd = avc_xperms_decision_lookup(driver, xp_node);
+ if (xpd && xpd->allowed)
+ security_xperm_set(xpd->allowed->p, perm);
+}
+
+static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
+{
+ struct extended_perms_decision *xpd;
+
+ xpd = &xpd_node->xpd;
+ if (xpd->allowed)
+ kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
+ if (xpd->auditallow)
+ kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
+ if (xpd->dontaudit)
+ kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
+ kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
+}
+
+static void avc_xperms_free(struct avc_xperms_node *xp_node)
+{
+ struct avc_xperms_decision_node *xpd_node, *tmp;
+
+ if (!xp_node)
+ return;
+
+ list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
+ list_del(&xpd_node->xpd_list);
+ avc_xperms_decision_free(xpd_node);
+ }
+ kmem_cache_free(avc_xperms_cachep, xp_node);
+}
+
+static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
+ struct extended_perms_decision *src)
+{
+ dest->driver = src->driver;
+ dest->used = src->used;
+ if (dest->used & XPERMS_ALLOWED)
+ memcpy(dest->allowed->p, src->allowed->p,
+ sizeof(src->allowed->p));
+ if (dest->used & XPERMS_AUDITALLOW)
+ memcpy(dest->auditallow->p, src->auditallow->p,
+ sizeof(src->auditallow->p));
+ if (dest->used & XPERMS_DONTAUDIT)
+ memcpy(dest->dontaudit->p, src->dontaudit->p,
+ sizeof(src->dontaudit->p));
+}
+
+/*
+ * similar to avc_copy_xperms_decision, but only copy decision
+ * information relevant to this perm
+ */
+static inline void avc_quick_copy_xperms_decision(u8 perm,
+ struct extended_perms_decision *dest,
+ struct extended_perms_decision *src)
+{
+ /*
+ * compute the index of the u32 (among the 8 u32s making up the 256
+ * bits) that contains this command permission
+ */
+ u8 i = perm >> 5;
+
+ dest->used = src->used;
+ if (dest->used & XPERMS_ALLOWED)
+ dest->allowed->p[i] = src->allowed->p[i];
+ if (dest->used & XPERMS_AUDITALLOW)
+ dest->auditallow->p[i] = src->auditallow->p[i];
+ if (dest->used & XPERMS_DONTAUDIT)
+ dest->dontaudit->p[i] = src->dontaudit->p[i];
+}
+
+static struct avc_xperms_decision_node
+ *avc_xperms_decision_alloc(u8 which)
+{
+ struct avc_xperms_decision_node *xpd_node;
+ struct extended_perms_decision *xpd;
+
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!xpd_node)
+ return NULL;
+
+ xpd = &xpd_node->xpd;
+ if (which & XPERMS_ALLOWED) {
+ xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!xpd->allowed)
+ goto error;
+ }
+ if (which & XPERMS_AUDITALLOW) {
+ xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!xpd->auditallow)
+ goto error;
+ }
+ if (which & XPERMS_DONTAUDIT) {
+ xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!xpd->dontaudit)
+ goto error;
+ }
+ return xpd_node;
+error:
+ avc_xperms_decision_free(xpd_node);
+ return NULL;
+}
+
+static int avc_add_xperms_decision(struct avc_node *node,
+ struct extended_perms_decision *src)
+{
+ struct avc_xperms_decision_node *dest_xpd;
+
+ node->ae.xp_node->xp.len++;
+ dest_xpd = avc_xperms_decision_alloc(src->used);
+ if (!dest_xpd)
+ return -ENOMEM;
+ avc_copy_xperms_decision(&dest_xpd->xpd, src);
+ list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
+ return 0;
+}
+
+static struct avc_xperms_node *avc_xperms_alloc(void)
+{
+ struct avc_xperms_node *xp_node;
+
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN);
+ if (!xp_node)
+ return xp_node;
+ INIT_LIST_HEAD(&xp_node->xpd_head);
+ return xp_node;
+}
+
+static int avc_xperms_populate(struct avc_node *node,
+ struct avc_xperms_node *src)
+{
+ struct avc_xperms_node *dest;
+ struct avc_xperms_decision_node *dest_xpd;
+ struct avc_xperms_decision_node *src_xpd;
+
+ if (src->xp.len == 0)
+ return 0;
+ dest = avc_xperms_alloc();
+ if (!dest)
+ return -ENOMEM;
+
+ memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
+ dest->xp.len = src->xp.len;
+
+ /* for each source xpd allocate a destination xpd and copy */
+ list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
+ dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
+ if (!dest_xpd)
+ goto error;
+ avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
+ list_add(&dest_xpd->xpd_list, &dest->xpd_head);
+ }
+ node->ae.xp_node = dest;
+ return 0;
+error:
+ avc_xperms_free(dest);
+ return -ENOMEM;
+
+}
+
+static inline u32 avc_xperms_audit_required(u32 requested,
+ struct av_decision *avd,
+ struct extended_perms_decision *xpd,
+ u8 perm,
+ int result,
+ u32 *deniedp)
+{
+ u32 denied, audited;
+
+ denied = requested & ~avd->allowed;
+ if (unlikely(denied)) {
+ audited = denied & avd->auditdeny;
+ if (audited && xpd) {
+ if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
+ audited &= ~requested;
+ }
+ } else if (result) {
+ audited = denied = requested;
+ } else {
+ audited = requested & avd->auditallow;
+ if (audited && xpd) {
+ if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
+ audited &= ~requested;
+ }
+ }
+
+ *deniedp = denied;
+ return audited;
+}
+
+static inline int avc_xperms_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct av_decision *avd,
+ struct extended_perms_decision *xpd,
+ u8 perm, int result,
+ struct common_audit_data *ad)
+{
+ u32 audited, denied;
+
+ audited = avc_xperms_audit_required(
+ requested, avd, xpd, perm, result, &denied);
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(state, ssid, tsid, tclass, requested,
+ audited, denied, result, ad);
+}
+
+static void avc_node_free(struct rcu_head *rhead)
+{
+ struct avc_node *node = container_of(rhead, struct avc_node, rhead);
+ avc_xperms_free(node->ae.xp_node);
+ kmem_cache_free(avc_node_cachep, node);
+ avc_cache_stats_incr(frees);
+}
+
+static void avc_node_delete(struct selinux_avc *avc, struct avc_node *node)
+{
+ hlist_del_rcu(&node->list);
+ call_rcu(&node->rhead, avc_node_free);
+ atomic_dec(&avc->avc_cache.active_nodes);
+}
+
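+/*
+ * Unlike avc_node_delete(), avc_node_kill() is for nodes that were never
+ * published in the cache, so no RCU grace period is needed before freeing.
+ */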
+static void avc_node_kill(struct selinux_avc *avc, struct avc_node *node)
+{
+ avc_xperms_free(node->ae.xp_node);
+ kmem_cache_free(avc_node_cachep, node);
+ avc_cache_stats_incr(frees);
+ atomic_dec(&avc->avc_cache.active_nodes);
+}
+
+static void avc_node_replace(struct selinux_avc *avc,
+ struct avc_node *new, struct avc_node *old)
+{
+ hlist_replace_rcu(&old->list, &new->list);
+ call_rcu(&old->rhead, avc_node_free);
+ atomic_dec(&avc->avc_cache.active_nodes);
+}
+
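+/*
+ * Reclaim up to AVC_CACHE_RECLAIM nodes, scanning the slots round-robin
+ * starting at lru_hint; slots whose lock is contended are simply skipped.
+ */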
+static inline int avc_reclaim_node(struct selinux_avc *avc)
+{
+ struct avc_node *node;
+ int hvalue, try, ecx;
+ unsigned long flags;
+ struct hlist_head *head;
+ spinlock_t *lock;
+
+ for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
+ hvalue = atomic_inc_return(&avc->avc_cache.lru_hint) &
+ (AVC_CACHE_SLOTS - 1);
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
+
+ if (!spin_trylock_irqsave(lock, flags))
+ continue;
+
+ rcu_read_lock();
+ hlist_for_each_entry(node, head, list) {
+ avc_node_delete(avc, node);
+ avc_cache_stats_incr(reclaims);
+ ecx++;
+ if (ecx >= AVC_CACHE_RECLAIM) {
+ rcu_read_unlock();
+ spin_unlock_irqrestore(lock, flags);
+ goto out;
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock_irqrestore(lock, flags);
+ }
+out:
+ return ecx;
+}
+
+static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
+{
+ struct avc_node *node;
+
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
+ if (!node)
+ goto out;
+
+ INIT_HLIST_NODE(&node->list);
+ avc_cache_stats_incr(allocations);
+
+ if (atomic_inc_return(&avc->avc_cache.active_nodes) >
+ avc->avc_cache_threshold)
+ avc_reclaim_node(avc);
+
+out:
+ return node;
+}
+
+static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
+{
+ node->ae.ssid = ssid;
+ node->ae.tsid = tsid;
+ node->ae.tclass = tclass;
+ memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
+}
+
+static inline struct avc_node *avc_search_node(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass)
+{
+ struct avc_node *node, *ret = NULL;
+ int hvalue;
+ struct hlist_head *head;
+
+ hvalue = avc_hash(ssid, tsid, tclass);
+ head = &avc->avc_cache.slots[hvalue];
+ hlist_for_each_entry_rcu(node, head, list) {
+ if (ssid == node->ae.ssid &&
+ tclass == node->ae.tclass &&
+ tsid == node->ae.tsid) {
+ ret = node;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * avc_lookup - Look up an AVC entry.
+ * @avc: the access vector cache
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ *
+ * Look up an AVC entry that is valid for the SID pair
+ * (@ssid, @tsid), interpreting the permissions
+ * based on @tclass. If a valid AVC entry exists,
+ * then this function returns the avc_node.
+ * Otherwise, this function returns NULL.
+ */
+static struct avc_node *avc_lookup(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass)
+{
+ struct avc_node *node;
+
+ avc_cache_stats_incr(lookups);
+ node = avc_search_node(avc, ssid, tsid, tclass);
+
+ if (node)
+ return node;
+
+ avc_cache_stats_incr(misses);
+ return NULL;
+}
+
+static int avc_latest_notif_update(struct selinux_avc *avc,
+ int seqno, int is_insert)
+{
+ int ret = 0;
+ static DEFINE_SPINLOCK(notif_lock);
+ unsigned long flag;
+
+ spin_lock_irqsave(&notif_lock, flag);
+ if (is_insert) {
+ if (seqno < avc->avc_cache.latest_notif) {
+ pr_warn("SELinux: avc: seqno %d < latest_notif %d\n",
+ seqno, avc->avc_cache.latest_notif);
+ ret = -EAGAIN;
+ }
+ } else {
+ if (seqno > avc->avc_cache.latest_notif)
+ avc->avc_cache.latest_notif = seqno;
+ }
+ spin_unlock_irqrestore(&notif_lock, flag);
+
+ return ret;
+}
+
+/**
+ * avc_insert - Insert an AVC entry.
+ * @avc: the access vector cache
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @avd: resulting av decision
+ * @xp_node: resulting extended permissions
+ *
+ * Insert an AVC entry for the SID pair
+ * (@ssid, @tsid) and class @tclass.
+ * The access vectors and the sequence number are
+ * normally provided by the security server in
+ * response to a security_compute_av() call. If the
+ * sequence number @avd->seqno is not less than the latest
+ * revocation notification, then the function copies
+ * the access vectors into a cache entry and returns
+ * the inserted avc_node. Otherwise, this function returns NULL.
+ */
+static struct avc_node *avc_insert(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass,
+ struct av_decision *avd,
+ struct avc_xperms_node *xp_node)
+{
+ struct avc_node *pos, *node = NULL;
+ int hvalue;
+ unsigned long flag;
+ spinlock_t *lock;
+ struct hlist_head *head;
+
+ if (avc_latest_notif_update(avc, avd->seqno, 1))
+ return NULL;
+
+ node = avc_alloc_node(avc);
+ if (!node)
+ return NULL;
+
+ avc_node_populate(node, ssid, tsid, tclass, avd);
+ if (avc_xperms_populate(node, xp_node)) {
+ avc_node_kill(avc, node);
+ return NULL;
+ }
+
+ hvalue = avc_hash(ssid, tsid, tclass);
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
+ spin_lock_irqsave(lock, flag);
+ hlist_for_each_entry(pos, head, list) {
+ if (pos->ae.ssid == ssid &&
+ pos->ae.tsid == tsid &&
+ pos->ae.tclass == tclass) {
+ avc_node_replace(avc, node, pos);
+ goto found;
+ }
+ }
+ hlist_add_head_rcu(&node->list, head);
+found:
+ spin_unlock_irqrestore(lock, flag);
+ return node;
+}
+
+/**
+ * avc_audit_pre_callback - SELinux specific information
+ * will be called by generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
+{
+ struct common_audit_data *ad = a;
+ struct selinux_audit_data *sad = ad->selinux_audit_data;
+ u32 av = sad->audited;
+ const char *const *perms;
+ int i, perm;
+
+ audit_log_format(ab, "avc: %s ", sad->denied ? "denied" : "granted");
+
+ if (av == 0) {
+ audit_log_format(ab, " null");
+ return;
+ }
+
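+ /* decode each set bit of the access vector into its permission name */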
+ perms = secclass_map[sad->tclass-1].perms;
+
+ audit_log_format(ab, " {");
+ i = 0;
+ perm = 1;
+ while (i < (sizeof(av) * 8)) {
+ if ((perm & av) && perms[i]) {
+ audit_log_format(ab, " %s", perms[i]);
+ av &= ~perm;
+ }
+ i++;
+ perm <<= 1;
+ }
+
+ if (av)
+ audit_log_format(ab, " 0x%x", av);
+
+ audit_log_format(ab, " } for ");
+}
+
+/**
+ * avc_audit_post_callback - log SELinux-specific audit information;
+ * called by the generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
+{
+ struct common_audit_data *ad = a;
+ struct selinux_audit_data *sad = ad->selinux_audit_data;
+ char *scontext = NULL;
+ char *tcontext = NULL;
+ const char *tclass = NULL;
+ u32 scontext_len;
+ u32 tcontext_len;
+ int rc;
+
+ rc = security_sid_to_context(sad->state, sad->ssid, &scontext,
+ &scontext_len);
+ if (rc)
+ audit_log_format(ab, " ssid=%d", sad->ssid);
+ else
+ audit_log_format(ab, " scontext=%s", scontext);
+
+ rc = security_sid_to_context(sad->state, sad->tsid, &tcontext,
+ &tcontext_len);
+ if (rc)
+ audit_log_format(ab, " tsid=%d", sad->tsid);
+ else
+ audit_log_format(ab, " tcontext=%s", tcontext);
+
+ tclass = secclass_map[sad->tclass-1].name;
+ audit_log_format(ab, " tclass=%s", tclass);
+
+ if (sad->denied)
+ audit_log_format(ab, " permissive=%u", sad->result ? 0 : 1);
+
+ trace_selinux_audited(sad, scontext, tcontext, tclass);
+ kfree(tcontext);
+ kfree(scontext);
+
+ /* in case of invalid context report also the actual context string */
+ rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
+ &scontext_len);
+ if (!rc && scontext) {
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " srawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
+ kfree(scontext);
+ }
+
+ rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
+ &scontext_len);
+ if (!rc && scontext) {
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " trawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
+ kfree(scontext);
+ }
+}
+
+/*
+ * This is the slow part of avc audit with a big stack footprint.
+ * Note that it is non-blocking and can be called from under
+ * rcu_read_lock().
+ */
+noinline int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a)
+{
+ struct common_audit_data stack_data;
+ struct selinux_audit_data sad;
+
+ if (WARN_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)))
+ return -EINVAL;
+
+ if (!a) {
+ a = &stack_data;
+ a->type = LSM_AUDIT_DATA_NONE;
+ }
+
+ sad.tclass = tclass;
+ sad.requested = requested;
+ sad.ssid = ssid;
+ sad.tsid = tsid;
+ sad.audited = audited;
+ sad.denied = denied;
+ sad.result = result;
+ sad.state = state;
+
+ a->selinux_audit_data = &sad;
+
+ common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
+ return 0;
+}
+
+/**
+ * avc_add_callback - Register a callback for security events.
+ * @callback: callback function
+ * @events: security events
+ *
+ * Register a callback function for events in the set @events.
+ * Returns %0 on success or -%ENOMEM if insufficient memory
+ * exists to add the callback.
+ */
+int __init avc_add_callback(int (*callback)(u32 event), u32 events)
+{
+ struct avc_callback_node *c;
+ int rc = 0;
+
+ c = kmalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ c->callback = callback;
+ c->events = events;
+ c->next = avc_callbacks;
+ avc_callbacks = c;
+out:
+ return rc;
+}
+
+/**
+ * avc_update_node - Update an AVC entry
+ * @avc: the access vector cache
+ * @event : Updating event
+ * @perms : Permission mask bits
+ * @driver: xperm driver information
+ * @xperm: xperm permissions
+ * @ssid: AVC entry source sid
+ * @tsid: AVC entry target sid
+ * @tclass : AVC entry target object class
+ * @seqno : sequence number when decision was made
+ * @xpd: extended_perms_decision to be added to the node
+ * @flags: the AVC_* flags, e.g. AVC_EXTENDED_PERMS, or 0.
+ *
+ * If a valid AVC entry doesn't exist, this function returns -ENOENT.
+ * If the internal memory allocation fails, this function returns -ENOMEM.
+ * Otherwise, this function updates the AVC entry. The original AVC entry
+ * object will be released later by RCU.
+ */
+static int avc_update_node(struct selinux_avc *avc,
+ u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+ u32 tsid, u16 tclass, u32 seqno,
+ struct extended_perms_decision *xpd,
+ u32 flags)
+{
+ int hvalue, rc = 0;
+ unsigned long flag;
+ struct avc_node *pos, *node, *orig = NULL;
+ struct hlist_head *head;
+ spinlock_t *lock;
+
+ node = avc_alloc_node(avc);
+ if (!node) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Lock the target slot */
+ hvalue = avc_hash(ssid, tsid, tclass);
+
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
+
+ spin_lock_irqsave(lock, flag);
+
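+ /*
+ * Only an entry computed against the same policy, i.e. with a
+ * matching @seqno, is eligible for the update; anything else would
+ * apply the update to a stale decision.
+ */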
+ hlist_for_each_entry(pos, head, list) {
+ if (ssid == pos->ae.ssid &&
+ tsid == pos->ae.tsid &&
+ tclass == pos->ae.tclass &&
+ seqno == pos->ae.avd.seqno){
+ orig = pos;
+ break;
+ }
+ }
+
+ if (!orig) {
+ rc = -ENOENT;
+ avc_node_kill(avc, node);
+ goto out_unlock;
+ }
+
+ /*
+ * Copy and replace original node.
+ */
+
+ avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
+
+ if (orig->ae.xp_node) {
+ rc = avc_xperms_populate(node, orig->ae.xp_node);
+ if (rc) {
+ avc_node_kill(avc, node);
+ goto out_unlock;
+ }
+ }
+
+ switch (event) {
+ case AVC_CALLBACK_GRANT:
+ node->ae.avd.allowed |= perms;
+ if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
+ avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
+ break;
+ case AVC_CALLBACK_TRY_REVOKE:
+ case AVC_CALLBACK_REVOKE:
+ node->ae.avd.allowed &= ~perms;
+ break;
+ case AVC_CALLBACK_AUDITALLOW_ENABLE:
+ node->ae.avd.auditallow |= perms;
+ break;
+ case AVC_CALLBACK_AUDITALLOW_DISABLE:
+ node->ae.avd.auditallow &= ~perms;
+ break;
+ case AVC_CALLBACK_AUDITDENY_ENABLE:
+ node->ae.avd.auditdeny |= perms;
+ break;
+ case AVC_CALLBACK_AUDITDENY_DISABLE:
+ node->ae.avd.auditdeny &= ~perms;
+ break;
+ case AVC_CALLBACK_ADD_XPERMS:
+ avc_add_xperms_decision(node, xpd);
+ break;
+ }
+ avc_node_replace(avc, node, orig);
+out_unlock:
+ spin_unlock_irqrestore(lock, flag);
+out:
+ return rc;
+}
+
+/**
+ * avc_flush - Flush the cache
+ * @avc: the access vector cache
+ */
+static void avc_flush(struct selinux_avc *avc)
+{
+ struct hlist_head *head;
+ struct avc_node *node;
+ spinlock_t *lock;
+ unsigned long flag;
+ int i;
+
+ for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+ head = &avc->avc_cache.slots[i];
+ lock = &avc->avc_cache.slots_lock[i];
+
+ spin_lock_irqsave(lock, flag);
+ /*
+ * With preemptible RCU, the outer spinlock does not
+ * prevent RCU grace periods from ending.
+ */
+ rcu_read_lock();
+ hlist_for_each_entry(node, head, list)
+ avc_node_delete(avc, node);
+ rcu_read_unlock();
+ spin_unlock_irqrestore(lock, flag);
+ }
+}
+
+/**
+ * avc_ss_reset - Flush the cache and revalidate migrated permissions.
+ * @avc: the access vector cache
+ * @seqno: policy sequence number
+ */
+int avc_ss_reset(struct selinux_avc *avc, u32 seqno)
+{
+ struct avc_callback_node *c;
+ int rc = 0, tmprc;
+
+ avc_flush(avc);
+
+ for (c = avc_callbacks; c; c = c->next) {
+ if (c->events & AVC_CALLBACK_RESET) {
+ tmprc = c->callback(AVC_CALLBACK_RESET);
+ /* save the first error encountered for the return
+ value and continue processing the callbacks */
+ if (!rc)
+ rc = tmprc;
+ }
+ }
+
+ avc_latest_notif_update(avc, seqno, 0);
+ return rc;
+}
+
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security computation.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline
+struct avc_node *avc_compute_av(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd,
+ struct avc_xperms_node *xp_node)
+{
+ rcu_read_unlock();
+ INIT_LIST_HEAD(&xp_node->xpd_head);
+ security_compute_av(state, ssid, tsid, tclass, avd, &xp_node->xp);
+ rcu_read_lock();
+ return avc_insert(state->avc, ssid, tsid, tclass, avd, xp_node);
+}
+
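+/*
+ * When the denial is not enforced (permissive mode or a permissive domain),
+ * the decision is cached as granted via AVC_CALLBACK_GRANT so that later
+ * checks hit the cache instead of recomputing.
+ */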
+static noinline int avc_denied(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ u8 driver, u8 xperm, unsigned int flags,
+ struct av_decision *avd)
+{
+ if (flags & AVC_STRICT)
+ return -EACCES;
+
+ if (enforcing_enabled(state) &&
+ !(avd->flags & AVD_FLAGS_PERMISSIVE))
+ return -EACCES;
+
+ avc_update_node(state->avc, AVC_CALLBACK_GRANT, requested, driver,
+ xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
+ return 0;
+}
+
+/*
+ * The avc extended permissions logic adds an additional 256 bits of
+ * permissions to an avc node when extended permissions for that node are
+ * specified in the avtab. If the additional 256 permissions are not adequate,
+ * as is the case with ioctls, then multiple sets may be chained together and
+ * the driver field is used to specify which set contains the permission.
+ */
+int avc_has_extended_perms(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 xperm, struct common_audit_data *ad)
+{
+ struct avc_node *node;
+ struct av_decision avd;
+ u32 denied;
+ struct extended_perms_decision local_xpd;
+ struct extended_perms_decision *xpd = NULL;
+ struct extended_perms_data allowed;
+ struct extended_perms_data auditallow;
+ struct extended_perms_data dontaudit;
+ struct avc_xperms_node local_xp_node;
+ struct avc_xperms_node *xp_node;
+ int rc = 0, rc2;
+
+ xp_node = &local_xp_node;
+ if (WARN_ON(!requested))
+ return -EACCES;
+
+ rcu_read_lock();
+
+ node = avc_lookup(state->avc, ssid, tsid, tclass);
+ if (unlikely(!node)) {
+ avc_compute_av(state, ssid, tsid, tclass, &avd, xp_node);
+ } else {
+ memcpy(&avd, &node->ae.avd, sizeof(avd));
+ xp_node = node->ae.xp_node;
+ }
+ /* if extended permissions are not defined, only consider av_decision */
+ if (!xp_node || !xp_node->xp.len)
+ goto decision;
+
+ local_xpd.allowed = &allowed;
+ local_xpd.auditallow = &auditallow;
+ local_xpd.dontaudit = &dontaudit;
+
+ xpd = avc_xperms_decision_lookup(driver, xp_node);
+ if (unlikely(!xpd)) {
+ /*
+ * Compute the extended_perms_decision only if the driver
+ * is flagged
+ */
+ if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+ avd.allowed &= ~requested;
+ goto decision;
+ }
+ rcu_read_unlock();
+ security_compute_xperms_decision(state, ssid, tsid, tclass,
+ driver, &local_xpd);
+ rcu_read_lock();
+ avc_update_node(state->avc, AVC_CALLBACK_ADD_XPERMS, requested,
+ driver, xperm, ssid, tsid, tclass, avd.seqno,
+ &local_xpd, 0);
+ } else {
+ avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
+ }
+ xpd = &local_xpd;
+
+ if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
+ avd.allowed &= ~requested;
+
+decision:
+ denied = requested & ~(avd.allowed);
+ if (unlikely(denied))
+ rc = avc_denied(state, ssid, tsid, tclass, requested,
+ driver, xperm, AVC_EXTENDED_PERMS, &avd);
+
+ rcu_read_unlock();
+
+ rc2 = avc_xperms_audit(state, ssid, tsid, tclass, requested,
+ &avd, xpd, xperm, rc, ad);
+ if (rc2)
+ return rc2;
+ return rc;
+}
+
+/**
+ * avc_has_perm_noaudit - Check permissions but perform no auditing.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions, interpreted based on @tclass
+ * @flags: AVC_STRICT or 0
+ * @avd: access vector decisions
+ *
+ * Check the AVC to determine whether the @requested permissions are granted
+ * for the SID pair (@ssid, @tsid), interpreting the permissions
+ * based on @tclass, and call the security server on a cache miss to obtain
+ * a new decision and add it to the cache. Return a copy of the decisions
+ * in @avd. Return %0 if all @requested permissions are granted,
+ * -%EACCES if any permissions are denied, or another -errno upon
+ * other errors. This function is typically called by avc_has_perm(),
+ * but may also be called directly to separate permission checking from
+ * auditing, e.g. in cases where a lock must be held for the check but
+ * should be released for the auditing.
+ */
+inline int avc_has_perm_noaudit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned int flags,
+ struct av_decision *avd)
+{
+ struct avc_node *node;
+ struct avc_xperms_node xp_node;
+ int rc = 0;
+ u32 denied;
+
+ if (WARN_ON(!requested))
+ return -EACCES;
+
+ rcu_read_lock();
+
+ node = avc_lookup(state->avc, ssid, tsid, tclass);
+ if (unlikely(!node))
+ avc_compute_av(state, ssid, tsid, tclass, avd, &xp_node);
+ else
+ memcpy(avd, &node->ae.avd, sizeof(*avd));
+
+ denied = requested & ~(avd->allowed);
+ if (unlikely(denied))
+ rc = avc_denied(state, ssid, tsid, tclass, requested, 0, 0,
+ flags, avd);
+
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * avc_has_perm - Check permissions and perform any appropriate auditing.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions, interpreted based on @tclass
+ * @auditdata: auxiliary audit data
+ *
+ * Check the AVC to determine whether the @requested permissions are granted
+ * for the SID pair (@ssid, @tsid), interpreting the permissions
+ * based on @tclass, and call the security server on a cache miss to obtain
+ * a new decision and add it to the cache. Audit the granting or denial of
+ * permissions in accordance with the policy. Return %0 if all @requested
+ * permissions are granted, -%EACCES if any permissions are denied, or
+ * another -errno upon other errors.
+ */
+int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct common_audit_data *auditdata)
+{
+ struct av_decision avd;
+ int rc, rc2;
+
+ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
+ &avd);
+
+ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
+ auditdata);
+ if (rc2)
+ return rc2;
+ return rc;
+}
+
+u32 avc_policy_seqno(struct selinux_state *state)
+{
+ return state->avc->avc_cache.latest_notif;
+}
+
+void avc_disable(void)
+{
+ /*
+ * If you are looking at this because you have realized that we are
+ * not destroying the avc_node_cachep it might be easy to fix, but
+ * I don't know the memory barrier semantics well enough to know. It's
+ * possible that some other task dereferenced security_ops when
+ * it still pointed to selinux operations. If that is the case it's
+ * possible that it is about to use the avc and is about to need the
+ * avc_node_cachep. I know I could wrap the security.c security_ops call
+ * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
+ * the cache and get that memory back.
+ */
+ if (avc_node_cachep) {
+ avc_flush(selinux_state.avc);
+ /* kmem_cache_destroy(avc_node_cachep); */
+ }
+}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
new file mode 100644
index 000000000..78f3da39b
--- /dev/null
+++ b/security/selinux/hooks.c
@@ -0,0 +1,7570 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NSA Security-Enhanced Linux (SELinux) security module
+ *
+ * This file contains the SELinux hook function implementations.
+ *
+ * Authors: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Chris Vance, <cvance@nai.com>
+ * Wayne Salamon, <wsalamon@nai.com>
+ * James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2001,2002 Networks Associates Technology, Inc.
+ * Copyright (C) 2003-2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Eric Paris <eparis@redhat.com>
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ * <dgoeddel@trustedcs.com>
+ * Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
+ * Paul Moore <paul@paul-moore.com>
+ * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
+ * Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * Copyright (C) 2016 Mellanox Technologies
+ */
+
+#include <linux/init.h>
+#include <linux/kd.h>
+#include <linux/kernel.h>
+#include <linux/kernel_read_file.h>
+#include <linux/errno.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/lsm_hooks.h>
+#include <linux/xattr.h>
+#include <linux/capability.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/swap.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/tty.h>
+#include <net/icmp.h>
+#include <net/ip.h> /* for local_port_range[] */
+#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
+#include <net/net_namespace.h>
+#include <net/netlabel.h>
+#include <linux/uaccess.h>
+#include <asm/ioctls.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h> /* for network interface checks */
+#include <net/netlink.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/sctp.h>
+#include <net/sctp/structs.h>
+#include <linux/quota.h>
+#include <linux/un.h> /* for Unix socket types */
+#include <net/af_unix.h> /* for Unix socket types */
+#include <linux/parser.h>
+#include <linux/nfs_mount.h>
+#include <net/ipv6.h>
+#include <linux/hugetlb.h>
+#include <linux/personality.h>
+#include <linux/audit.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/posix-timers.h>
+#include <linux/syslog.h>
+#include <linux/user_namespace.h>
+#include <linux/export.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/bpf.h>
+#include <linux/kernfs.h>
+#include <linux/stringhash.h> /* for hashlen_string() */
+#include <uapi/linux/mount.h>
+#include <linux/fsnotify.h>
+#include <linux/fanotify.h>
+#include <linux/io_uring.h>
+
+#include "avc.h"
+#include "objsec.h"
+#include "netif.h"
+#include "netnode.h"
+#include "netport.h"
+#include "ibpkey.h"
+#include "xfrm.h"
+#include "netlabel.h"
+#include "audit.h"
+#include "avc_ss.h"
+
+struct selinux_state selinux_state;
+
+/* SECMARK reference count */
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+static int selinux_enforcing_boot __initdata;
+
+static int __init enforcing_setup(char *str)
+{
+	unsigned long enforcing;
+
+	if (!kstrtoul(str, 0, &enforcing))
+ selinux_enforcing_boot = enforcing ? 1 : 0;
+ return 1;
+}
+__setup("enforcing=", enforcing_setup);
+#else
+#define selinux_enforcing_boot 1
+#endif
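+
+/*
+ * Example (illustrative): booting with "enforcing=0" on the kernel command
+ * line starts SELinux in permissive mode (denials are audited but not
+ * enforced); "enforcing=1" requests enforcing mode from boot.
+ */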
+
+int selinux_enabled_boot __initdata = 1;
+#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
+static int __init selinux_enabled_setup(char *str)
+{
+	unsigned long enabled;
+
+	if (!kstrtoul(str, 0, &enabled))
+ selinux_enabled_boot = enabled ? 1 : 0;
+ return 1;
+}
+__setup("selinux=", selinux_enabled_setup);
+#endif
+
+static unsigned int selinux_checkreqprot_boot =
+ CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+
+static int __init checkreqprot_setup(char *str)
+{
+ unsigned long checkreqprot;
+
+ if (!kstrtoul(str, 0, &checkreqprot)) {
+ selinux_checkreqprot_boot = checkreqprot ? 1 : 0;
+ if (checkreqprot)
+ pr_err("SELinux: checkreqprot set to 1 via kernel parameter. This is deprecated and will be rejected in a future kernel release.\n");
+ }
+ return 1;
+}
+__setup("checkreqprot=", checkreqprot_setup);
+
+/**
+ * selinux_secmark_enabled - Check to see if SECMARK is currently enabled
+ *
+ * Description:
+ * This function checks the SECMARK reference counter to see if any SECMARK
+ * targets are currently configured; if the reference counter is greater than
+ * zero, SECMARK is considered to be enabled. Returns true (1) if SECMARK is
+ * enabled, false (0) if SECMARK is disabled. If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
+ *
+ */
+static int selinux_secmark_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork() ||
+ atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
+ * (1) if either is enabled, false (0) if neither is. If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork() ||
+ netlbl_enabled() || selinux_xfrm_enabled());
+}
+
+static int selinux_netcache_avc_callback(u32 event)
+{
+ if (event == AVC_CALLBACK_RESET) {
+ sel_netif_flush();
+ sel_netnode_flush();
+ sel_netport_flush();
+ synchronize_net();
+ }
+ return 0;
+}
+
+static int selinux_lsm_notifier_avc_callback(u32 event)
+{
+ if (event == AVC_CALLBACK_RESET) {
+ sel_ib_pkey_flush();
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * initialise the security for the init task
+ */
+static void cred_init_security(void)
+{
+ struct task_security_struct *tsec;
+
+ tsec = selinux_cred(unrcu_pointer(current->real_cred));
+ tsec->osid = tsec->sid = SECINITSID_KERNEL;
+}
+
+/*
+ * get the security ID of a set of credentials
+ */
+static inline u32 cred_sid(const struct cred *cred)
+{
+ const struct task_security_struct *tsec;
+
+ tsec = selinux_cred(cred);
+ return tsec->sid;
+}
+
+/*
+ * get the objective security ID of a task
+ */
+static inline u32 task_sid_obj(const struct task_struct *task)
+{
+ u32 sid;
+
+ rcu_read_lock();
+ sid = cred_sid(__task_cred(task));
+ rcu_read_unlock();
+ return sid;
+}
+
+static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
+
+/*
+ * Try reloading inode security labels that have been marked as invalid. The
+ * @may_sleep parameter indicates whether sleeping, and thus reloading labels,
+ * is allowed; when set to false, returns -ECHILD when the label is
+ * invalid. The @dentry parameter should be set to a dentry of the inode.
+ */
+static int __inode_security_revalidate(struct inode *inode,
+ struct dentry *dentry,
+ bool may_sleep)
+{
+ struct inode_security_struct *isec = selinux_inode(inode);
+
+ might_sleep_if(may_sleep);
+
+ if (selinux_initialized(&selinux_state) &&
+ isec->initialized != LABEL_INITIALIZED) {
+ if (!may_sleep)
+ return -ECHILD;
+
+ /*
+ * Try reloading the inode security label. This will fail if
+		 * @dentry is NULL and no dentry for this inode can be
+ * found; in that case, continue using the old label.
+ */
+ inode_doinit_with_dentry(inode, dentry);
+ }
+ return 0;
+}
+
+static struct inode_security_struct *inode_security_novalidate(struct inode *inode)
+{
+ return selinux_inode(inode);
+}
+
+static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu)
+{
+ int error;
+
+ error = __inode_security_revalidate(inode, NULL, !rcu);
+ if (error)
+ return ERR_PTR(error);
+ return selinux_inode(inode);
+}
+
+/*
+ * Get the security label of an inode.
+ */
+static struct inode_security_struct *inode_security(struct inode *inode)
+{
+ __inode_security_revalidate(inode, NULL, true);
+ return selinux_inode(inode);
+}
+
+static struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ return selinux_inode(inode);
+}
+
+/*
+ * Get the security label of a dentry's backing inode.
+ */
+static struct inode_security_struct *backing_inode_security(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ __inode_security_revalidate(inode, dentry, true);
+ return selinux_inode(inode);
+}
+
+static void inode_free_security(struct inode *inode)
+{
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct superblock_security_struct *sbsec;
+
+ if (!isec)
+ return;
+ sbsec = selinux_superblock(inode->i_sb);
+ /*
+ * As not all inode security structures are in a list, we check for
+	 * an empty list outside of the lock to make sure that we won't waste
+	 * time taking a lock and doing nothing.
+ *
+ * The list_del_init() function can be safely called more than once.
+ * It should not be possible for this function to be called with
+ * concurrent list_add(), but for better safety against future changes
+ * in the code, we use list_empty_careful() here.
+ */
+ if (!list_empty_careful(&isec->list)) {
+ spin_lock(&sbsec->isec_lock);
+ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+ }
+}
+
+struct selinux_mnt_opts {
+ u32 fscontext_sid;
+ u32 context_sid;
+ u32 rootcontext_sid;
+ u32 defcontext_sid;
+};
+
+static void selinux_free_mnt_opts(void *mnt_opts)
+{
+ kfree(mnt_opts);
+}
+
+enum {
+ Opt_error = -1,
+ Opt_context = 0,
+ Opt_defcontext = 1,
+ Opt_fscontext = 2,
+ Opt_rootcontext = 3,
+ Opt_seclabel = 4,
+};
+
+#define A(s, has_arg) {#s, sizeof(#s) - 1, Opt_##s, has_arg}
+static struct {
+ const char *name;
+ int len;
+ int opt;
+ bool has_arg;
+} tokens[] = {
+ A(context, true),
+ A(fscontext, true),
+ A(defcontext, true),
+ A(rootcontext, true),
+ A(seclabel, false),
+};
+#undef A
+
+static int match_opt_prefix(char *s, int l, char **arg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tokens); i++) {
+ size_t len = tokens[i].len;
+ if (len > l || memcmp(s, tokens[i].name, len))
+ continue;
+ if (tokens[i].has_arg) {
+ if (len == l || s[len] != '=')
+ continue;
+ *arg = s + len + 1;
+ } else if (len != l)
+ continue;
+ return tokens[i].opt;
+ }
+ return Opt_error;
+}
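+
+/*
+ * Example (illustrative): for the option string
+ * "context=system_u:object_r:tmp_t:s0", match_opt_prefix() returns
+ * Opt_context and points *arg at "system_u:object_r:tmp_t:s0"; for the
+ * bare flag "seclabel" it returns Opt_seclabel and leaves *arg untouched;
+ * anything unrecognized yields Opt_error.
+ */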
+
+#define SEL_MOUNT_FAIL_MSG "SELinux: duplicate or incompatible mount options\n"
+
+static int may_context_mount_sb_relabel(u32 sid,
+ struct superblock_security_struct *sbsec,
+ const struct cred *cred)
+{
+ const struct task_security_struct *tsec = selinux_cred(cred);
+ int rc;
+
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ FILESYSTEM__RELABELFROM, NULL);
+ if (rc)
+ return rc;
+
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sid, SECCLASS_FILESYSTEM,
+ FILESYSTEM__RELABELTO, NULL);
+ return rc;
+}
+
+static int may_context_mount_inode_relabel(u32 sid,
+ struct superblock_security_struct *sbsec,
+ const struct cred *cred)
+{
+ const struct task_security_struct *tsec = selinux_cred(cred);
+	int rc;
+
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ FILESYSTEM__RELABELFROM, NULL);
+ if (rc)
+ return rc;
+
+ rc = avc_has_perm(&selinux_state,
+ sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ FILESYSTEM__ASSOCIATE, NULL);
+ return rc;
+}
+
+static int selinux_is_genfs_special_handling(struct super_block *sb)
+{
+	/* Special handling: genfs filesystems that also have an in-core
+	 * setxattr handler */
+ return !strcmp(sb->s_type->name, "sysfs") ||
+ !strcmp(sb->s_type->name, "pstore") ||
+ !strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "tracefs") ||
+ !strcmp(sb->s_type->name, "rootfs") ||
+ (selinux_policycap_cgroupseclabel() &&
+ (!strcmp(sb->s_type->name, "cgroup") ||
+ !strcmp(sb->s_type->name, "cgroup2")));
+}
+
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+
+ /*
+ * IMPORTANT: Double-check logic in this function when adding a new
+ * SECURITY_FS_USE_* definition!
+ */
+ BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
+
+ switch (sbsec->behavior) {
+ case SECURITY_FS_USE_XATTR:
+ case SECURITY_FS_USE_TRANS:
+ case SECURITY_FS_USE_TASK:
+ case SECURITY_FS_USE_NATIVE:
+ return 1;
+
+ case SECURITY_FS_USE_GENFS:
+ return selinux_is_genfs_special_handling(sb);
+
+ /* Never allow relabeling on context mounts */
+ case SECURITY_FS_USE_MNTPOINT:
+ case SECURITY_FS_USE_NONE:
+ default:
+ return 0;
+ }
+}
+
+static int sb_check_xattr_support(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ struct dentry *root = sb->s_root;
+ struct inode *root_inode = d_backing_inode(root);
+ u32 sid;
+ int rc;
+
+ /*
+ * Make sure that the xattr handler exists and that no
+ * error other than -ENODATA is returned by getxattr on
+ * the root directory. -ENODATA is ok, as this may be
+ * the first boot of the SELinux kernel before we have
+ * assigned xattr values to the filesystem.
+ */
+ if (!(root_inode->i_opflags & IOP_XATTR)) {
+ pr_warn("SELinux: (dev %s, type %s) has no xattr support\n",
+ sb->s_id, sb->s_type->name);
+ goto fallback;
+ }
+
+ rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL, 0);
+ if (rc < 0 && rc != -ENODATA) {
+ if (rc == -EOPNOTSUPP) {
+ pr_warn("SELinux: (dev %s, type %s) has no security xattr handler\n",
+ sb->s_id, sb->s_type->name);
+ goto fallback;
+ } else {
+ pr_warn("SELinux: (dev %s, type %s) getxattr errno %d\n",
+ sb->s_id, sb->s_type->name, -rc);
+ return rc;
+ }
+ }
+ return 0;
+
+fallback:
+ /* No xattr support - try to fallback to genfs if possible. */
+ rc = security_genfs_sid(&selinux_state, sb->s_type->name, "/",
+ SECCLASS_DIR, &sid);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ pr_warn("SELinux: (dev %s, type %s) falling back to genfs\n",
+ sb->s_id, sb->s_type->name);
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
+ sbsec->sid = sid;
+ return 0;
+}
+
+static int sb_finish_set_opts(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ struct dentry *root = sb->s_root;
+ struct inode *root_inode = d_backing_inode(root);
+ int rc = 0;
+
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
+ rc = sb_check_xattr_support(sb);
+ if (rc)
+ return rc;
+ }
+
+ sbsec->flags |= SE_SBINITIALIZED;
+
+ /*
+ * Explicitly set or clear SBLABEL_MNT. It's not sufficient to simply
+ * leave the flag untouched because sb_clone_mnt_opts might be handing
+ * us a superblock that needs the flag to be cleared.
+ */
+ if (selinux_is_sblabel_mnt(sb))
+ sbsec->flags |= SBLABEL_MNT;
+ else
+ sbsec->flags &= ~SBLABEL_MNT;
+
+ /* Initialize the root inode. */
+ rc = inode_doinit_with_dentry(root_inode, root);
+
+ /* Initialize any other inodes associated with the superblock, e.g.
+ inodes created prior to initial policy load or inodes created
+ during get_sb by a pseudo filesystem that directly
+ populates itself. */
+ spin_lock(&sbsec->isec_lock);
+ while (!list_empty(&sbsec->isec_head)) {
+ struct inode_security_struct *isec =
+ list_first_entry(&sbsec->isec_head,
+ struct inode_security_struct, list);
+ struct inode *inode = isec->inode;
+ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+ inode = igrab(inode);
+ if (inode) {
+ if (!IS_PRIVATE(inode))
+ inode_doinit_with_dentry(inode, NULL);
+ iput(inode);
+ }
+ spin_lock(&sbsec->isec_lock);
+ }
+ spin_unlock(&sbsec->isec_lock);
+ return rc;
+}
+
+static int bad_option(struct superblock_security_struct *sbsec, char flag,
+ u32 old_sid, u32 new_sid)
+{
+ char mnt_flags = sbsec->flags & SE_MNTMASK;
+
+ /* check if the old mount command had the same options */
+ if (sbsec->flags & SE_SBINITIALIZED)
+ if (!(sbsec->flags & flag) ||
+ (old_sid != new_sid))
+ return 1;
+
+ /* check if we were passed the same options twice,
+ * aka someone passed context=a,context=b
+ */
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ if (mnt_flags & flag)
+ return 1;
+ return 0;
+}
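+
+/*
+ * Example (illustrative): if this superblock was first mounted with
+ * "context=A" and a second mount of the same superblock passes
+ * "context=B", old_sid differs from new_sid and bad_option() returns 1,
+ * so the caller rejects the mount with -EINVAL.
+ */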
+
+/*
+ * Allow filesystems with binary mount data to explicitly set mount point
+ * labeling information.
+ */
+static int selinux_set_mnt_opts(struct super_block *sb,
+ void *mnt_opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ const struct cred *cred = current_cred();
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ struct dentry *root = sb->s_root;
+ struct selinux_mnt_opts *opts = mnt_opts;
+ struct inode_security_struct *root_isec;
+ u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
+ u32 defcontext_sid = 0;
+ int rc = 0;
+
+ mutex_lock(&sbsec->lock);
+
+ if (!selinux_initialized(&selinux_state)) {
+ if (!opts) {
+ /* Defer initialization until selinux_complete_init,
+ after the initial policy is loaded and the security
+ server is ready to handle calls. */
+ goto out;
+ }
+ rc = -EINVAL;
+ pr_warn("SELinux: Unable to set superblock options "
+ "before the security server is initialized\n");
+ goto out;
+ }
+ if (kern_flags && !set_kern_flags) {
+		/* Specifying internal flags without providing a place to
+		 * put the results is not allowed */
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * Filesystems with binary mount data will come through this function
+	 * twice: once from an explicit call and once from the generic calls
+	 * from the VFS. Since the generic VFS calls will not contain any
+	 * security mount data, we need to skip the double mount verification.
+	 *
+	 * This does open a hole in which we will not notice if the first
+	 * mount using this sb set explicit options and a second mount using
+	 * this sb does not set any security options. (The first mount's
+	 * options will be used for both mounts.)
+ */
+ if ((sbsec->flags & SE_SBINITIALIZED) && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
+ && !opts)
+ goto out;
+
+ root_isec = backing_inode_security_novalidate(root);
+
+ /*
+	 * Parse the mount options and check that they are valid SIDs; also
+	 * check whether someone is trying to mount the same sb more than
+	 * once with different security options.
+ */
+ if (opts) {
+ if (opts->fscontext_sid) {
+ fscontext_sid = opts->fscontext_sid;
+ if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
+ fscontext_sid))
+ goto out_double_mount;
+ sbsec->flags |= FSCONTEXT_MNT;
+ }
+ if (opts->context_sid) {
+ context_sid = opts->context_sid;
+ if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
+ context_sid))
+ goto out_double_mount;
+ sbsec->flags |= CONTEXT_MNT;
+ }
+ if (opts->rootcontext_sid) {
+ rootcontext_sid = opts->rootcontext_sid;
+ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
+ rootcontext_sid))
+ goto out_double_mount;
+ sbsec->flags |= ROOTCONTEXT_MNT;
+ }
+ if (opts->defcontext_sid) {
+ defcontext_sid = opts->defcontext_sid;
+ if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
+ defcontext_sid))
+ goto out_double_mount;
+ sbsec->flags |= DEFCONTEXT_MNT;
+ }
+ }
+
+ if (sbsec->flags & SE_SBINITIALIZED) {
+ /* previously mounted with options, but not on this attempt? */
+ if ((sbsec->flags & SE_MNTMASK) && !opts)
+ goto out_double_mount;
+ rc = 0;
+ goto out;
+ }
+
+ if (strcmp(sb->s_type->name, "proc") == 0)
+ sbsec->flags |= SE_SBPROC | SE_SBGENFS;
+
+ if (!strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "tracefs") ||
+ !strcmp(sb->s_type->name, "binder") ||
+ !strcmp(sb->s_type->name, "bpf") ||
+ !strcmp(sb->s_type->name, "pstore") ||
+ !strcmp(sb->s_type->name, "securityfs"))
+ sbsec->flags |= SE_SBGENFS;
+
+ if (!strcmp(sb->s_type->name, "sysfs") ||
+ !strcmp(sb->s_type->name, "cgroup") ||
+ !strcmp(sb->s_type->name, "cgroup2"))
+ sbsec->flags |= SE_SBGENFS | SE_SBGENFS_XATTR;
+
+ if (!sbsec->behavior) {
+ /*
+ * Determine the labeling behavior to use for this
+ * filesystem type.
+ */
+ rc = security_fs_use(&selinux_state, sb);
+ if (rc) {
+ pr_warn("%s: security_fs_use(%s) returned %d\n",
+ __func__, sb->s_type->name, rc);
+ goto out;
+ }
+ }
+
+ /*
+ * If this is a user namespace mount and the filesystem type is not
+ * explicitly whitelisted, then no contexts are allowed on the command
+ * line and security labels must be ignored.
+ */
+ if (sb->s_user_ns != &init_user_ns &&
+ strcmp(sb->s_type->name, "tmpfs") &&
+ strcmp(sb->s_type->name, "ramfs") &&
+ strcmp(sb->s_type->name, "devpts") &&
+ strcmp(sb->s_type->name, "overlay")) {
+ if (context_sid || fscontext_sid || rootcontext_sid ||
+ defcontext_sid) {
+ rc = -EACCES;
+ goto out;
+ }
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
+ sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
+ rc = security_transition_sid(&selinux_state,
+ current_sid(),
+ current_sid(),
+ SECCLASS_FILE, NULL,
+ &sbsec->mntpoint_sid);
+ if (rc)
+ goto out;
+ }
+ goto out_set_opts;
+ }
+
+ /* sets the context of the superblock for the fs being mounted. */
+ if (fscontext_sid) {
+ rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred);
+ if (rc)
+ goto out;
+
+ sbsec->sid = fscontext_sid;
+ }
+
+ /*
+	 * Switch to using mount point labeling behavior. This sets the label
+	 * used on all files below the mountpoint, and will set the superblock
+	 * context if not already set.
+ */
+ if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !context_sid) {
+ sbsec->behavior = SECURITY_FS_USE_NATIVE;
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ }
+
+ if (context_sid) {
+ if (!fscontext_sid) {
+ rc = may_context_mount_sb_relabel(context_sid, sbsec,
+ cred);
+ if (rc)
+ goto out;
+ sbsec->sid = context_sid;
+ } else {
+ rc = may_context_mount_inode_relabel(context_sid, sbsec,
+ cred);
+ if (rc)
+ goto out;
+ }
+ if (!rootcontext_sid)
+ rootcontext_sid = context_sid;
+
+ sbsec->mntpoint_sid = context_sid;
+ sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
+ }
+
+ if (rootcontext_sid) {
+ rc = may_context_mount_inode_relabel(rootcontext_sid, sbsec,
+ cred);
+ if (rc)
+ goto out;
+
+ root_isec->sid = rootcontext_sid;
+ root_isec->initialized = LABEL_INITIALIZED;
+ }
+
+ if (defcontext_sid) {
+ if (sbsec->behavior != SECURITY_FS_USE_XATTR &&
+ sbsec->behavior != SECURITY_FS_USE_NATIVE) {
+ rc = -EINVAL;
+ pr_warn("SELinux: defcontext option is "
+ "invalid for this filesystem type\n");
+ goto out;
+ }
+
+ if (defcontext_sid != sbsec->def_sid) {
+ rc = may_context_mount_inode_relabel(defcontext_sid,
+ sbsec, cred);
+ if (rc)
+ goto out;
+ }
+
+ sbsec->def_sid = defcontext_sid;
+ }
+
+out_set_opts:
+ rc = sb_finish_set_opts(sb);
+out:
+ mutex_unlock(&sbsec->lock);
+ return rc;
+out_double_mount:
+ rc = -EINVAL;
+ pr_warn("SELinux: mount invalid. Same superblock, different "
+ "security settings for (dev %s, type %s)\n", sb->s_id,
+ sb->s_type->name);
+ goto out;
+}
+
+static int selinux_cmp_sb_context(const struct super_block *oldsb,
+ const struct super_block *newsb)
+{
+ struct superblock_security_struct *old = selinux_superblock(oldsb);
+ struct superblock_security_struct *new = selinux_superblock(newsb);
+ char oldflags = old->flags & SE_MNTMASK;
+ char newflags = new->flags & SE_MNTMASK;
+
+ if (oldflags != newflags)
+ goto mismatch;
+ if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid)
+ goto mismatch;
+ if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid)
+ goto mismatch;
+ if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
+ goto mismatch;
+ if (oldflags & ROOTCONTEXT_MNT) {
+ struct inode_security_struct *oldroot = backing_inode_security(oldsb->s_root);
+ struct inode_security_struct *newroot = backing_inode_security(newsb->s_root);
+ if (oldroot->sid != newroot->sid)
+ goto mismatch;
+ }
+ return 0;
+mismatch:
+ pr_warn("SELinux: mount invalid. Same superblock, "
+ "different security settings for (dev %s, "
+ "type %s)\n", newsb->s_id, newsb->s_type->name);
+ return -EBUSY;
+}
+
+static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
+ struct super_block *newsb,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ int rc = 0;
+ const struct superblock_security_struct *oldsbsec =
+ selinux_superblock(oldsb);
+ struct superblock_security_struct *newsbsec = selinux_superblock(newsb);
+
+ int set_fscontext = (oldsbsec->flags & FSCONTEXT_MNT);
+ int set_context = (oldsbsec->flags & CONTEXT_MNT);
+ int set_rootcontext = (oldsbsec->flags & ROOTCONTEXT_MNT);
+
+ /*
+	 * If the parent was able to be mounted it clearly had no special LSM
+	 * mount options; thus we can safely deal with this superblock later.
+ */
+ if (!selinux_initialized(&selinux_state))
+ return 0;
+
+ /*
+	 * Specifying internal flags without providing a place to
+	 * put the results is not allowed.
+ */
+ if (kern_flags && !set_kern_flags)
+ return -EINVAL;
+
+ /* how can we clone if the old one wasn't set up?? */
+ BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
+
+ /* if fs is reusing a sb, make sure that the contexts match */
+ if (newsbsec->flags & SE_SBINITIALIZED) {
+ if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ return selinux_cmp_sb_context(oldsb, newsb);
+ }
+
+ mutex_lock(&newsbsec->lock);
+
+ newsbsec->flags = oldsbsec->flags;
+
+ newsbsec->sid = oldsbsec->sid;
+ newsbsec->def_sid = oldsbsec->def_sid;
+ newsbsec->behavior = oldsbsec->behavior;
+
+ if (newsbsec->behavior == SECURITY_FS_USE_NATIVE &&
+ !(kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context) {
+ rc = security_fs_use(&selinux_state, newsb);
+ if (rc)
+ goto out;
+ }
+
+ if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !set_context) {
+ newsbsec->behavior = SECURITY_FS_USE_NATIVE;
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ }
+
+ if (set_context) {
+ u32 sid = oldsbsec->mntpoint_sid;
+
+ if (!set_fscontext)
+ newsbsec->sid = sid;
+ if (!set_rootcontext) {
+ struct inode_security_struct *newisec = backing_inode_security(newsb->s_root);
+ newisec->sid = sid;
+ }
+ newsbsec->mntpoint_sid = sid;
+ }
+ if (set_rootcontext) {
+ const struct inode_security_struct *oldisec = backing_inode_security(oldsb->s_root);
+ struct inode_security_struct *newisec = backing_inode_security(newsb->s_root);
+
+ newisec->sid = oldisec->sid;
+ }
+
+ sb_finish_set_opts(newsb);
+out:
+ mutex_unlock(&newsbsec->lock);
+ return rc;
+}
+
+/*
+ * NOTE: the caller is responsible for freeing the memory even on error.
+ */
+static int selinux_add_opt(int token, const char *s, void **mnt_opts)
+{
+ struct selinux_mnt_opts *opts = *mnt_opts;
+ u32 *dst_sid;
+ int rc;
+
+ if (token == Opt_seclabel)
+ /* eaten and completely ignored */
+ return 0;
+ if (!s)
+ return -EINVAL;
+
+ if (!selinux_initialized(&selinux_state)) {
+ pr_warn("SELinux: Unable to set superblock options before the security server is initialized\n");
+ return -EINVAL;
+ }
+
+ if (!opts) {
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+ *mnt_opts = opts;
+ }
+
+ switch (token) {
+ case Opt_context:
+ if (opts->context_sid || opts->defcontext_sid)
+ goto err;
+ dst_sid = &opts->context_sid;
+ break;
+ case Opt_fscontext:
+ if (opts->fscontext_sid)
+ goto err;
+ dst_sid = &opts->fscontext_sid;
+ break;
+ case Opt_rootcontext:
+ if (opts->rootcontext_sid)
+ goto err;
+ dst_sid = &opts->rootcontext_sid;
+ break;
+ case Opt_defcontext:
+ if (opts->context_sid || opts->defcontext_sid)
+ goto err;
+ dst_sid = &opts->defcontext_sid;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ rc = security_context_str_to_sid(&selinux_state, s, dst_sid, GFP_KERNEL);
+ if (rc)
+ pr_warn("SELinux: security_context_str_to_sid (%s) failed with errno=%d\n",
+ s, rc);
+ return rc;
+
+err:
+ pr_warn(SEL_MOUNT_FAIL_MSG);
+ return -EINVAL;
+}
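+
+/*
+ * Illustrative call sequence: mounting with
+ * "-o context=CTX1,fscontext=CTX2" results in one selinux_add_opt() call
+ * per token. Conflicting combinations are rejected here: context=
+ * together with defcontext=, or the same option given twice, fails with
+ * -EINVAL and SEL_MOUNT_FAIL_MSG.
+ */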
+
+static int show_sid(struct seq_file *m, u32 sid)
+{
+ char *context = NULL;
+ u32 len;
+ int rc;
+
+ rc = security_sid_to_context(&selinux_state, sid,
+ &context, &len);
+ if (!rc) {
+ bool has_comma = strchr(context, ',');
+
+ seq_putc(m, '=');
+ if (has_comma)
+ seq_putc(m, '\"');
+ seq_escape(m, context, "\"\n\\");
+ if (has_comma)
+ seq_putc(m, '\"');
+ }
+ kfree(context);
+ return rc;
+}
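+
+/*
+ * Example (illustrative): for a SID whose context string is
+ * "system_u:object_r:tmp_t:s0", show_sid() emits
+ * "=system_u:object_r:tmp_t:s0"; a context containing a comma is
+ * additionally wrapped in double quotes, with '"', newline and backslash
+ * escaped by seq_escape().
+ */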
+
+static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ int rc;
+
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ return 0;
+
+ if (!selinux_initialized(&selinux_state))
+ return 0;
+
+ if (sbsec->flags & FSCONTEXT_MNT) {
+ seq_putc(m, ',');
+ seq_puts(m, FSCONTEXT_STR);
+ rc = show_sid(m, sbsec->sid);
+ if (rc)
+ return rc;
+ }
+ if (sbsec->flags & CONTEXT_MNT) {
+ seq_putc(m, ',');
+ seq_puts(m, CONTEXT_STR);
+ rc = show_sid(m, sbsec->mntpoint_sid);
+ if (rc)
+ return rc;
+ }
+ if (sbsec->flags & DEFCONTEXT_MNT) {
+ seq_putc(m, ',');
+ seq_puts(m, DEFCONTEXT_STR);
+ rc = show_sid(m, sbsec->def_sid);
+ if (rc)
+ return rc;
+ }
+ if (sbsec->flags & ROOTCONTEXT_MNT) {
+ struct dentry *root = sb->s_root;
+ struct inode_security_struct *isec = backing_inode_security(root);
+ seq_putc(m, ',');
+ seq_puts(m, ROOTCONTEXT_STR);
+ rc = show_sid(m, isec->sid);
+ if (rc)
+ return rc;
+ }
+ if (sbsec->flags & SBLABEL_MNT) {
+ seq_putc(m, ',');
+ seq_puts(m, SECLABEL_STR);
+ }
+ return 0;
+}
+
+static inline u16 inode_mode_to_security_class(umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFSOCK:
+ return SECCLASS_SOCK_FILE;
+ case S_IFLNK:
+ return SECCLASS_LNK_FILE;
+ case S_IFREG:
+ return SECCLASS_FILE;
+ case S_IFBLK:
+ return SECCLASS_BLK_FILE;
+ case S_IFDIR:
+ return SECCLASS_DIR;
+ case S_IFCHR:
+ return SECCLASS_CHR_FILE;
+ case S_IFIFO:
+ return SECCLASS_FIFO_FILE;
+	}
+
+ return SECCLASS_FILE;
+}
+
+static inline int default_protocol_stream(int protocol)
+{
+ return (protocol == IPPROTO_IP || protocol == IPPROTO_TCP ||
+ protocol == IPPROTO_MPTCP);
+}
+
+static inline int default_protocol_dgram(int protocol)
+{
+ return (protocol == IPPROTO_IP || protocol == IPPROTO_UDP);
+}
+
+static inline u16 socket_type_to_security_class(int family, int type, int protocol)
+{
+ int extsockclass = selinux_policycap_extsockclass();
+
+ switch (family) {
+ case PF_UNIX:
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ return SECCLASS_UNIX_STREAM_SOCKET;
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ return SECCLASS_UNIX_DGRAM_SOCKET;
+ }
+ break;
+ case PF_INET:
+ case PF_INET6:
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ if (default_protocol_stream(protocol))
+ return SECCLASS_TCP_SOCKET;
+ else if (extsockclass && protocol == IPPROTO_SCTP)
+ return SECCLASS_SCTP_SOCKET;
+ else
+ return SECCLASS_RAWIP_SOCKET;
+ case SOCK_DGRAM:
+ if (default_protocol_dgram(protocol))
+ return SECCLASS_UDP_SOCKET;
+ else if (extsockclass && (protocol == IPPROTO_ICMP ||
+ protocol == IPPROTO_ICMPV6))
+ return SECCLASS_ICMP_SOCKET;
+ else
+ return SECCLASS_RAWIP_SOCKET;
+ case SOCK_DCCP:
+ return SECCLASS_DCCP_SOCKET;
+ default:
+ return SECCLASS_RAWIP_SOCKET;
+ }
+ break;
+ case PF_NETLINK:
+ switch (protocol) {
+ case NETLINK_ROUTE:
+ return SECCLASS_NETLINK_ROUTE_SOCKET;
+ case NETLINK_SOCK_DIAG:
+ return SECCLASS_NETLINK_TCPDIAG_SOCKET;
+ case NETLINK_NFLOG:
+ return SECCLASS_NETLINK_NFLOG_SOCKET;
+ case NETLINK_XFRM:
+ return SECCLASS_NETLINK_XFRM_SOCKET;
+ case NETLINK_SELINUX:
+ return SECCLASS_NETLINK_SELINUX_SOCKET;
+ case NETLINK_ISCSI:
+ return SECCLASS_NETLINK_ISCSI_SOCKET;
+ case NETLINK_AUDIT:
+ return SECCLASS_NETLINK_AUDIT_SOCKET;
+ case NETLINK_FIB_LOOKUP:
+ return SECCLASS_NETLINK_FIB_LOOKUP_SOCKET;
+ case NETLINK_CONNECTOR:
+ return SECCLASS_NETLINK_CONNECTOR_SOCKET;
+ case NETLINK_NETFILTER:
+ return SECCLASS_NETLINK_NETFILTER_SOCKET;
+ case NETLINK_DNRTMSG:
+ return SECCLASS_NETLINK_DNRT_SOCKET;
+ case NETLINK_KOBJECT_UEVENT:
+ return SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET;
+ case NETLINK_GENERIC:
+ return SECCLASS_NETLINK_GENERIC_SOCKET;
+ case NETLINK_SCSITRANSPORT:
+ return SECCLASS_NETLINK_SCSITRANSPORT_SOCKET;
+ case NETLINK_RDMA:
+ return SECCLASS_NETLINK_RDMA_SOCKET;
+ case NETLINK_CRYPTO:
+ return SECCLASS_NETLINK_CRYPTO_SOCKET;
+ default:
+ return SECCLASS_NETLINK_SOCKET;
+ }
+ case PF_PACKET:
+ return SECCLASS_PACKET_SOCKET;
+ case PF_KEY:
+ return SECCLASS_KEY_SOCKET;
+ case PF_APPLETALK:
+ return SECCLASS_APPLETALK_SOCKET;
+ }
+
+ if (extsockclass) {
+ switch (family) {
+ case PF_AX25:
+ return SECCLASS_AX25_SOCKET;
+ case PF_IPX:
+ return SECCLASS_IPX_SOCKET;
+ case PF_NETROM:
+ return SECCLASS_NETROM_SOCKET;
+ case PF_ATMPVC:
+ return SECCLASS_ATMPVC_SOCKET;
+ case PF_X25:
+ return SECCLASS_X25_SOCKET;
+ case PF_ROSE:
+ return SECCLASS_ROSE_SOCKET;
+ case PF_DECnet:
+ return SECCLASS_DECNET_SOCKET;
+ case PF_ATMSVC:
+ return SECCLASS_ATMSVC_SOCKET;
+ case PF_RDS:
+ return SECCLASS_RDS_SOCKET;
+ case PF_IRDA:
+ return SECCLASS_IRDA_SOCKET;
+ case PF_PPPOX:
+ return SECCLASS_PPPOX_SOCKET;
+ case PF_LLC:
+ return SECCLASS_LLC_SOCKET;
+ case PF_CAN:
+ return SECCLASS_CAN_SOCKET;
+ case PF_TIPC:
+ return SECCLASS_TIPC_SOCKET;
+ case PF_BLUETOOTH:
+ return SECCLASS_BLUETOOTH_SOCKET;
+ case PF_IUCV:
+ return SECCLASS_IUCV_SOCKET;
+ case PF_RXRPC:
+ return SECCLASS_RXRPC_SOCKET;
+ case PF_ISDN:
+ return SECCLASS_ISDN_SOCKET;
+ case PF_PHONET:
+ return SECCLASS_PHONET_SOCKET;
+ case PF_IEEE802154:
+ return SECCLASS_IEEE802154_SOCKET;
+ case PF_CAIF:
+ return SECCLASS_CAIF_SOCKET;
+ case PF_ALG:
+ return SECCLASS_ALG_SOCKET;
+ case PF_NFC:
+ return SECCLASS_NFC_SOCKET;
+ case PF_VSOCK:
+ return SECCLASS_VSOCK_SOCKET;
+ case PF_KCM:
+ return SECCLASS_KCM_SOCKET;
+ case PF_QIPCRTR:
+ return SECCLASS_QIPCRTR_SOCKET;
+ case PF_SMC:
+ return SECCLASS_SMC_SOCKET;
+ case PF_XDP:
+ return SECCLASS_XDP_SOCKET;
+ case PF_MCTP:
+ return SECCLASS_MCTP_SOCKET;
+#if PF_MAX > 46
+#error New address family defined, please update this function.
+#endif
+ }
+ }
+
+ return SECCLASS_SOCKET;
+}
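+
+/*
+ * Examples (illustrative): socket(AF_INET, SOCK_STREAM, 0) maps to
+ * SECCLASS_TCP_SOCKET; socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP) maps to
+ * SECCLASS_ICMP_SOCKET when the extended_socket_class policy capability
+ * is enabled and to SECCLASS_RAWIP_SOCKET otherwise; and
+ * socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT) maps to
+ * SECCLASS_NETLINK_AUDIT_SOCKET.
+ */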
+
+static int selinux_genfs_get_sid(struct dentry *dentry,
+ u16 tclass,
+ u16 flags,
+ u32 *sid)
+{
+ int rc;
+ struct super_block *sb = dentry->d_sb;
+ char *buffer, *path;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
+ if (IS_ERR(path))
+ rc = PTR_ERR(path);
+ else {
+ if (flags & SE_SBPROC) {
+			/* Each process gets a /proc/PID/ entry. Strip off the
+			 * PID part to get a valid SELinux label,
+			 * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
+ while (path[1] >= '0' && path[1] <= '9') {
+ path[1] = '/';
+ path++;
+ }
+ }
+ rc = security_genfs_sid(&selinux_state, sb->s_type->name,
+ path, tclass, sid);
+ if (rc == -ENOENT) {
+ /* No match in policy, mark as unlabeled. */
+ *sid = SECINITSID_UNLABELED;
+ rc = 0;
+ }
+ }
+ free_page((unsigned long)buffer);
+ return rc;
+}
+
+static int inode_doinit_use_xattr(struct inode *inode, struct dentry *dentry,
+ u32 def_sid, u32 *sid)
+{
+#define INITCONTEXTLEN 255
+ char *context;
+ unsigned int len;
+ int rc;
+
+ len = INITCONTEXTLEN;
+ context = kmalloc(len + 1, GFP_NOFS);
+ if (!context)
+ return -ENOMEM;
+
+ context[len] = '\0';
+ rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
+ if (rc == -ERANGE) {
+ kfree(context);
+
+ /* Need a larger buffer. Query for the right size. */
+ rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0);
+ if (rc < 0)
+ return rc;
+
+ len = rc;
+ context = kmalloc(len + 1, GFP_NOFS);
+ if (!context)
+ return -ENOMEM;
+
+ context[len] = '\0';
+ rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX,
+ context, len);
+ }
+ if (rc < 0) {
+ kfree(context);
+ if (rc != -ENODATA) {
+ pr_warn("SELinux: %s: getxattr returned %d for dev=%s ino=%ld\n",
+ __func__, -rc, inode->i_sb->s_id, inode->i_ino);
+ return rc;
+ }
+ *sid = def_sid;
+ return 0;
+ }
+
+ rc = security_context_to_sid_default(&selinux_state, context, rc, sid,
+ def_sid, GFP_NOFS);
+ if (rc) {
+ char *dev = inode->i_sb->s_id;
+ unsigned long ino = inode->i_ino;
+
+ if (rc == -EINVAL) {
+ pr_notice_ratelimited("SELinux: inode=%lu on dev=%s was found to have an invalid context=%s. This indicates you may need to relabel the inode or the filesystem in question.\n",
+ ino, dev, context);
+ } else {
+ pr_warn("SELinux: %s: context_to_sid(%s) returned %d for dev=%s ino=%ld\n",
+ __func__, context, -rc, dev, ino);
+ }
+ }
+ kfree(context);
+ return 0;
+}
+
+/* The inode's security attributes must be initialized before first use. */
+static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
+{
+ struct superblock_security_struct *sbsec = NULL;
+ struct inode_security_struct *isec = selinux_inode(inode);
+ u32 task_sid, sid = 0;
+ u16 sclass;
+ struct dentry *dentry;
+ int rc = 0;
+
+ if (isec->initialized == LABEL_INITIALIZED)
+ return 0;
+
+ spin_lock(&isec->lock);
+ if (isec->initialized == LABEL_INITIALIZED)
+ goto out_unlock;
+
+ if (isec->sclass == SECCLASS_FILE)
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+
+ sbsec = selinux_superblock(inode->i_sb);
+ if (!(sbsec->flags & SE_SBINITIALIZED)) {
+ /* Defer initialization until selinux_complete_init,
+ after the initial policy is loaded and the security
+ server is ready to handle calls. */
+ spin_lock(&sbsec->isec_lock);
+ if (list_empty(&isec->list))
+ list_add(&isec->list, &sbsec->isec_head);
+ spin_unlock(&sbsec->isec_lock);
+ goto out_unlock;
+ }
+
+ sclass = isec->sclass;
+ task_sid = isec->task_sid;
+ sid = isec->sid;
+ isec->initialized = LABEL_PENDING;
+ spin_unlock(&isec->lock);
+
+ switch (sbsec->behavior) {
+ case SECURITY_FS_USE_NATIVE:
+ break;
+ case SECURITY_FS_USE_XATTR:
+ if (!(inode->i_opflags & IOP_XATTR)) {
+ sid = sbsec->def_sid;
+ break;
+ }
+ /* Need a dentry, since the xattr API requires one.
+ Life would be simpler if we could just pass the inode. */
+ if (opt_dentry) {
+ /* Called from d_instantiate or d_splice_alias. */
+ dentry = dget(opt_dentry);
+ } else {
+ /*
+ * Called from selinux_complete_init, try to find a dentry.
+ * Some filesystems really want a connected one, so try
+ * that first. We could split SECURITY_FS_USE_XATTR in
+ * two, depending upon that...
+ */
+ dentry = d_find_alias(inode);
+ if (!dentry)
+ dentry = d_find_any_alias(inode);
+ }
+ if (!dentry) {
+ /*
+			 * This can be hit on boot when a file is accessed
+ * before the policy is loaded. When we load policy we
+ * may find inodes that have no dentry on the
+ * sbsec->isec_head list. No reason to complain as these
+ * will get fixed up the next time we go through
+ * inode_doinit with a dentry, before these inodes could
+ * be used again by userspace.
+ */
+ goto out_invalid;
+ }
+
+ rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid,
+ &sid);
+ dput(dentry);
+ if (rc)
+ goto out;
+ break;
+ case SECURITY_FS_USE_TASK:
+ sid = task_sid;
+ break;
+ case SECURITY_FS_USE_TRANS:
+ /* Default to the fs SID. */
+ sid = sbsec->sid;
+
+ /* Try to obtain a transition SID. */
+ rc = security_transition_sid(&selinux_state, task_sid, sid,
+ sclass, NULL, &sid);
+ if (rc)
+ goto out;
+ break;
+ case SECURITY_FS_USE_MNTPOINT:
+ sid = sbsec->mntpoint_sid;
+ break;
+ default:
+ /* Default to the fs superblock SID. */
+ sid = sbsec->sid;
+
+ if ((sbsec->flags & SE_SBGENFS) &&
+ (!S_ISLNK(inode->i_mode) ||
+ selinux_policycap_genfs_seclabel_symlinks())) {
+ /* We must have a dentry to determine the label on
+ * procfs inodes */
+ if (opt_dentry) {
+ /* Called from d_instantiate or
+ * d_splice_alias. */
+ dentry = dget(opt_dentry);
+ } else {
+ /* Called from selinux_complete_init, try to
+ * find a dentry. Some filesystems really want
+ * a connected one, so try that first.
+ */
+ dentry = d_find_alias(inode);
+ if (!dentry)
+ dentry = d_find_any_alias(inode);
+ }
+ /*
+ * This can be hit on boot when a file is accessed
+ * before the policy is loaded. When we load policy we
+ * may find inodes that have no dentry on the
+ * sbsec->isec_head list. No reason to complain as
+ * these will get fixed up the next time we go through
+ * inode_doinit() with a dentry, before these inodes
+ * could be used again by userspace.
+ */
+ if (!dentry)
+ goto out_invalid;
+ rc = selinux_genfs_get_sid(dentry, sclass,
+ sbsec->flags, &sid);
+ if (rc) {
+ dput(dentry);
+ goto out;
+ }
+
+ if ((sbsec->flags & SE_SBGENFS_XATTR) &&
+ (inode->i_opflags & IOP_XATTR)) {
+ rc = inode_doinit_use_xattr(inode, dentry,
+ sid, &sid);
+ if (rc) {
+ dput(dentry);
+ goto out;
+ }
+ }
+ dput(dentry);
+ }
+ break;
+ }
+
+out:
+ spin_lock(&isec->lock);
+ if (isec->initialized == LABEL_PENDING) {
+ if (rc) {
+ isec->initialized = LABEL_INVALID;
+ goto out_unlock;
+ }
+ isec->initialized = LABEL_INITIALIZED;
+ isec->sid = sid;
+ }
+
+out_unlock:
+ spin_unlock(&isec->lock);
+ return rc;
+
+out_invalid:
+ spin_lock(&isec->lock);
+ if (isec->initialized == LABEL_PENDING) {
+ isec->initialized = LABEL_INVALID;
+ isec->sid = sid;
+ }
+ spin_unlock(&isec->lock);
+ return 0;
+}
+
+/* Convert a Linux signal to an access vector. */
+static inline u32 signal_to_av(int sig)
+{
+ u32 perm = 0;
+
+ switch (sig) {
+ case SIGCHLD:
+ /* Commonly granted from child to parent. */
+ perm = PROCESS__SIGCHLD;
+ break;
+ case SIGKILL:
+ /* Cannot be caught or ignored */
+ perm = PROCESS__SIGKILL;
+ break;
+ case SIGSTOP:
+ /* Cannot be caught or ignored */
+ perm = PROCESS__SIGSTOP;
+ break;
+ default:
+ /* All other signals. */
+ perm = PROCESS__SIGNAL;
+ break;
+ }
+
+ return perm;
+}
+
+#if CAP_LAST_CAP > 63
+#error Fix SELinux to handle capabilities > 63.
+#endif
+
+/* Check whether a task is allowed to use a capability. */
+static int cred_has_capability(const struct cred *cred,
+ int cap, unsigned int opts, bool initns)
+{
+ struct common_audit_data ad;
+ struct av_decision avd;
+ u16 sclass;
+ u32 sid = cred_sid(cred);
+ u32 av = CAP_TO_MASK(cap);
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_CAP;
+ ad.u.cap = cap;
+
+ switch (CAP_TO_INDEX(cap)) {
+ case 0:
+ sclass = initns ? SECCLASS_CAPABILITY : SECCLASS_CAP_USERNS;
+ break;
+ case 1:
+ sclass = initns ? SECCLASS_CAPABILITY2 : SECCLASS_CAP2_USERNS;
+ break;
+ default:
+ pr_err("SELinux: out of range capability %d\n", cap);
+ BUG();
+ return -EINVAL;
+ }
+
+ rc = avc_has_perm_noaudit(&selinux_state,
+ sid, sid, sclass, av, 0, &avd);
+ if (!(opts & CAP_OPT_NOAUDIT)) {
+ int rc2 = avc_audit(&selinux_state,
+ sid, sid, sclass, av, &avd, rc, &ad);
+ if (rc2)
+ return rc2;
+ }
+ return rc;
+}
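+
+/*
+ * Illustrative mapping: CAP_TO_INDEX() selects which 32-bit capability
+ * word a capability lives in, so e.g. CAP_NET_ADMIN (12) is checked
+ * against the capability (or cap_userns) class while CAP_MAC_ADMIN (33)
+ * is checked against capability2 (or cap2_userns), with CAP_TO_MASK()
+ * supplying the access vector bit.
+ */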
+
+/* Check whether a task has a particular permission to an inode.
+ The 'adp' parameter is optional and allows other audit
+ data to be passed (e.g. the dentry). */
+static int inode_has_perm(const struct cred *cred,
+ struct inode *inode,
+ u32 perms,
+ struct common_audit_data *adp)
+{
+ struct inode_security_struct *isec;
+ u32 sid;
+
+ validate_creds(cred);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ sid = cred_sid(cred);
+ isec = selinux_inode(inode);
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, adp);
+}
+
+/* Same as inode_has_perm, but pass explicit audit data containing
+ the dentry to help the auditing code to more easily generate the
+ pathname if needed. */
+static inline int dentry_has_perm(const struct cred *cred,
+ struct dentry *dentry,
+ u32 av)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+ __inode_security_revalidate(inode, dentry, true);
+ return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as inode_has_perm, but pass explicit audit data containing
+ the path to help the auditing code to more easily generate the
+ pathname if needed. */
+static inline int path_has_perm(const struct cred *cred,
+ const struct path *path,
+ u32 av)
+{
+ struct inode *inode = d_backing_inode(path->dentry);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = *path;
+ __inode_security_revalidate(inode, path->dentry, true);
+ return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as path_has_perm, but uses the inode from the file struct. */
+static inline int file_path_has_perm(const struct cred *cred,
+ struct file *file,
+ u32 av)
+{
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+ return inode_has_perm(cred, file_inode(file), av, &ad);
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static int bpf_fd_pass(struct file *file, u32 sid);
+#endif
+
+/* Check whether a task can use an open file descriptor to
+ access an inode in a given way. Check access to the
+ descriptor itself, and then use dentry_has_perm to
+ check a particular permission to the file.
+ Access to the descriptor is implicitly granted if it
+ has the same SID as the process. If av is zero, then
+ access to the file is not checked, e.g. for cases
+ where only the descriptor is affected like seek. */
+static int file_has_perm(const struct cred *cred,
+ struct file *file,
+ u32 av)
+{
+ struct file_security_struct *fsec = selinux_file(file);
+ struct inode *inode = file_inode(file);
+ struct common_audit_data ad;
+ u32 sid = cred_sid(cred);
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ if (sid != fsec->sid) {
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid,
+ SECCLASS_FD,
+ FD__USE,
+ &ad);
+ if (rc)
+ goto out;
+ }
+
+#ifdef CONFIG_BPF_SYSCALL
+ rc = bpf_fd_pass(file, cred_sid(cred));
+ if (rc)
+ return rc;
+#endif
+
+ /* av is zero if only checking access to the descriptor. */
+ rc = 0;
+ if (av)
+ rc = inode_has_perm(cred, inode, av, &ad);
+
+out:
+ return rc;
+}
+
+/*
+ * Determine the label for an inode that might be unioned.
+ */
+static int
+selinux_determine_inode_label(const struct task_security_struct *tsec,
+ struct inode *dir,
+ const struct qstr *name, u16 tclass,
+ u32 *_new_isid)
+{
+ const struct superblock_security_struct *sbsec =
+ selinux_superblock(dir->i_sb);
+
+ if ((sbsec->flags & SE_SBINITIALIZED) &&
+ (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
+ *_new_isid = sbsec->mntpoint_sid;
+ } else if ((sbsec->flags & SBLABEL_MNT) &&
+ tsec->create_sid) {
+ *_new_isid = tsec->create_sid;
+ } else {
+ const struct inode_security_struct *dsec = inode_security(dir);
+ return security_transition_sid(&selinux_state, tsec->sid,
+ dsec->sid, tclass,
+ name, _new_isid);
+ }
+
+ return 0;
+}
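+
+/*
+ * Labeling precedence, summarized (illustrative): a mount with
+ * SECURITY_FS_USE_MNTPOINT behavior forces the mountpoint SID; otherwise
+ * a caller-set create SID (e.g. one written via /proc/self/attr/fscreate)
+ * wins on filesystems that support labeling (SBLABEL_MNT); otherwise the
+ * label is computed as a type transition from the task SID and the
+ * parent directory SID.
+ */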
+
+/* Check whether a task can create a file. */
+static int may_create(struct inode *dir,
+ struct dentry *dentry,
+ u16 tclass)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ struct inode_security_struct *dsec;
+ struct superblock_security_struct *sbsec;
+ u32 sid, newsid;
+ struct common_audit_data ad;
+ int rc;
+
+ dsec = inode_security(dir);
+ sbsec = selinux_superblock(dir->i_sb);
+
+ sid = tsec->sid;
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+
+ rc = avc_has_perm(&selinux_state,
+ sid, dsec->sid, SECCLASS_DIR,
+ DIR__ADD_NAME | DIR__SEARCH,
+ &ad);
+ if (rc)
+ return rc;
+
+ rc = selinux_determine_inode_label(tsec, dir, &dentry->d_name, tclass,
+ &newsid);
+ if (rc)
+ return rc;
+
+ rc = avc_has_perm(&selinux_state,
+ sid, newsid, tclass, FILE__CREATE, &ad);
+ if (rc)
+ return rc;
+
+ return avc_has_perm(&selinux_state,
+ newsid, sbsec->sid,
+ SECCLASS_FILESYSTEM,
+ FILESYSTEM__ASSOCIATE, &ad);
+}
+
+#define MAY_LINK 0
+#define MAY_UNLINK 1
+#define MAY_RMDIR 2
+
+/* Check whether a task can link, unlink, or rmdir a file/directory. */
+static int may_link(struct inode *dir,
+ struct dentry *dentry,
+ int kind)
+
+{
+ struct inode_security_struct *dsec, *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+ u32 av;
+ int rc;
+
+ dsec = inode_security(dir);
+ isec = backing_inode_security(dentry);
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+
+ av = DIR__SEARCH;
+ av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
+ rc = avc_has_perm(&selinux_state,
+ sid, dsec->sid, SECCLASS_DIR, av, &ad);
+ if (rc)
+ return rc;
+
+ switch (kind) {
+ case MAY_LINK:
+ av = FILE__LINK;
+ break;
+ case MAY_UNLINK:
+ av = FILE__UNLINK;
+ break;
+ case MAY_RMDIR:
+ av = DIR__RMDIR;
+ break;
+ default:
+ pr_warn("SELinux: %s: unrecognized kind %d\n",
+ __func__, kind);
+ return 0;
+ }
+
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, av, &ad);
+ return rc;
+}
+
+static inline int may_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry)
+{
+ struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+ u32 av;
+ int old_is_dir, new_is_dir;
+ int rc;
+
+ old_dsec = inode_security(old_dir);
+ old_isec = backing_inode_security(old_dentry);
+ old_is_dir = d_is_dir(old_dentry);
+ new_dsec = inode_security(new_dir);
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+
+ ad.u.dentry = old_dentry;
+ rc = avc_has_perm(&selinux_state,
+ sid, old_dsec->sid, SECCLASS_DIR,
+ DIR__REMOVE_NAME | DIR__SEARCH, &ad);
+ if (rc)
+ return rc;
+ rc = avc_has_perm(&selinux_state,
+ sid, old_isec->sid,
+ old_isec->sclass, FILE__RENAME, &ad);
+ if (rc)
+ return rc;
+ if (old_is_dir && new_dir != old_dir) {
+ rc = avc_has_perm(&selinux_state,
+ sid, old_isec->sid,
+ old_isec->sclass, DIR__REPARENT, &ad);
+ if (rc)
+ return rc;
+ }
+
+ ad.u.dentry = new_dentry;
+ av = DIR__ADD_NAME | DIR__SEARCH;
+ if (d_is_positive(new_dentry))
+ av |= DIR__REMOVE_NAME;
+ rc = avc_has_perm(&selinux_state,
+ sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
+ if (rc)
+ return rc;
+ if (d_is_positive(new_dentry)) {
+ new_isec = backing_inode_security(new_dentry);
+ new_is_dir = d_is_dir(new_dentry);
+ rc = avc_has_perm(&selinux_state,
+ sid, new_isec->sid,
+ new_isec->sclass,
+ (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Check whether a task can perform a filesystem operation. */
+static int superblock_has_perm(const struct cred *cred,
+ struct super_block *sb,
+ u32 perms,
+ struct common_audit_data *ad)
+{
+ struct superblock_security_struct *sbsec;
+ u32 sid = cred_sid(cred);
+
+ sbsec = selinux_superblock(sb);
+ return avc_has_perm(&selinux_state,
+ sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
+}
+
+/* Convert a Linux mode and permission mask to an access vector. */
+static inline u32 file_mask_to_av(int mode, int mask)
+{
+ u32 av = 0;
+
+ if (!S_ISDIR(mode)) {
+ if (mask & MAY_EXEC)
+ av |= FILE__EXECUTE;
+ if (mask & MAY_READ)
+ av |= FILE__READ;
+
+ if (mask & MAY_APPEND)
+ av |= FILE__APPEND;
+ else if (mask & MAY_WRITE)
+ av |= FILE__WRITE;
+
+ } else {
+ if (mask & MAY_EXEC)
+ av |= DIR__SEARCH;
+ if (mask & MAY_WRITE)
+ av |= DIR__WRITE;
+ if (mask & MAY_READ)
+ av |= DIR__READ;
+ }
+
+ return av;
+}
+
+/* Convert a Linux file to an access vector. */
+static inline u32 file_to_av(struct file *file)
+{
+ u32 av = 0;
+
+ if (file->f_mode & FMODE_READ)
+ av |= FILE__READ;
+ if (file->f_mode & FMODE_WRITE) {
+ if (file->f_flags & O_APPEND)
+ av |= FILE__APPEND;
+ else
+ av |= FILE__WRITE;
+ }
+ if (!av) {
+ /*
+ * Special file opened with flags 3 for ioctl-only use.
+ */
+ av = FILE__IOCTL;
+ }
+
+ return av;
+}
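+
+/*
+ * Examples (illustrative): a file opened O_RDONLY yields FILE__READ; one
+ * opened O_WRONLY|O_APPEND yields FILE__APPEND; a special file opened
+ * with an f_flags access mode of 3 (neither readable nor writable)
+ * yields FILE__IOCTL.
+ */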
+
+/*
+ * Convert a file to an access vector and include the correct
+ * open permission.
+ */
+static inline u32 open_file_to_av(struct file *file)
+{
+ u32 av = file_to_av(file);
+ struct inode *inode = file_inode(file);
+
+ if (selinux_policycap_openperm() &&
+ inode->i_sb->s_magic != SOCKFS_MAGIC)
+ av |= FILE__OPEN;
+
+ return av;
+}
+
+/* Hook functions begin here. */
+
+static int selinux_binder_set_context_mgr(const struct cred *mgr)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), cred_sid(mgr), SECCLASS_BINDER,
+ BINDER__SET_CONTEXT_MGR, NULL);
+}
+
+static int selinux_binder_transaction(const struct cred *from,
+ const struct cred *to)
+{
+ u32 mysid = current_sid();
+ u32 fromsid = cred_sid(from);
+ u32 tosid = cred_sid(to);
+ int rc;
+
+ if (mysid != fromsid) {
+ rc = avc_has_perm(&selinux_state,
+ mysid, fromsid, SECCLASS_BINDER,
+ BINDER__IMPERSONATE, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return avc_has_perm(&selinux_state, fromsid, tosid,
+ SECCLASS_BINDER, BINDER__CALL, NULL);
+}
+
+static int selinux_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
+{
+ return avc_has_perm(&selinux_state,
+ cred_sid(from), cred_sid(to),
+ SECCLASS_BINDER, BINDER__TRANSFER,
+ NULL);
+}
+
+static int selinux_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
+ struct file *file)
+{
+ u32 sid = cred_sid(to);
+ struct file_security_struct *fsec = selinux_file(file);
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode_security_struct *isec;
+ struct common_audit_data ad;
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = file->f_path;
+
+ if (sid != fsec->sid) {
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid,
+ SECCLASS_FD,
+ FD__USE,
+ &ad);
+ if (rc)
+ return rc;
+ }
+
+#ifdef CONFIG_BPF_SYSCALL
+ rc = bpf_fd_pass(file, sid);
+ if (rc)
+ return rc;
+#endif
+
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+ return 0;
+
+ isec = backing_inode_security(dentry);
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, file_to_av(file),
+ &ad);
+}
+
+static int selinux_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ u32 sid = current_sid();
+ u32 csid = task_sid_obj(child);
+
+ if (mode & PTRACE_MODE_READ)
+ return avc_has_perm(&selinux_state,
+ sid, csid, SECCLASS_FILE, FILE__READ, NULL);
+
+ return avc_has_perm(&selinux_state,
+ sid, csid, SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
+}
+
+static int selinux_ptrace_traceme(struct task_struct *parent)
+{
+ return avc_has_perm(&selinux_state,
+ task_sid_obj(parent), task_sid_obj(current),
+ SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
+}
+
+static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(target), SECCLASS_PROCESS,
+ PROCESS__GETCAP, NULL);
+}
+
+static int selinux_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+{
+ return avc_has_perm(&selinux_state,
+ cred_sid(old), cred_sid(new), SECCLASS_PROCESS,
+ PROCESS__SETCAP, NULL);
+}
+
+/*
+ * (This comment used to live with the selinux_task_setuid hook,
+ * which was removed).
+ *
+ * Since setuid only affects the current process, and since the SELinux
+ * controls are not based on the Linux identity attributes, SELinux does not
+ * need to control this operation. However, SELinux does control the use of
+ * the CAP_SETUID and CAP_SETGID capabilities using the capable hook.
+ */
+
+static int selinux_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+{
+ return cred_has_capability(cred, cap, opts, ns == &init_user_ns);
+}
+
+static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
+{
+ const struct cred *cred = current_cred();
+ int rc = 0;
+
+ if (!sb)
+ return 0;
+
+ switch (cmds) {
+ case Q_SYNC:
+ case Q_QUOTAON:
+ case Q_QUOTAOFF:
+ case Q_SETINFO:
+ case Q_SETQUOTA:
+ case Q_XQUOTAOFF:
+ case Q_XQUOTAON:
+ case Q_XSETQLIM:
+ rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAMOD, NULL);
+ break;
+ case Q_GETFMT:
+ case Q_GETINFO:
+ case Q_GETQUOTA:
+ case Q_XGETQUOTA:
+ case Q_XGETQSTAT:
+ case Q_XGETQSTATV:
+ case Q_XGETNEXTQUOTA:
+ rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAGET, NULL);
+ break;
+ default:
+ rc = 0; /* let the kernel handle invalid cmds */
+ break;
+ }
+ return rc;
+}
+
+static int selinux_quota_on(struct dentry *dentry)
+{
+ const struct cred *cred = current_cred();
+
+ return dentry_has_perm(cred, dentry, FILE__QUOTAON);
+}
+
+static int selinux_syslog(int type)
+{
+ switch (type) {
+ case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
+ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, NULL);
+ case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE,
+ NULL);
+ }
+ /* All other syslog types */
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, NULL);
+}
+
+/*
+ * Check that a process has enough memory to allocate a new virtual
+ * mapping. 0 means there is enough memory for the allocation to
+ * succeed and -ENOMEM implies there is not.
+ *
+ * Do not audit the selinux permission check, as this is applied to all
+ * processes that allocate mappings.
+ */
+static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
+{
+ int rc, cap_sys_admin = 0;
+
+ rc = cred_has_capability(current_cred(), CAP_SYS_ADMIN,
+ CAP_OPT_NOAUDIT, true);
+ if (rc == 0)
+ cap_sys_admin = 1;
+
+ return cap_sys_admin;
+}
+
+/* binprm security operations */
+
+static u32 ptrace_parent_sid(void)
+{
+ u32 sid = 0;
+ struct task_struct *tracer;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (tracer)
+ sid = task_sid_obj(tracer);
+ rcu_read_unlock();
+
+ return sid;
+}
+
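+/*
+ * Decide whether a SID transition is permitted for a task running
+ * with no_new_privs or executing from a nosuid mount.
+ */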
+static int check_nnp_nosuid(const struct linux_binprm *bprm,
+ const struct task_security_struct *old_tsec,
+ const struct task_security_struct *new_tsec)
+{
+ int nnp = (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS);
+ int nosuid = !mnt_may_suid(bprm->file->f_path.mnt);
+ int rc;
+ u32 av;
+
+ if (!nnp && !nosuid)
+ return 0; /* neither NNP nor nosuid */
+
+ if (new_tsec->sid == old_tsec->sid)
+ return 0; /* No change in credentials */
+
+ /*
+ * If the policy enables the nnp_nosuid_transition policy capability,
+ * then we permit transitions under NNP or nosuid if the
+ * policy allows the corresponding permission between
+ * the old and new contexts.
+ */
+ if (selinux_policycap_nnp_nosuid_transition()) {
+ av = 0;
+ if (nnp)
+ av |= PROCESS2__NNP_TRANSITION;
+ if (nosuid)
+ av |= PROCESS2__NOSUID_TRANSITION;
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
+ SECCLASS_PROCESS2, av, NULL);
+ if (!rc)
+ return 0;
+ }
+
+ /*
+ * We also permit NNP or nosuid transitions to bounded SIDs,
+ * i.e. SIDs that are guaranteed to only be allowed a subset
+ * of the permissions of the current SID.
+ */
+ rc = security_bounded_transition(&selinux_state, old_tsec->sid,
+ new_tsec->sid);
+ if (!rc)
+ return 0;
+
+ /*
+ * On failure, preserve the errno values for NNP vs nosuid.
+ * NNP: Operation not permitted for caller.
+ * nosuid: Permission denied to file.
+ */
+ if (nnp)
+ return -EPERM;
+ return -EACCES;
+}
+
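+/*
+ * Compute the SID that the new program will run under, either from an
+ * explicitly requested exec SID or from a policy-defined default
+ * transition, and validate the result against policy.
+ */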
+static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ const struct task_security_struct *old_tsec;
+ struct task_security_struct *new_tsec;
+ struct inode_security_struct *isec;
+ struct common_audit_data ad;
+ struct inode *inode = file_inode(bprm->file);
+ int rc;
+
+ /* SELinux context only depends on initial program or script and not
+ * the script interpreter */
+
+ old_tsec = selinux_cred(current_cred());
+ new_tsec = selinux_cred(bprm->cred);
+ isec = inode_security(inode);
+
+ /* Default to the current task SID. */
+ new_tsec->sid = old_tsec->sid;
+ new_tsec->osid = old_tsec->sid;
+
+ /* Reset fs, key, and sock SIDs on execve. */
+ new_tsec->create_sid = 0;
+ new_tsec->keycreate_sid = 0;
+ new_tsec->sockcreate_sid = 0;
+
+ if (old_tsec->exec_sid) {
+ new_tsec->sid = old_tsec->exec_sid;
+ /* Reset exec SID on execve. */
+ new_tsec->exec_sid = 0;
+
+ /* Fail on NNP or nosuid if not an allowed transition. */
+ rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ if (rc)
+ return rc;
+ } else {
+ /* Check for a default transition on this program. */
+ rc = security_transition_sid(&selinux_state, old_tsec->sid,
+ isec->sid, SECCLASS_PROCESS, NULL,
+ &new_tsec->sid);
+ if (rc)
+ return rc;
+
+ /*
+ * Fallback to old SID on NNP or nosuid if not an allowed
+ * transition.
+ */
+ rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ if (rc)
+ new_tsec->sid = old_tsec->sid;
+ }
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = bprm->file;
+
+ if (new_tsec->sid == old_tsec->sid) {
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, isec->sid,
+ SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
+ if (rc)
+ return rc;
+ } else {
+ /* Check permissions for the transition. */
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
+ SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
+ if (rc)
+ return rc;
+
+ rc = avc_has_perm(&selinux_state,
+ new_tsec->sid, isec->sid,
+ SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
+ if (rc)
+ return rc;
+
+ /* Check for shared state */
+ if (bprm->unsafe & LSM_UNSAFE_SHARE) {
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
+ SECCLASS_PROCESS, PROCESS__SHARE,
+ NULL);
+ if (rc)
+ return -EPERM;
+ }
+
+ /* Make sure that anyone attempting to ptrace over a task that
+ * changes its SID has the appropriate permission. */
+ if (bprm->unsafe & LSM_UNSAFE_PTRACE) {
+ u32 ptsid = ptrace_parent_sid();
+ if (ptsid != 0) {
+ rc = avc_has_perm(&selinux_state,
+ ptsid, new_tsec->sid,
+ SECCLASS_PROCESS,
+ PROCESS__PTRACE, NULL);
+ if (rc)
+ return -EPERM;
+ }
+ }
+
+ /* Clear any possibly unsafe personality bits on exec: */
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+ /* Enable secure mode for SID transitions unless
+ the noatsecure permission is granted between
+ the two SIDs, i.e. avc_has_perm() returns 0. */
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
+ SECCLASS_PROCESS, PROCESS__NOATSECURE,
+ NULL);
+ bprm->secureexec |= !!rc;
+ }
+
+ return 0;
+}
+
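+/*
+ * iterate_fd() callback: returns fd + 1 for the first descriptor the
+ * given cred may no longer access, or 0 to continue the walk.
+ */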
+static int match_file(const void *p, struct file *file, unsigned fd)
+{
+ return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
+}
+
+/* Derived from fs/exec.c:flush_old_files. */
+static inline void flush_unauthorized_files(const struct cred *cred,
+ struct files_struct *files)
+{
+ struct file *file, *devnull = NULL;
+ struct tty_struct *tty;
+ int drop_tty = 0;
+ unsigned n;
+
+ tty = get_current_tty();
+ if (tty) {
+ spin_lock(&tty->files_lock);
+ if (!list_empty(&tty->tty_files)) {
+ struct tty_file_private *file_priv;
+
+ /* Revalidate access to controlling tty.
+ Use file_path_has_perm on the tty path directly
+ rather than using file_has_perm, as this particular
+ open file may belong to another process and we are
+ only interested in the inode-based check here. */
+ file_priv = list_first_entry(&tty->tty_files,
+ struct tty_file_private, list);
+ file = file_priv->file;
+ if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE))
+ drop_tty = 1;
+ }
+ spin_unlock(&tty->files_lock);
+ tty_kref_put(tty);
+ }
+ /* Reset controlling tty. */
+ if (drop_tty)
+ no_tty();
+
+ /* Revalidate access to inherited open files. */
+ n = iterate_fd(files, 0, match_file, cred);
+ if (!n) /* none found? */
+ return;
+
+ devnull = dentry_open(&selinux_null, O_RDWR, cred);
+ if (IS_ERR(devnull))
+ devnull = NULL;
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+ } while ((n = iterate_fd(files, n, match_file, cred)) != 0);
+ if (devnull)
+ fput(devnull);
+}
+
+/*
+ * Prepare a process for imminent new credential changes due to exec
+ */
+static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct task_security_struct *new_tsec;
+ struct rlimit *rlim, *initrlim;
+ int rc, i;
+
+ new_tsec = selinux_cred(bprm->cred);
+ if (new_tsec->sid == new_tsec->osid)
+ return;
+
+ /* Close files for which the new task SID is not authorized. */
+ flush_unauthorized_files(bprm->cred, current->files);
+
+ /* Always clear parent death signal on SID transitions. */
+ current->pdeath_signal = 0;
+
+ /* Check whether the new SID can inherit resource limits from the old
+ * SID. If not, reset all soft limits to the lower of the current
+ * task's hard limit and the init task's soft limit.
+ *
+ * Note that the setting of hard limits (even to lower them) can be
+ * controlled by the setrlimit check. The inclusion of the init task's
+ * soft limit into the computation is to avoid resetting soft limits
+ * higher than the default soft limit for cases where the default is
+ * lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK.
+ */
+ rc = avc_has_perm(&selinux_state,
+ new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
+ PROCESS__RLIMITINH, NULL);
+ if (rc) {
+ /* protect against do_prlimit() */
+ task_lock(current);
+ for (i = 0; i < RLIM_NLIMITS; i++) {
+ rlim = current->signal->rlim + i;
+ initrlim = init_task.signal->rlim + i;
+ rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
+ }
+ task_unlock(current);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
+ }
+}
+
+/*
+ * Clean up the process immediately after the installation of new credentials
+ * due to exec
+ */
+static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ u32 osid, sid;
+ int rc;
+
+ osid = tsec->osid;
+ sid = tsec->sid;
+
+ if (sid == osid)
+ return;
+
+ /* Check whether the new SID can inherit signal state from the old SID.
+ * If not, clear itimers to avoid subsequent signal generation and
+ * flush and unblock signals.
+ *
+ * This must occur _after_ the task SID has been updated so that any
+ * kill done after the flush will be checked against the new SID.
+ */
+ rc = avc_has_perm(&selinux_state,
+ osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
+ if (rc) {
+ clear_itimer();
+
+ spin_lock_irq(&unrcu_pointer(current->sighand)->siglock);
+ if (!fatal_signal_pending(current)) {
+ flush_sigqueue(&current->pending);
+ flush_sigqueue(&current->signal->shared_pending);
+ flush_signal_handlers(current, 1);
+ sigemptyset(&current->blocked);
+ recalc_sigpending();
+ }
+ spin_unlock_irq(&unrcu_pointer(current->sighand)->siglock);
+ }
+
+ /* Wake up the parent if it is waiting so that it can recheck
+ * wait permission to the new task SID. */
+ read_lock(&tasklist_lock);
+ __wake_up_parent(current, unrcu_pointer(current->real_parent));
+ read_unlock(&tasklist_lock);
+}
+
+/* superblock security operations */
+
+static int selinux_sb_alloc_security(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+
+ mutex_init(&sbsec->lock);
+ INIT_LIST_HEAD(&sbsec->isec_head);
+ spin_lock_init(&sbsec->isec_lock);
+ sbsec->sid = SECINITSID_UNLABELED;
+ sbsec->def_sid = SECINITSID_FILE;
+ sbsec->mntpoint_sid = SECINITSID_UNLABELED;
+
+ return 0;
+}
+
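+/*
+ * Return the length of the next comma-separated mount option in @s,
+ * treating commas inside double quotes as part of the option.
+ */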
+static inline int opt_len(const char *s)
+{
+ bool open_quote = false;
+ int len;
+ char c;
+
+ for (len = 0; (c = s[len]) != '\0'; len++) {
+ if (c == '"')
+ open_quote = !open_quote;
+ if (c == ',' && !open_quote)
+ break;
+ }
+ return len;
+}
+
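+/*
+ * Strip the SELinux mount options out of the options string, recording
+ * them in *mnt_opts and compacting the remaining options in place.
+ */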
+static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
+{
+ char *from = options;
+ char *to = options;
+ bool first = true;
+ int rc;
+
+ while (1) {
+ int len = opt_len(from);
+ int token;
+ char *arg = NULL;
+
+ token = match_opt_prefix(from, len, &arg);
+
+ if (token != Opt_error) {
+ char *p, *q;
+
+ /* strip quotes */
+ if (arg) {
+ for (p = q = arg; p < from + len; p++) {
+ char c = *p;
+ if (c != '"')
+ *q++ = c;
+ }
+ arg = kmemdup_nul(arg, q - arg, GFP_KERNEL);
+ if (!arg) {
+ rc = -ENOMEM;
+ goto free_opt;
+ }
+ }
+ rc = selinux_add_opt(token, arg, mnt_opts);
+ kfree(arg);
+ arg = NULL;
+ if (unlikely(rc))
+ goto free_opt;
+ } else {
+ if (!first) { // copy with preceding comma
+ from--;
+ len++;
+ }
+ if (to != from)
+ memmove(to, from, len);
+ to += len;
+ first = false;
+ }
+ if (!from[len])
+ break;
+ from += len + 1;
+ }
+ *to = '\0';
+ return 0;
+
+free_opt:
+ if (*mnt_opts) {
+ selinux_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
+ }
+ return rc;
+}
+
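+/*
+ * Check whether the given mount options are compatible with the
+ * superblock's existing security state; returns 1 on a conflict
+ * and 0 when the options are acceptable.
+ */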
+static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
+{
+ struct selinux_mnt_opts *opts = mnt_opts;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+
+ /*
+ * Superblock not initialized (i.e. no options) - reject if any
+ * options specified, otherwise accept.
+ */
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ return opts ? 1 : 0;
+
+ /*
+ * Superblock initialized and no options specified - reject if
+ * superblock has any options set, otherwise accept.
+ */
+ if (!opts)
+ return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
+
+ if (opts->fscontext_sid) {
+ if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
+ opts->fscontext_sid))
+ return 1;
+ }
+ if (opts->context_sid) {
+ if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
+ opts->context_sid))
+ return 1;
+ }
+ if (opts->rootcontext_sid) {
+ struct inode_security_struct *root_isec;
+
+ root_isec = backing_inode_security(sb->s_root);
+ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
+ opts->rootcontext_sid))
+ return 1;
+ }
+ if (opts->defcontext_sid) {
+ if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
+ opts->defcontext_sid))
+ return 1;
+ }
+ return 0;
+}
+
+static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
+{
+ struct selinux_mnt_opts *opts = mnt_opts;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ return 0;
+
+ if (!opts)
+ return 0;
+
+ if (opts->fscontext_sid) {
+ if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
+ opts->fscontext_sid))
+ goto out_bad_option;
+ }
+ if (opts->context_sid) {
+ if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
+ opts->context_sid))
+ goto out_bad_option;
+ }
+ if (opts->rootcontext_sid) {
+ struct inode_security_struct *root_isec;
+
+ root_isec = backing_inode_security(sb->s_root);
+ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
+ opts->rootcontext_sid))
+ goto out_bad_option;
+ }
+ if (opts->defcontext_sid) {
+ if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
+ opts->defcontext_sid))
+ goto out_bad_option;
+ }
+ return 0;
+
+out_bad_option:
+ pr_warn("SELinux: unable to change security options "
+ "during remount (dev %s, type=%s)\n", sb->s_id,
+ sb->s_type->name);
+ return -EINVAL;
+}
+
+static int selinux_sb_kern_mount(struct super_block *sb)
+{
+ const struct cred *cred = current_cred();
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = sb->s_root;
+ return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
+}
+
+static int selinux_sb_statfs(struct dentry *dentry)
+{
+ const struct cred *cred = current_cred();
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry->d_sb->s_root;
+ return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
+}
+
+static int selinux_mount(const char *dev_name,
+ const struct path *path,
+ const char *type,
+ unsigned long flags,
+ void *data)
+{
+ const struct cred *cred = current_cred();
+
+ if (flags & MS_REMOUNT)
+ return superblock_has_perm(cred, path->dentry->d_sb,
+ FILESYSTEM__REMOUNT, NULL);
+ else
+ return path_has_perm(cred, path, FILE__MOUNTON);
+}
+
+static int selinux_move_mount(const struct path *from_path,
+ const struct path *to_path)
+{
+ const struct cred *cred = current_cred();
+
+ return path_has_perm(cred, to_path, FILE__MOUNTON);
+}
+
+static int selinux_umount(struct vfsmount *mnt, int flags)
+{
+ const struct cred *cred = current_cred();
+
+ return superblock_has_perm(cred, mnt->mnt_sb,
+ FILESYSTEM__UNMOUNT, NULL);
+}
+
+static int selinux_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ const struct superblock_security_struct *sbsec = selinux_superblock(reference);
+ struct selinux_mnt_opts *opts;
+
+ /*
+ * Ensure that fc->security remains NULL when no options are set
+ * as expected by selinux_set_mnt_opts().
+ */
+ if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
+ return 0;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ if (sbsec->flags & FSCONTEXT_MNT)
+ opts->fscontext_sid = sbsec->sid;
+ if (sbsec->flags & CONTEXT_MNT)
+ opts->context_sid = sbsec->mntpoint_sid;
+ if (sbsec->flags & DEFCONTEXT_MNT)
+ opts->defcontext_sid = sbsec->def_sid;
+ fc->security = opts;
+ return 0;
+}
+
+static int selinux_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ const struct selinux_mnt_opts *src = src_fc->security;
+
+ if (!src)
+ return 0;
+
+ fc->security = kmemdup(src, sizeof(*src), GFP_KERNEL);
+ return fc->security ? 0 : -ENOMEM;
+}
+
+static const struct fs_parameter_spec selinux_fs_parameters[] = {
+ fsparam_string(CONTEXT_STR, Opt_context),
+ fsparam_string(DEFCONTEXT_STR, Opt_defcontext),
+ fsparam_string(FSCONTEXT_STR, Opt_fscontext),
+ fsparam_string(ROOTCONTEXT_STR, Opt_rootcontext),
+ fsparam_flag (SECLABEL_STR, Opt_seclabel),
+ {}
+};
+
+static int selinux_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, selinux_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ return selinux_add_opt(opt, param->string, &fc->security);
+}
+
+/* inode security operations */
+
+static int selinux_inode_alloc_security(struct inode *inode)
+{
+ struct inode_security_struct *isec = selinux_inode(inode);
+ u32 sid = current_sid();
+
+ spin_lock_init(&isec->lock);
+ INIT_LIST_HEAD(&isec->list);
+ isec->inode = inode;
+ isec->sid = SECINITSID_UNLABELED;
+ isec->sclass = SECCLASS_FILE;
+ isec->task_sid = sid;
+ isec->initialized = LABEL_INVALID;
+
+ return 0;
+}
+
+static void selinux_inode_free_security(struct inode *inode)
+{
+ inode_free_security(inode);
+}
+
+static int selinux_dentry_init_security(struct dentry *dentry, int mode,
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen)
+{
+ u32 newsid;
+ int rc;
+
+ rc = selinux_determine_inode_label(selinux_cred(current_cred()),
+ d_inode(dentry->d_parent), name,
+ inode_mode_to_security_class(mode),
+ &newsid);
+ if (rc)
+ return rc;
+
+ if (xattr_name)
+ *xattr_name = XATTR_NAME_SELINUX;
+
+ return security_sid_to_context(&selinux_state, newsid, (char **)ctx,
+ ctxlen);
+}
+
+static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ u32 newsid;
+ int rc;
+ struct task_security_struct *tsec;
+
+ rc = selinux_determine_inode_label(selinux_cred(old),
+ d_inode(dentry->d_parent), name,
+ inode_mode_to_security_class(mode),
+ &newsid);
+ if (rc)
+ return rc;
+
+ tsec = selinux_cred(new);
+ tsec->create_sid = newsid;
+ return 0;
+}
+
+static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ const char **name,
+ void **value, size_t *len)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ struct superblock_security_struct *sbsec;
+ u32 newsid, clen;
+ int rc;
+ char *context;
+
+ sbsec = selinux_superblock(dir->i_sb);
+
+ newsid = tsec->create_sid;
+
+ rc = selinux_determine_inode_label(tsec, dir, qstr,
+ inode_mode_to_security_class(inode->i_mode),
+ &newsid);
+ if (rc)
+ return rc;
+
+ /* Possibly defer initialization to selinux_complete_init. */
+ if (sbsec->flags & SE_SBINITIALIZED) {
+ struct inode_security_struct *isec = selinux_inode(inode);
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ isec->sid = newsid;
+ isec->initialized = LABEL_INITIALIZED;
+ }
+
+ if (!selinux_initialized(&selinux_state) ||
+ !(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+
+ if (name)
+ *name = XATTR_SELINUX_SUFFIX;
+
+ if (value && len) {
+ rc = security_sid_to_context_force(&selinux_state, newsid,
+ &context, &clen);
+ if (rc)
+ return rc;
+ *value = context;
+ *len = clen;
+ }
+
+ return 0;
+}
+
+static int selinux_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ struct common_audit_data ad;
+ struct inode_security_struct *isec;
+ int rc;
+
+ if (unlikely(!selinux_initialized(&selinux_state)))
+ return 0;
+
+ isec = selinux_inode(inode);
+
+ /*
+ * We only get here once per ephemeral inode. The inode has
+ * been initialized via inode_alloc_security but is otherwise
+ * untouched.
+ */
+
+ if (context_inode) {
+ struct inode_security_struct *context_isec =
+ selinux_inode(context_inode);
+ if (context_isec->initialized != LABEL_INITIALIZED) {
+ pr_err("SELinux: context_inode is not initialized");
+ return -EACCES;
+ }
+
+ isec->sclass = context_isec->sclass;
+ isec->sid = context_isec->sid;
+ } else {
+ isec->sclass = SECCLASS_ANON_INODE;
+ rc = security_transition_sid(
+ &selinux_state, tsec->sid, tsec->sid,
+ isec->sclass, name, &isec->sid);
+ if (rc)
+ return rc;
+ }
+
+ isec->initialized = LABEL_INITIALIZED;
+ /*
+ * Now that we've initialized security, check whether we're
+ * allowed to actually create this type of anonymous inode.
+ */
+
+ ad.type = LSM_AUDIT_DATA_ANONINODE;
+ ad.u.anonclass = name ? (const char *)name->name : "?";
+
+ return avc_has_perm(&selinux_state,
+ tsec->sid,
+ isec->sid,
+ isec->sclass,
+ FILE__CREATE,
+ &ad);
+}
+
+static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ return may_create(dir, dentry, SECCLASS_FILE);
+}
+
+static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+{
+ return may_link(dir, old_dentry, MAY_LINK);
+}
+
+static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+ return may_link(dir, dentry, MAY_UNLINK);
+}
+
+static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const char *name)
+{
+ return may_create(dir, dentry, SECCLASS_LNK_FILE);
+}
+
+static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask)
+{
+ return may_create(dir, dentry, SECCLASS_DIR);
+}
+
+static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ return may_link(dir, dentry, MAY_RMDIR);
+}
+
+static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+ return may_create(dir, dentry, inode_mode_to_security_class(mode));
+}
+
+static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
+ struct inode *new_inode, struct dentry *new_dentry)
+{
+ return may_rename(old_inode, old_dentry, new_inode, new_dentry);
+}
+
+static int selinux_inode_readlink(struct dentry *dentry)
+{
+ const struct cred *cred = current_cred();
+
+ return dentry_has_perm(cred, dentry, FILE__READ);
+}
+
+static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu)
+{
+ const struct cred *cred = current_cred();
+ struct common_audit_data ad;
+ struct inode_security_struct *isec;
+ u32 sid;
+
+ validate_creds(cred);
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+ sid = cred_sid(cred);
+ isec = inode_security_rcu(inode, rcu);
+ if (IS_ERR(isec))
+ return PTR_ERR(isec);
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, FILE__READ, &ad);
+}
+
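+/* Slow path: produce the audit record for an inode permission check. */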
+static noinline int audit_inode_permission(struct inode *inode,
+ u32 perms, u32 audited, u32 denied,
+ int result)
+{
+ struct common_audit_data ad;
+ struct inode_security_struct *isec = selinux_inode(inode);
+
+ ad.type = LSM_AUDIT_DATA_INODE;
+ ad.u.inode = inode;
+
+ return slow_avc_audit(&selinux_state,
+ current_sid(), isec->sid, isec->sclass, perms,
+ audited, denied, result, &ad);
+}
+
+static int selinux_inode_permission(struct inode *inode, int mask)
+{
+ const struct cred *cred = current_cred();
+ u32 perms;
+ bool from_access;
+ bool no_block = mask & MAY_NOT_BLOCK;
+ struct inode_security_struct *isec;
+ u32 sid;
+ struct av_decision avd;
+ int rc, rc2;
+ u32 audited, denied;
+
+ from_access = mask & MAY_ACCESS;
+ mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+
+ /* No permission to check. Existence test. */
+ if (!mask)
+ return 0;
+
+ validate_creds(cred);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ perms = file_mask_to_av(inode->i_mode, mask);
+
+ sid = cred_sid(cred);
+ isec = inode_security_rcu(inode, no_block);
+ if (IS_ERR(isec))
+ return PTR_ERR(isec);
+
+ rc = avc_has_perm_noaudit(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, 0,
+ &avd);
+ audited = avc_audit_required(perms, &avd, rc,
+ from_access ? FILE__AUDIT_ACCESS : 0,
+ &denied);
+ if (likely(!audited))
+ return rc;
+
+ rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
+ if (rc2)
+ return rc2;
+ return rc;
+}
+
+static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ const struct cred *cred = current_cred();
+ struct inode *inode = d_backing_inode(dentry);
+ unsigned int ia_valid = iattr->ia_valid;
+ __u32 av = FILE__WRITE;
+
+ /* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
+ if (ia_valid & ATTR_FORCE) {
+ ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
+ ATTR_FORCE);
+ if (!ia_valid)
+ return 0;
+ }
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
+ ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
+ return dentry_has_perm(cred, dentry, FILE__SETATTR);
+
+ if (selinux_policycap_openperm() &&
+ inode->i_sb->s_magic != SOCKFS_MAGIC &&
+ (ia_valid & ATTR_SIZE) &&
+ !(ia_valid & ATTR_FILE))
+ av |= FILE__OPEN;
+
+ return dentry_has_perm(cred, dentry, av);
+}
+
+static int selinux_inode_getattr(const struct path *path)
+{
+ return path_has_perm(current_cred(), path, FILE__GETATTR);
+}
+
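+/*
+ * Check CAP_MAC_ADMIN through both the commoncap and SELinux
+ * capability checks, optionally suppressing the audit record.
+ */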
+static bool has_cap_mac_admin(bool audit)
+{
+ const struct cred *cred = current_cred();
+ unsigned int opts = audit ? CAP_OPT_NONE : CAP_OPT_NOAUDIT;
+
+ if (cap_capable(cred, &init_user_ns, CAP_MAC_ADMIN, opts))
+ return false;
+ if (cred_has_capability(cred, CAP_MAC_ADMIN, opts, true))
+ return false;
+ return true;
+}
+
+static int selinux_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct inode_security_struct *isec;
+ struct superblock_security_struct *sbsec;
+ struct common_audit_data ad;
+ u32 newsid, sid = current_sid();
+ int rc = 0;
+
+ if (strcmp(name, XATTR_NAME_SELINUX)) {
+ rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (rc)
+ return rc;
+
+ /* Not an attribute we recognize, so just check the
+ ordinary setattr permission. */
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
+
+ if (!selinux_initialized(&selinux_state))
+ return (inode_owner_or_capable(mnt_userns, inode) ? 0 : -EPERM);
+
+ sbsec = selinux_superblock(inode->i_sb);
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+
+ if (!inode_owner_or_capable(mnt_userns, inode))
+ return -EPERM;
+
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+
+ isec = backing_inode_security(dentry);
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass,
+ FILE__RELABELFROM, &ad);
+ if (rc)
+ return rc;
+
+ rc = security_context_to_sid(&selinux_state, value, size, &newsid,
+ GFP_KERNEL);
+ if (rc == -EINVAL) {
+ if (!has_cap_mac_admin(true)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (value) {
+ const char *str = value;
+
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ } else {
+ audit_size = 0;
+ }
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ if (!ab)
+ return rc;
+ audit_log_format(ab, "op=setxattr invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
+ return rc;
+ }
+ rc = security_context_to_sid_force(&selinux_state, value,
+ size, &newsid);
+ }
+ if (rc)
+ return rc;
+
+ rc = avc_has_perm(&selinux_state,
+ sid, newsid, isec->sclass,
+ FILE__RELABELTO, &ad);
+ if (rc)
+ return rc;
+
+ rc = security_validate_transition(&selinux_state, isec->sid, newsid,
+ sid, isec->sclass);
+ if (rc)
+ return rc;
+
+ return avc_has_perm(&selinux_state,
+ newsid,
+ sbsec->sid,
+ SECCLASS_FILESYSTEM,
+ FILESYSTEM__ASSOCIATE,
+ &ad);
+}
+
+static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size,
+ int flags)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct inode_security_struct *isec;
+ u32 newsid;
+ int rc;
+
+ if (strcmp(name, XATTR_NAME_SELINUX)) {
+ /* Not an attribute we recognize, so nothing to do. */
+ return;
+ }
+
+ if (!selinux_initialized(&selinux_state)) {
+ /* If we haven't even been initialized, then we can't validate
+ * against a policy, so leave the label as invalid. It may
+ * resolve to a valid label on the next revalidation try if
+ * we've since initialized.
+ */
+ return;
+ }
+
+ rc = security_context_to_sid_force(&selinux_state, value, size,
+ &newsid);
+ if (rc) {
+ pr_err("SELinux: unable to map context to SID"
+ "for (%s, %lu), rc=%d\n",
+ inode->i_sb->s_id, inode->i_ino, -rc);
+ return;
+ }
+
+ isec = backing_inode_security(dentry);
+ spin_lock(&isec->lock);
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ isec->sid = newsid;
+ isec->initialized = LABEL_INITIALIZED;
+ spin_unlock(&isec->lock);
+}
+
+static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
+{
+ const struct cred *cred = current_cred();
+
+ return dentry_has_perm(cred, dentry, FILE__GETATTR);
+}
+
+static int selinux_inode_listxattr(struct dentry *dentry)
+{
+ const struct cred *cred = current_cred();
+
+ return dentry_has_perm(cred, dentry, FILE__GETATTR);
+}
+
+static int selinux_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
+{
+ if (strcmp(name, XATTR_NAME_SELINUX)) {
+ int rc = cap_inode_removexattr(mnt_userns, dentry, name);
+ if (rc)
+ return rc;
+
+ /* Not an attribute we recognize, so just check the
+ ordinary setattr permission. */
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
+
+ if (!selinux_initialized(&selinux_state))
+ return 0;
+
+ /* No one is allowed to remove a SELinux security label.
+ You can change the label, but all data must be labeled. */
+ return -EACCES;
+}
+
+static int selinux_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ int ret;
+ u32 perm;
+
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = *path;
+
+ /*
+ * Set the permission needed based on the type of mark being set;
+ * superblock watches require an additional filesystem-level check.
+ */
+ switch (obj_type) {
+ case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+ perm = FILE__WATCH_MOUNT;
+ break;
+ case FSNOTIFY_OBJ_TYPE_SB:
+ perm = FILE__WATCH_SB;
+ ret = superblock_has_perm(current_cred(), path->dentry->d_sb,
+ FILESYSTEM__WATCH, &ad);
+ if (ret)
+ return ret;
+ break;
+ case FSNOTIFY_OBJ_TYPE_INODE:
+ perm = FILE__WATCH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* blocking watches require the file:watch_with_perm permission */
+ if (mask & (ALL_FSNOTIFY_PERM_EVENTS))
+ perm |= FILE__WATCH_WITH_PERM;
+
+ /* watches on read-like events need the file:watch_reads permission */
+ if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
+ perm |= FILE__WATCH_READS;
+
+ return path_has_perm(current_cred(), path, perm);
+}
+
+/*
+ * Copy the inode security context value to the user.
+ *
+ * Permission check is handled by selinux_inode_getxattr hook.
+ */
+static int selinux_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc)
+{
+ u32 size;
+ int error;
+ char *context = NULL;
+ struct inode_security_struct *isec;
+
+ /*
+ * If we're not initialized yet, then we can't validate contexts, so
+ * just let vfs_getxattr fall back to using the on-disk xattr.
+ */
+ if (!selinux_initialized(&selinux_state) ||
+ strcmp(name, XATTR_SELINUX_SUFFIX))
+ return -EOPNOTSUPP;
+
+ /*
+ * If the caller has CAP_MAC_ADMIN, then get the raw context
+ * value even if it is not defined by current policy; otherwise,
+ * use the in-core value under current policy.
+ * Use the non-auditing forms of the permission checks since
+ * getxattr may be called by unprivileged processes commonly
+ * and lack of permission just means that we fall back to the
+ * in-core context value, not a denial.
+ */
+ isec = inode_security(inode);
+ if (has_cap_mac_admin(false))
+ error = security_sid_to_context_force(&selinux_state,
+ isec->sid, &context,
+ &size);
+ else
+ error = security_sid_to_context(&selinux_state, isec->sid,
+ &context, &size);
+ if (error)
+ return error;
+ error = size;
+ if (alloc) {
+ *buffer = context;
+ goto out_nofree;
+ }
+ kfree(context);
+out_nofree:
+ return error;
+}
+
+static int selinux_inode_setsecurity(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode_security_struct *isec = inode_security_novalidate(inode);
+ struct superblock_security_struct *sbsec;
+ u32 newsid;
+ int rc;
+
+ if (strcmp(name, XATTR_SELINUX_SUFFIX))
+ return -EOPNOTSUPP;
+
+ sbsec = selinux_superblock(inode->i_sb);
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+
+ if (!value || !size)
+ return -EACCES;
+
+ rc = security_context_to_sid(&selinux_state, value, size, &newsid,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ spin_lock(&isec->lock);
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ isec->sid = newsid;
+ isec->initialized = LABEL_INITIALIZED;
+ spin_unlock(&isec->lock);
+ return 0;
+}
+
+static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+{
+ const int len = sizeof(XATTR_NAME_SELINUX);
+
+ if (!selinux_initialized(&selinux_state))
+ return 0;
+
+ if (buffer && len <= buffer_size)
+ memcpy(buffer, XATTR_NAME_SELINUX, len);
+ return len;
+}
+
+static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
+{
+ struct inode_security_struct *isec = inode_security_novalidate(inode);
+ *secid = isec->sid;
+}
+
+static int selinux_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ u32 sid;
+ struct task_security_struct *tsec;
+ struct cred *new_creds = *new;
+
+ if (new_creds == NULL) {
+ new_creds = prepare_creds();
+ if (!new_creds)
+ return -ENOMEM;
+ }
+
+ tsec = selinux_cred(new_creds);
+ /* Get label from overlay inode and set it in create_sid */
+ selinux_inode_getsecid(d_inode(src), &sid);
+ tsec->create_sid = sid;
+ *new = new_creds;
+ return 0;
+}
+
+static int selinux_inode_copy_up_xattr(const char *name)
+{
+ /* The copy_up hook above sets the initial context on an inode, but we
+ * don't then want to overwrite it by blindly copying all the lower
+ * xattrs up. Instead, we have to filter out SELinux-related xattrs.
+ */
+ if (strcmp(name, XATTR_NAME_SELINUX) == 0)
+ return 1; /* Discard */
+ /*
+ * Any other attribute apart from SELINUX is neither claimed nor
+ * supported by SELinux.
+ */
+ return -EOPNOTSUPP;
+}
+
+/* kernfs node operations */
+
+static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ u32 parent_sid, newsid, clen;
+ int rc;
+ char *context;
+
+ rc = kernfs_xattr_get(kn_dir, XATTR_NAME_SELINUX, NULL, 0);
+ if (rc == -ENODATA)
+ return 0;
+ else if (rc < 0)
+ return rc;
+
+ clen = (u32)rc;
+ context = kmalloc(clen, GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ rc = kernfs_xattr_get(kn_dir, XATTR_NAME_SELINUX, context, clen);
+ if (rc < 0) {
+ kfree(context);
+ return rc;
+ }
+
+ rc = security_context_to_sid(&selinux_state, context, clen, &parent_sid,
+ GFP_KERNEL);
+ kfree(context);
+ if (rc)
+ return rc;
+
+ if (tsec->create_sid) {
+ newsid = tsec->create_sid;
+ } else {
+ u16 secclass = inode_mode_to_security_class(kn->mode);
+ struct qstr q;
+
+ q.name = kn->name;
+ q.hash_len = hashlen_string(kn_dir, kn->name);
+
+ rc = security_transition_sid(&selinux_state, tsec->sid,
+ parent_sid, secclass, &q,
+ &newsid);
+ if (rc)
+ return rc;
+ }
+
+ rc = security_sid_to_context_force(&selinux_state, newsid,
+ &context, &clen);
+ if (rc)
+ return rc;
+
+ rc = kernfs_xattr_set(kn, XATTR_NAME_SELINUX, context, clen,
+ XATTR_CREATE);
+ kfree(context);
+ return rc;
+}
+
+
+/* file security operations */
+
+static int selinux_revalidate_file_permission(struct file *file, int mask)
+{
+ const struct cred *cred = current_cred();
+ struct inode *inode = file_inode(file);
+
+ /* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
+ if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
+ mask |= MAY_APPEND;
+
+ return file_has_perm(cred, file,
+ file_mask_to_av(inode->i_mode, mask));
+}
+
+static int selinux_file_permission(struct file *file, int mask)
+{
+ struct inode *inode = file_inode(file);
+ struct file_security_struct *fsec = selinux_file(file);
+ struct inode_security_struct *isec;
+ u32 sid = current_sid();
+
+ if (!mask)
+ /* No permission to check. Existence test. */
+ return 0;
+
+ isec = inode_security(inode);
+ if (sid == fsec->sid && fsec->isid == isec->sid &&
+ fsec->pseqno == avc_policy_seqno(&selinux_state))
+ /* No change since file_open check. */
+ return 0;
+
+ return selinux_revalidate_file_permission(file, mask);
+}
+
+static int selinux_file_alloc_security(struct file *file)
+{
+ struct file_security_struct *fsec = selinux_file(file);
+ u32 sid = current_sid();
+
+ fsec->sid = sid;
+ fsec->fown_sid = sid;
+
+ return 0;
+}
+
+/*
+ * Check whether a task has the ioctl permission, including the
+ * cmd-specific extended permission, on an inode.
+ */
+static int ioctl_has_perm(const struct cred *cred, struct file *file,
+ u32 requested, u16 cmd)
+{
+ struct common_audit_data ad;
+ struct file_security_struct *fsec = selinux_file(file);
+ struct inode *inode = file_inode(file);
+ struct inode_security_struct *isec;
+ struct lsm_ioctlop_audit ioctl;
+ u32 ssid = cred_sid(cred);
+ int rc;
+ u8 driver = cmd >> 8;
+ u8 xperm = cmd & 0xff;
+
+ ad.type = LSM_AUDIT_DATA_IOCTL_OP;
+ ad.u.op = &ioctl;
+ ad.u.op->cmd = cmd;
+ ad.u.op->path = file->f_path;
+
+ if (ssid != fsec->sid) {
+ rc = avc_has_perm(&selinux_state,
+ ssid, fsec->sid,
+ SECCLASS_FD,
+ FD__USE,
+ &ad);
+ if (rc)
+ goto out;
+ }
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ isec = inode_security(inode);
+ rc = avc_has_extended_perms(&selinux_state,
+ ssid, isec->sid, isec->sclass,
+ requested, driver, xperm, &ad);
+out:
+ return rc;
+}
+
+static int selinux_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ const struct cred *cred = current_cred();
+ int error = 0;
+
+ switch (cmd) {
+ case FIONREAD:
+ case FIBMAP:
+ case FIGETBSZ:
+ case FS_IOC_GETFLAGS:
+ case FS_IOC_GETVERSION:
+ error = file_has_perm(cred, file, FILE__GETATTR);
+ break;
+
+ case FS_IOC_SETFLAGS:
+ case FS_IOC_SETVERSION:
+ error = file_has_perm(cred, file, FILE__SETATTR);
+ break;
+
+ /* sys_ioctl() checks */
+ case FIONBIO:
+ case FIOASYNC:
+ error = file_has_perm(cred, file, 0);
+ break;
+
+ case KDSKBENT:
+ case KDSKBSENT:
+ error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
+ CAP_OPT_NONE, true);
+ break;
+
+ case FIOCLEX:
+ case FIONCLEX:
+ if (!selinux_policycap_ioctl_skip_cloexec())
+ error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
+ break;
+
+ /* default case assumes that the command will go
+ * to the file's ioctl() function.
+ */
+ default:
+ error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
+ }
+ return error;
+}
+
+static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ /*
+ * If we are in a 64-bit kernel running 32-bit userspace, we need to
+ * make sure we don't compare 32-bit flags to 64-bit flags.
+ */
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ case FS_IOC32_SETVERSION:
+ cmd = FS_IOC_SETVERSION;
+ break;
+ default:
+ break;
+ }
+
+ return selinux_file_ioctl(file, cmd, arg);
+}
+
+static int default_noexec __ro_after_init;
+
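+/*
+ * Common mmap/mprotect check: executable anonymous, private-inode, or
+ * writable private mappings require the process execmem permission;
+ * file-backed mappings also need the matching file permissions.
+ */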
+static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+{
+ const struct cred *cred = current_cred();
+ u32 sid = cred_sid(cred);
+ int rc = 0;
+
+ if (default_noexec &&
+ (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+ (!shared && (prot & PROT_WRITE)))) {
+ /*
+ * We are making executable an anonymous mapping or a
+ * private file mapping that will also be writable.
+ * This has an additional check.
+ */
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
+ PROCESS__EXECMEM, NULL);
+ if (rc)
+ goto error;
+ }
+
+ if (file) {
+ /* read access is always possible with a mapping */
+ u32 av = FILE__READ;
+
+ /* write access only matters if the mapping is shared */
+ if (shared && (prot & PROT_WRITE))
+ av |= FILE__WRITE;
+
+ if (prot & PROT_EXEC)
+ av |= FILE__EXECUTE;
+
+ return file_has_perm(cred, file, av);
+ }
+
+error:
+ return rc;
+}
+
+static int selinux_mmap_addr(unsigned long addr)
+{
+ int rc = 0;
+
+ if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+ u32 sid = current_sid();
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_MEMPROTECT,
+ MEMPROTECT__MMAP_ZERO, NULL);
+ }
+
+ return rc;
+}
+
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ struct common_audit_data ad;
+ int rc;
+
+ if (file) {
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+ rc = inode_has_perm(current_cred(), file_inode(file),
+ FILE__MAP, &ad);
+ if (rc)
+ return rc;
+ }
+
+ if (checkreqprot_get(&selinux_state))
+ prot = reqprot;
+
+ return file_map_prot_check(file, prot,
+ (flags & MAP_TYPE) == MAP_SHARED);
+}
+
+static int selinux_file_mprotect(struct vm_area_struct *vma,
+ unsigned long reqprot,
+ unsigned long prot)
+{
+ const struct cred *cred = current_cred();
+ u32 sid = cred_sid(cred);
+
+ if (checkreqprot_get(&selinux_state))
+ prot = reqprot;
+
+ if (default_noexec &&
+ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ int rc = 0;
+ if (vma->vm_start >= vma->vm_mm->start_brk &&
+ vma->vm_end <= vma->vm_mm->brk) {
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
+ PROCESS__EXECHEAP, NULL);
+ } else if (!vma->vm_file &&
+ ((vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack) ||
+ vma_is_stack_for_current(vma))) {
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
+ PROCESS__EXECSTACK, NULL);
+ } else if (vma->vm_file && vma->anon_vma) {
+ /*
+ * We are making executable a file mapping that has
+ * had some COW done. Since pages might have been
+ * written, check ability to execute the possibly
+ * modified content. This typically should only
+ * occur for text relocations.
+ */
+ rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD);
+ }
+ if (rc)
+ return rc;
+ }
+
+ return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
+}
+
+static int selinux_file_lock(struct file *file, unsigned int cmd)
+{
+ const struct cred *cred = current_cred();
+
+ return file_has_perm(cred, file, FILE__LOCK);
+}
+
+static int selinux_file_fcntl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ const struct cred *cred = current_cred();
+ int err = 0;
+
+ switch (cmd) {
+ case F_SETFL:
+ if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
+ err = file_has_perm(cred, file, FILE__WRITE);
+ break;
+ }
+ fallthrough;
+ case F_SETOWN:
+ case F_SETSIG:
+ case F_GETFL:
+ case F_GETOWN:
+ case F_GETSIG:
+ case F_GETOWNER_UIDS:
+ /* Just check FD__USE permission */
+ err = file_has_perm(cred, file, 0);
+ break;
+ case F_GETLK:
+ case F_SETLK:
+ case F_SETLKW:
+ case F_OFD_GETLK:
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
+#if BITS_PER_LONG == 32
+ case F_GETLK64:
+ case F_SETLK64:
+ case F_SETLKW64:
+#endif
+ err = file_has_perm(cred, file, FILE__LOCK);
+ break;
+ }
+
+ return err;
+}
+
+static void selinux_file_set_fowner(struct file *file)
+{
+ struct file_security_struct *fsec;
+
+ fsec = selinux_file(file);
+ fsec->fown_sid = current_sid();
+}
+
+static int selinux_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int signum)
+{
+ struct file *file;
+ u32 sid = task_sid_obj(tsk);
+ u32 perm;
+ struct file_security_struct *fsec;
+
+ /* struct fown_struct is never outside the context of a struct file */
+ file = container_of(fown, struct file, f_owner);
+
+ fsec = selinux_file(file);
+
+ if (!signum)
+ perm = signal_to_av(SIGIO); /* as per send_sigio_to_task */
+ else
+ perm = signal_to_av(signum);
+
+ return avc_has_perm(&selinux_state,
+ fsec->fown_sid, sid,
+ SECCLASS_PROCESS, perm, NULL);
+}
+
+static int selinux_file_receive(struct file *file)
+{
+ const struct cred *cred = current_cred();
+
+ return file_has_perm(cred, file, file_to_av(file));
+}
+
+static int selinux_file_open(struct file *file)
+{
+ struct file_security_struct *fsec;
+ struct inode_security_struct *isec;
+
+ fsec = selinux_file(file);
+ isec = inode_security(file_inode(file));
+ /*
+ * Save inode label and policy sequence number
+ * at open-time so that selinux_file_permission
+ * can determine whether revalidation is necessary.
+ * Task label is already saved in the file security
+ * struct as its SID.
+ */
+ fsec->isid = isec->sid;
+ fsec->pseqno = avc_policy_seqno(&selinux_state);
+ /*
+ * Since the inode label or policy seqno may have changed
+ * between the selinux_inode_permission check and the saving
+ * of state above, recheck that access is still permitted.
+ * Otherwise, access might never be revalidated against the
+ * new inode label or new policy.
+ * This check is not redundant - do not remove.
+ */
+ return file_path_has_perm(file->f_cred, file, open_file_to_av(file));
+}
+
+/* task security operations */
+
+static int selinux_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS, PROCESS__FORK, NULL);
+}
+
+/*
+ * prepare a new set of credentials for modification
+ */
+static int selinux_cred_prepare(struct cred *new, const struct cred *old,
+ gfp_t gfp)
+{
+ const struct task_security_struct *old_tsec = selinux_cred(old);
+ struct task_security_struct *tsec = selinux_cred(new);
+
+ *tsec = *old_tsec;
+ return 0;
+}
+
+/*
+ * transfer the SELinux data to a blank set of creds
+ */
+static void selinux_cred_transfer(struct cred *new, const struct cred *old)
+{
+ const struct task_security_struct *old_tsec = selinux_cred(old);
+ struct task_security_struct *tsec = selinux_cred(new);
+
+ *tsec = *old_tsec;
+}
+
+static void selinux_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = cred_sid(c);
+}
+
+/*
+ * set the security data for a kernel service
+ * - all the creation contexts are set to unlabelled
+ */
+static int selinux_kernel_act_as(struct cred *new, u32 secid)
+{
+ struct task_security_struct *tsec = selinux_cred(new);
+ u32 sid = current_sid();
+ int ret;
+
+ ret = avc_has_perm(&selinux_state,
+ sid, secid,
+ SECCLASS_KERNEL_SERVICE,
+ KERNEL_SERVICE__USE_AS_OVERRIDE,
+ NULL);
+ if (ret == 0) {
+ tsec->sid = secid;
+ tsec->create_sid = 0;
+ tsec->keycreate_sid = 0;
+ tsec->sockcreate_sid = 0;
+ }
+ return ret;
+}
+
+/*
+ * set the file creation context in a security record to the same as the
+ * objective context of the specified inode
+ */
+static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
+{
+ struct inode_security_struct *isec = inode_security(inode);
+ struct task_security_struct *tsec = selinux_cred(new);
+ u32 sid = current_sid();
+ int ret;
+
+ ret = avc_has_perm(&selinux_state,
+ sid, isec->sid,
+ SECCLASS_KERNEL_SERVICE,
+ KERNEL_SERVICE__CREATE_FILES_AS,
+ NULL);
+
+ if (ret == 0)
+ tsec->create_sid = isec->sid;
+ return ret;
+}
+
+static int selinux_kernel_module_request(char *kmod_name)
+{
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_KMOD;
+ ad.u.kmod_name = kmod_name;
+
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL, SECCLASS_SYSTEM,
+ SYSTEM__MODULE_REQUEST, &ad);
+}
+
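+/*
+ * Check permission to load a kernel module, distinguishing
+ * init_module (no file) from finit_module (file-backed).
+ */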
+static int selinux_kernel_module_from_file(struct file *file)
+{
+ struct common_audit_data ad;
+ struct inode_security_struct *isec;
+ struct file_security_struct *fsec;
+ u32 sid = current_sid();
+ int rc;
+
+ /* init_module */
+ if (file == NULL)
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_SYSTEM,
+ SYSTEM__MODULE_LOAD, NULL);
+
+ /* finit_module */
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ fsec = selinux_file(file);
+ if (sid != fsec->sid) {
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
+ if (rc)
+ return rc;
+ }
+
+ isec = inode_security(file_inode(file));
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SYSTEM,
+ SYSTEM__MODULE_LOAD, &ad);
+}
+
+static int selinux_kernel_read_file(struct file *file,
+ enum kernel_read_file_id id,
+ bool contents)
+{
+ int rc = 0;
+
+ switch (id) {
+ case READING_MODULE:
+ rc = selinux_kernel_module_from_file(contents ? file : NULL);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int selinux_kernel_load_data(enum kernel_load_data_id id, bool contents)
+{
+ int rc = 0;
+
+ switch (id) {
+ case LOADING_MODULE:
+ rc = selinux_kernel_module_from_file(NULL);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__SETPGID, NULL);
+}
+
+static int selinux_task_getpgid(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__GETPGID, NULL);
+}
+
+static int selinux_task_getsid(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__GETSESSION, NULL);
+}
+
+static void selinux_current_getsecid_subj(u32 *secid)
+{
+ *secid = current_sid();
+}
+
+static void selinux_task_getsecid_obj(struct task_struct *p, u32 *secid)
+{
+ *secid = task_sid_obj(p);
+}
+
+static int selinux_task_setnice(struct task_struct *p, int nice)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__SETSCHED, NULL);
+}
+
+static int selinux_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__SETSCHED, NULL);
+}
+
+static int selinux_task_getioprio(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__GETSCHED, NULL);
+}
+
+static int selinux_task_prlimit(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags)
+{
+ u32 av = 0;
+
+ if (!flags)
+ return 0;
+ if (flags & LSM_PRLIMIT_WRITE)
+ av |= PROCESS__SETRLIMIT;
+ if (flags & LSM_PRLIMIT_READ)
+ av |= PROCESS__GETRLIMIT;
+ return avc_has_perm(&selinux_state,
+ cred_sid(cred), cred_sid(tcred),
+ SECCLASS_PROCESS, av, NULL);
+}
+
+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+{
+ struct rlimit *old_rlim = p->signal->rlim + resource;
+
+ /* Control the ability to change the hard limit (whether
+ lowering or raising it), so that the hard limit can
+ later be used as a safe reset point for the soft limit
+ upon context transitions. See selinux_bprm_committing_creds. */
+ if (old_rlim->rlim_max != new_rlim->rlim_max)
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p),
+ SECCLASS_PROCESS, PROCESS__SETRLIMIT, NULL);
+
+ return 0;
+}
+
+static int selinux_task_setscheduler(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__SETSCHED, NULL);
+}
+
+static int selinux_task_getscheduler(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__GETSCHED, NULL);
+}
+
+static int selinux_task_movememory(struct task_struct *p)
+{
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid_obj(p), SECCLASS_PROCESS,
+ PROCESS__SETSCHED, NULL);
+}
+
+static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+{
+ u32 secid;
+ u32 perm;
+
+ if (!sig)
+ perm = PROCESS__SIGNULL; /* null signal; existence test */
+ else
+ perm = signal_to_av(sig);
+ if (!cred)
+ secid = current_sid();
+ else
+ secid = cred_sid(cred);
+ return avc_has_perm(&selinux_state,
+ secid, task_sid_obj(p), SECCLASS_PROCESS, perm, NULL);
+}
+
+static void selinux_task_to_inode(struct task_struct *p,
+ struct inode *inode)
+{
+ struct inode_security_struct *isec = selinux_inode(inode);
+ u32 sid = task_sid_obj(p);
+
+ spin_lock(&isec->lock);
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ isec->sid = sid;
+ isec->initialized = LABEL_INITIALIZED;
+ spin_unlock(&isec->lock);
+}
+
+static int selinux_userns_create(const struct cred *cred)
+{
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, sid, SECCLASS_USER_NAMESPACE,
+ USER_NAMESPACE__CREATE, NULL);
+}
+
+/* Returns error only if unable to parse addresses */
+static int selinux_parse_skb_ipv4(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto)
+{
+ int offset, ihlen, ret = -EINVAL;
+ struct iphdr _iph, *ih;
+
+ offset = skb_network_offset(skb);
+ ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
+ if (ih == NULL)
+ goto out;
+
+ ihlen = ih->ihl * 4;
+ if (ihlen < sizeof(_iph))
+ goto out;
+
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
+ ret = 0;
+
+ if (proto)
+ *proto = ih->protocol;
+
+ switch (ih->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph, *th;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ offset += ihlen;
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ break;
+
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
+ break;
+ }
+
+ case IPPROTO_UDP: {
+ struct udphdr _udph, *uh;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ offset += ihlen;
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ break;
+
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
+ break;
+ }
+
+ case IPPROTO_DCCP: {
+ struct dccp_hdr _dccph, *dh;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ offset += ihlen;
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh == NULL)
+ break;
+
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
+ break;
+ }
+
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ case IPPROTO_SCTP: {
+ struct sctphdr _sctph, *sh;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ offset += ihlen;
+ sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ break;
+
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+/* Returns error only if unable to parse addresses */
+static int selinux_parse_skb_ipv6(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto)
+{
+ u8 nexthdr;
+ int ret = -EINVAL, offset;
+ struct ipv6hdr _ipv6h, *ip6;
+ __be16 frag_off;
+
+ offset = skb_network_offset(skb);
+ ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+ if (ip6 == NULL)
+ goto out;
+
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
+ ret = 0;
+
+ nexthdr = ip6->nexthdr;
+ offset += sizeof(_ipv6h);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+ if (offset < 0)
+ goto out;
+
+ if (proto)
+ *proto = nexthdr;
+
+ switch (nexthdr) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph, *th;
+
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ break;
+
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
+ break;
+ }
+
+ case IPPROTO_UDP: {
+ struct udphdr _udph, *uh;
+
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ break;
+
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
+ break;
+ }
+
+ case IPPROTO_DCCP: {
+ struct dccp_hdr _dccph, *dh;
+
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh == NULL)
+ break;
+
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
+ break;
+ }
+
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ case IPPROTO_SCTP: {
+ struct sctphdr _sctph, *sh;
+
+ sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ break;
+
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
+ break;
+ }
+#endif
+ /* includes fragments */
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+#endif /* IPV6 */
+
+static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
+ char **_addrp, int src, u8 *proto)
+{
+ char *addrp;
+ int ret;
+
+ switch (ad->u.net->family) {
+ case PF_INET:
+ ret = selinux_parse_skb_ipv4(skb, ad, proto);
+ if (ret)
+ goto parse_error;
+ addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+ &ad->u.net->v4info.daddr);
+ goto okay;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+ ret = selinux_parse_skb_ipv6(skb, ad, proto);
+ if (ret)
+ goto parse_error;
+ addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+ &ad->u.net->v6info.daddr);
+ goto okay;
+#endif /* IPV6 */
+ default:
+ addrp = NULL;
+ goto okay;
+ }
+
+parse_error:
+ pr_warn(
+ "SELinux: failure in selinux_parse_skb(),"
+ " unable to parse packet\n");
+ return ret;
+
+okay:
+ if (_addrp)
+ *_addrp = addrp;
+ return 0;
+}
+
+/**
+ * selinux_skb_peerlbl_sid - Determine the peer label of a packet
+ * @skb: the packet
+ * @family: protocol family
+ * @sid: the packet's peer label SID
+ *
+ * Description:
+ * Check the various different forms of network peer labeling and determine
+ * the peer label/SID for the packet; most of the magic actually occurs in
+ * the security server function security_net_peersid_cmp(). The function
+ * returns zero if the value in @sid is valid (although it may be SECSID_NULL)
+ * or -EACCES if @sid is invalid due to inconsistencies with the different
+ * peer labels.
+ *
+ */
+static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+{
+ int err;
+ u32 xfrm_sid;
+ u32 nlbl_sid;
+ u32 nlbl_type;
+
+ err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
+ if (unlikely(err))
+ return -EACCES;
+ err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ if (unlikely(err))
+ return -EACCES;
+
+ err = security_net_peersid_resolve(&selinux_state, nlbl_sid,
+ nlbl_type, xfrm_sid, sid);
+ if (unlikely(err)) {
+ pr_warn(
+ "SELinux: failure in selinux_skb_peerlbl_sid(),"
+ " unable to determine packet's peer label\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
+{
+ int err = 0;
+
+ if (skb_sid != SECSID_NULL)
+ err = security_sid_mls_copy(&selinux_state, sk_sid, skb_sid,
+ conn_sid);
+ else
+ *conn_sid = sk_sid;
+
+ return err;
+}
+
+/* socket security operations */
+
+static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+ u16 secclass, u32 *socksid)
+{
+ if (tsec->sockcreate_sid > SECSID_NULL) {
+ *socksid = tsec->sockcreate_sid;
+ return 0;
+ }
+
+ return security_transition_sid(&selinux_state, tsec->sid, tsec->sid,
+ secclass, NULL, socksid);
+}
+
+static int sock_has_perm(struct sock *sk, u32 perms)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+
+ if (sksec->sid == SECINITSID_KERNEL)
+ return 0;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = sk;
+
+ return avc_has_perm(&selinux_state,
+ current_sid(), sksec->sid, sksec->sclass, perms,
+ &ad);
+}
+
+static int selinux_socket_create(int family, int type,
+ int protocol, int kern)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ u32 newsid;
+ u16 secclass;
+ int rc;
+
+ if (kern)
+ return 0;
+
+ secclass = socket_type_to_security_class(family, type, protocol);
+ rc = socket_sockcreate_sid(tsec, secclass, &newsid);
+ if (rc)
+ return rc;
+
+ return avc_has_perm(&selinux_state,
+ tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
+}
+
+static int selinux_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));
+ struct sk_security_struct *sksec;
+ u16 sclass = socket_type_to_security_class(family, type, protocol);
+ u32 sid = SECINITSID_KERNEL;
+ int err = 0;
+
+ if (!kern) {
+ err = socket_sockcreate_sid(tsec, sclass, &sid);
+ if (err)
+ return err;
+ }
+
+ isec->sclass = sclass;
+ isec->sid = sid;
+ isec->initialized = LABEL_INITIALIZED;
+
+ if (sock->sk) {
+ sksec = sock->sk->sk_security;
+ sksec->sclass = sclass;
+ sksec->sid = sid;
+ /* Allows detection of the first association on this socket */
+ if (sksec->sclass == SECCLASS_SCTP_SOCKET)
+ sksec->sctp_assoc_state = SCTP_ASSOC_UNSET;
+
+ err = selinux_netlbl_socket_post_create(sock->sk, family);
+ }
+
+ return err;
+}
+
+static int selinux_socket_socketpair(struct socket *socka,
+ struct socket *sockb)
+{
+ struct sk_security_struct *sksec_a = socka->sk->sk_security;
+ struct sk_security_struct *sksec_b = sockb->sk->sk_security;
+
+ sksec_a->peer_sid = sksec_b->sid;
+ sksec_b->peer_sid = sksec_a->sid;
+
+ return 0;
+}
+
+/* Range of port numbers used to automatically bind.
+ Need to determine whether we should perform a name_bind
+ permission check between the socket and the port number. */
+
+static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
+{
+ struct sock *sk = sock->sk;
+ struct sk_security_struct *sksec = sk->sk_security;
+ u16 family;
+ int err;
+
+ err = sock_has_perm(sk, SOCKET__BIND);
+ if (err)
+ goto out;
+
+ /* If PF_INET or PF_INET6, check name_bind permission for the port. */
+ family = sk->sk_family;
+ if (family == PF_INET || family == PF_INET6) {
+ char *addrp;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ struct sockaddr_in *addr4 = NULL;
+ struct sockaddr_in6 *addr6 = NULL;
+ u16 family_sa;
+ unsigned short snum;
+ u32 sid, node_perm;
+
+ /*
+		 * sctp_bindx(3) is handled via selinux_sctp_bind_connect(),
+		 * which validates multiple binding addresses. Because of this
+		 * we need to check address->sa_family, as it is possible to
+		 * have sk->sk_family = PF_INET6 with addr->sa_family = AF_INET.
+ */
+ if (addrlen < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
+ family_sa = address->sa_family;
+ switch (family_sa) {
+ case AF_UNSPEC:
+ case AF_INET:
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+ addr4 = (struct sockaddr_in *)address;
+ if (family_sa == AF_UNSPEC) {
+ if (family == PF_INET6) {
+ /* Length check from inet6_bind_sk() */
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ /* Family check from __inet6_bind() */
+ goto err_af;
+ }
+ /* see __inet_bind(), we only want to allow
+ * AF_UNSPEC if the address is INADDR_ANY
+ */
+ if (addr4->sin_addr.s_addr != htonl(INADDR_ANY))
+ goto err_af;
+ family_sa = AF_INET;
+ }
+ snum = ntohs(addr4->sin_port);
+ addrp = (char *)&addr4->sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ addr6 = (struct sockaddr_in6 *)address;
+ snum = ntohs(addr6->sin6_port);
+ addrp = (char *)&addr6->sin6_addr.s6_addr;
+ break;
+ default:
+ goto err_af;
+ }
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family_sa;
+
+ if (snum) {
+ int low, high;
+
+ inet_get_local_port_range(sock_net(sk), &low, &high);
+
+ if (inet_port_requires_bind_service(sock_net(sk), snum) ||
+ snum < low || snum > high) {
+ err = sel_netport_sid(sk->sk_protocol,
+ snum, &sid);
+ if (err)
+ goto out;
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid,
+ sksec->sclass,
+ SOCKET__NAME_BIND, &ad);
+ if (err)
+ goto out;
+ }
+ }
+
+ switch (sksec->sclass) {
+ case SECCLASS_TCP_SOCKET:
+ node_perm = TCP_SOCKET__NODE_BIND;
+ break;
+
+ case SECCLASS_UDP_SOCKET:
+ node_perm = UDP_SOCKET__NODE_BIND;
+ break;
+
+ case SECCLASS_DCCP_SOCKET:
+ node_perm = DCCP_SOCKET__NODE_BIND;
+ break;
+
+ case SECCLASS_SCTP_SOCKET:
+ node_perm = SCTP_SOCKET__NODE_BIND;
+ break;
+
+ default:
+ node_perm = RAWIP_SOCKET__NODE_BIND;
+ break;
+ }
+
+ err = sel_netnode_sid(addrp, family_sa, &sid);
+ if (err)
+ goto out;
+
+ if (family_sa == AF_INET)
+ ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
+ else
+ ad.u.net->v6info.saddr = addr6->sin6_addr;
+
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid,
+ sksec->sclass, node_perm, &ad);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
+err_af:
+ /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */
+ if (sksec->sclass == SECCLASS_SCTP_SOCKET)
+ return -EINVAL;
+ return -EAFNOSUPPORT;
+}
+
+/* This supports connect(2) and SCTP connect services such as sctp_connectx(3)
+ * and sctp_sendmsg(3) as described in Documentation/security/SCTP.rst
+ */
+static int selinux_socket_connect_helper(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ struct sock *sk = sock->sk;
+ struct sk_security_struct *sksec = sk->sk_security;
+ int err;
+
+ err = sock_has_perm(sk, SOCKET__CONNECT);
+ if (err)
+ return err;
+ if (addrlen < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
+
+ /* connect(AF_UNSPEC) has special handling, as it is a documented
+ * way to disconnect the socket
+ */
+ if (address->sa_family == AF_UNSPEC)
+ return 0;
+
+ /*
+ * If a TCP, DCCP or SCTP socket, check name_connect permission
+ * for the port.
+ */
+ if (sksec->sclass == SECCLASS_TCP_SOCKET ||
+ sksec->sclass == SECCLASS_DCCP_SOCKET ||
+ sksec->sclass == SECCLASS_SCTP_SOCKET) {
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ struct sockaddr_in *addr4 = NULL;
+ struct sockaddr_in6 *addr6 = NULL;
+ unsigned short snum;
+ u32 sid, perm;
+
+		/* sctp_connectx(3) is handled via selinux_sctp_bind_connect(),
+		 * which validates multiple connect addresses. Because of this
+		 * we need to check address->sa_family, as it is possible to
+		 * have sk->sk_family = PF_INET6 with addr->sa_family = AF_INET.
+ */
+ switch (address->sa_family) {
+ case AF_INET:
+ addr4 = (struct sockaddr_in *)address;
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+ snum = ntohs(addr4->sin_port);
+ break;
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)address;
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ snum = ntohs(addr6->sin6_port);
+ break;
+ default:
+ /* Note that SCTP services expect -EINVAL, whereas
+ * others expect -EAFNOSUPPORT.
+ */
+ if (sksec->sclass == SECCLASS_SCTP_SOCKET)
+ return -EINVAL;
+ else
+ return -EAFNOSUPPORT;
+ }
+
+ err = sel_netport_sid(sk->sk_protocol, snum, &sid);
+ if (err)
+ return err;
+
+ switch (sksec->sclass) {
+ case SECCLASS_TCP_SOCKET:
+ perm = TCP_SOCKET__NAME_CONNECT;
+ break;
+ case SECCLASS_DCCP_SOCKET:
+ perm = DCCP_SOCKET__NAME_CONNECT;
+ break;
+ case SECCLASS_SCTP_SOCKET:
+ perm = SCTP_SOCKET__NAME_CONNECT;
+ break;
+ }
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->dport = htons(snum);
+ ad.u.net->family = address->sa_family;
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid, sksec->sclass, perm, &ad);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Supports connect(2), see comments in selinux_socket_connect_helper() */
+static int selinux_socket_connect(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ int err;
+ struct sock *sk = sock->sk;
+
+ err = selinux_socket_connect_helper(sock, address, addrlen);
+ if (err)
+ return err;
+
+ return selinux_netlbl_socket_connect(sk, address);
+}
+
+static int selinux_socket_listen(struct socket *sock, int backlog)
+{
+ return sock_has_perm(sock->sk, SOCKET__LISTEN);
+}
+
+static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
+{
+ int err;
+ struct inode_security_struct *isec;
+ struct inode_security_struct *newisec;
+ u16 sclass;
+ u32 sid;
+
+ err = sock_has_perm(sock->sk, SOCKET__ACCEPT);
+ if (err)
+ return err;
+
+ isec = inode_security_novalidate(SOCK_INODE(sock));
+ spin_lock(&isec->lock);
+ sclass = isec->sclass;
+ sid = isec->sid;
+ spin_unlock(&isec->lock);
+
+ newisec = inode_security_novalidate(SOCK_INODE(newsock));
+ newisec->sclass = sclass;
+ newisec->sid = sid;
+ newisec->initialized = LABEL_INITIALIZED;
+
+ return 0;
+}
+
+static int selinux_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return sock_has_perm(sock->sk, SOCKET__WRITE);
+}
+
+static int selinux_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+{
+ return sock_has_perm(sock->sk, SOCKET__READ);
+}
+
+static int selinux_socket_getsockname(struct socket *sock)
+{
+ return sock_has_perm(sock->sk, SOCKET__GETATTR);
+}
+
+static int selinux_socket_getpeername(struct socket *sock)
+{
+ return sock_has_perm(sock->sk, SOCKET__GETATTR);
+}
+
+static int selinux_socket_setsockopt(struct socket *sock, int level, int optname)
+{
+ int err;
+
+ err = sock_has_perm(sock->sk, SOCKET__SETOPT);
+ if (err)
+ return err;
+
+ return selinux_netlbl_socket_setsockopt(sock, level, optname);
+}
+
+static int selinux_socket_getsockopt(struct socket *sock, int level,
+ int optname)
+{
+ return sock_has_perm(sock->sk, SOCKET__GETOPT);
+}
+
+static int selinux_socket_shutdown(struct socket *sock, int how)
+{
+ return sock_has_perm(sock->sk, SOCKET__SHUTDOWN);
+}
+
+static int selinux_socket_unix_stream_connect(struct sock *sock,
+ struct sock *other,
+ struct sock *newsk)
+{
+ struct sk_security_struct *sksec_sock = sock->sk_security;
+ struct sk_security_struct *sksec_other = other->sk_security;
+ struct sk_security_struct *sksec_new = newsk->sk_security;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ int err;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other;
+
+ err = avc_has_perm(&selinux_state,
+ sksec_sock->sid, sksec_other->sid,
+ sksec_other->sclass,
+ UNIX_STREAM_SOCKET__CONNECTTO, &ad);
+ if (err)
+ return err;
+
+ /* server child socket */
+ sksec_new->peer_sid = sksec_sock->sid;
+ err = security_sid_mls_copy(&selinux_state, sksec_other->sid,
+ sksec_sock->sid, &sksec_new->sid);
+ if (err)
+ return err;
+
+ /* connecting socket */
+ sksec_sock->peer_sid = sksec_new->sid;
+
+ return 0;
+}
+
+static int selinux_socket_unix_may_send(struct socket *sock,
+ struct socket *other)
+{
+ struct sk_security_struct *ssec = sock->sk->sk_security;
+ struct sk_security_struct *osec = other->sk->sk_security;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other->sk;
+
+ return avc_has_perm(&selinux_state,
+ ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
+ &ad);
+}
+
+static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
+ char *addrp, u16 family, u32 peer_sid,
+ struct common_audit_data *ad)
+{
+ int err;
+ u32 if_sid;
+ u32 node_sid;
+
+ err = sel_netif_sid(ns, ifindex, &if_sid);
+ if (err)
+ return err;
+ err = avc_has_perm(&selinux_state,
+ peer_sid, if_sid,
+ SECCLASS_NETIF, NETIF__INGRESS, ad);
+ if (err)
+ return err;
+
+ err = sel_netnode_sid(addrp, family, &node_sid);
+ if (err)
+ return err;
+ return avc_has_perm(&selinux_state,
+ peer_sid, node_sid,
+ SECCLASS_NODE, NODE__RECVFROM, ad);
+}
+
+static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
+ u16 family)
+{
+ int err = 0;
+ struct sk_security_struct *sksec = sk->sk_security;
+ u32 sk_sid = sksec->sid;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ char *addrp;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
+ err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
+ if (err)
+ return err;
+
+ if (selinux_secmark_enabled()) {
+ err = avc_has_perm(&selinux_state,
+ sk_sid, skb->secmark, SECCLASS_PACKET,
+ PACKET__RECV, &ad);
+ if (err)
+ return err;
+ }
+
+ err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
+ if (err)
+ return err;
+ err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
+
+ return err;
+}
+
+static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ struct sk_security_struct *sksec = sk->sk_security;
+ u16 family = sk->sk_family;
+ u32 sk_sid = sksec->sid;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ char *addrp;
+ u8 secmark_active;
+ u8 peerlbl_active;
+
+ if (family != PF_INET && family != PF_INET6)
+ return 0;
+
+ /* Handle mapped IPv4 packets arriving via IPv6 sockets */
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+
+	/* If any sort of compatibility mode is enabled then hand off processing
+ * to the selinux_sock_rcv_skb_compat() function to deal with the
+ * special handling. We do this in an attempt to keep this function
+ * as fast and as clean as possible. */
+ if (!selinux_policycap_netpeer())
+ return selinux_sock_rcv_skb_compat(sk, skb, family);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return 0;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
+ err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
+ if (err)
+ return err;
+
+ if (peerlbl_active) {
+ u32 peer_sid;
+
+ err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
+ if (err)
+ return err;
+ err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
+ addrp, family, peer_sid, &ad);
+ if (err) {
+ selinux_netlbl_err(skb, family, err, 0);
+ return err;
+ }
+ err = avc_has_perm(&selinux_state,
+ sk_sid, peer_sid, SECCLASS_PEER,
+ PEER__RECV, &ad);
+ if (err) {
+ selinux_netlbl_err(skb, family, err, 0);
+ return err;
+ }
+ }
+
+ if (secmark_active) {
+ err = avc_has_perm(&selinux_state,
+ sk_sid, skb->secmark, SECCLASS_PACKET,
+ PACKET__RECV, &ad);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+ int __user *optlen, unsigned len)
+{
+ int err = 0;
+ char *scontext;
+ u32 scontext_len;
+ struct sk_security_struct *sksec = sock->sk->sk_security;
+ u32 peer_sid = SECSID_NULL;
+
+ if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
+ sksec->sclass == SECCLASS_TCP_SOCKET ||
+ sksec->sclass == SECCLASS_SCTP_SOCKET)
+ peer_sid = sksec->peer_sid;
+ if (peer_sid == SECSID_NULL)
+ return -ENOPROTOOPT;
+
+ err = security_sid_to_context(&selinux_state, peer_sid, &scontext,
+ &scontext_len);
+ if (err)
+ return err;
+
+ if (scontext_len > len) {
+ err = -ERANGE;
+ goto out_len;
+ }
+
+ if (copy_to_user(optval, scontext, scontext_len))
+ err = -EFAULT;
+
+out_len:
+ if (put_user(scontext_len, optlen))
+ err = -EFAULT;
+ kfree(scontext);
+ return err;
+}
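+
+/* Illustrative userspace sketch (not kernel code): the context returned by
+ * the hook above is what a process reads with getsockopt(2)/SO_PEERSEC on a
+ * connected socket of one of the classes checked above, e.g.:
+ *
+ *	char ctx[256];
+ *	socklen_t len = sizeof(ctx);
+ *
+ *	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, ctx, &len) == 0)
+ *		printf("peer context: %.*s\n", (int)len, ctx);
+ *
+ * An undersized buffer surfaces the -ERANGE above as errno == ERANGE, with
+ * the required length written back through the option length.
+ */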
+
+static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+{
+ u32 peer_secid = SECSID_NULL;
+ u16 family;
+ struct inode_security_struct *isec;
+
+ if (skb && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+ else if (skb && skb->protocol == htons(ETH_P_IPV6))
+ family = PF_INET6;
+ else if (sock)
+ family = sock->sk->sk_family;
+ else
+ goto out;
+
+ if (sock && family == PF_UNIX) {
+ isec = inode_security_novalidate(SOCK_INODE(sock));
+ peer_secid = isec->sid;
+ } else if (skb)
+ selinux_skb_peerlbl_sid(skb, family, &peer_secid);
+
+out:
+ *secid = peer_secid;
+ if (peer_secid == SECSID_NULL)
+ return -EINVAL;
+ return 0;
+}
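+
+/* Illustrative userspace sketch (not kernel code): for datagram sockets the
+ * secid resolved above backs the SCM_SECURITY ancillary message, which a
+ * receiver opts into with SO_PASSSEC:
+ *
+ *	int one = 1;
+ *
+ *	setsockopt(fd, SOL_SOCKET, SO_PASSSEC, &one, sizeof(one));
+ *
+ * Subsequent recvmsg(2) calls then carry an SCM_SECURITY cmsg holding the
+ * sender's security context as a string.
+ */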
+
+static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
+{
+ struct sk_security_struct *sksec;
+
+ sksec = kzalloc(sizeof(*sksec), priority);
+ if (!sksec)
+ return -ENOMEM;
+
+ sksec->peer_sid = SECINITSID_UNLABELED;
+ sksec->sid = SECINITSID_UNLABELED;
+ sksec->sclass = SECCLASS_SOCKET;
+ selinux_netlbl_sk_security_reset(sksec);
+ sk->sk_security = sksec;
+
+ return 0;
+}
+
+static void selinux_sk_free_security(struct sock *sk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ sk->sk_security = NULL;
+ selinux_netlbl_sk_security_free(sksec);
+ kfree(sksec);
+}
+
+static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct sk_security_struct *newsksec = newsk->sk_security;
+
+ newsksec->sid = sksec->sid;
+ newsksec->peer_sid = sksec->peer_sid;
+ newsksec->sclass = sksec->sclass;
+
+ selinux_netlbl_sk_security_reset(newsksec);
+}
+
+static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
+{
+ if (!sk)
+ *secid = SECINITSID_ANY_SOCKET;
+ else {
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ *secid = sksec->sid;
+ }
+}
+
+static void selinux_sock_graft(struct sock *sk, struct socket *parent)
+{
+ struct inode_security_struct *isec =
+ inode_security_novalidate(SOCK_INODE(parent));
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
+ sk->sk_family == PF_UNIX)
+ isec->sid = sksec->sid;
+ sksec->sclass = isec->sclass;
+}
+
+/*
+ * Determines peer_secid for the asoc and updates socket's peer label
+ * if it's the first association on the socket.
+ */
+static int selinux_sctp_process_new_assoc(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ struct sock *sk = asoc->base.sk;
+ u16 family = sk->sk_family;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ int err;
+
+ /* handle mapped IPv4 packets arriving via IPv6 sockets */
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+
+ if (selinux_peerlbl_enabled()) {
+ asoc->peer_secid = SECSID_NULL;
+
+ /* This will return peer_sid = SECSID_NULL if there are
+ * no peer labels, see security_net_peersid_resolve().
+ */
+ err = selinux_skb_peerlbl_sid(skb, family, &asoc->peer_secid);
+ if (err)
+ return err;
+
+ if (asoc->peer_secid == SECSID_NULL)
+ asoc->peer_secid = SECINITSID_UNLABELED;
+ } else {
+ asoc->peer_secid = SECINITSID_UNLABELED;
+ }
+
+ if (sksec->sctp_assoc_state == SCTP_ASSOC_UNSET) {
+ sksec->sctp_assoc_state = SCTP_ASSOC_SET;
+
+		/* This is the first association on the socket. As the peer
+		 * SID was allowed by peer recv (and the netif/node checks),
+		 * it is approved by policy and used as the primary peer SID
+		 * for getpeercon(3).
+ */
+ sksec->peer_sid = asoc->peer_secid;
+ } else if (sksec->peer_sid != asoc->peer_secid) {
+ /* Other association peer SIDs are checked to enforce
+ * consistency among the peer SIDs.
+ */
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = asoc->base.sk;
+ err = avc_has_perm(&selinux_state,
+ sksec->peer_sid, asoc->peer_secid,
+ sksec->sclass, SCTP_SOCKET__ASSOCIATION,
+ &ad);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/* Called whenever SCTP receives an INIT or COOKIE ECHO chunk. This
+ * happens on an incoming connect(2), sctp_connectx(3) or
+ * sctp_sendmsg(3) (with no association already present).
+ */
+static int selinux_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
+ u32 conn_sid;
+ int err;
+
+ if (!selinux_policycap_extsockclass())
+ return 0;
+
+ err = selinux_sctp_process_new_assoc(asoc, skb);
+ if (err)
+ return err;
+
+ /* Compute the MLS component for the connection and store
+ * the information in asoc. This will be used by SCTP TCP type
+ * sockets and peeled off connections as they cause a new
+ * socket to be generated. selinux_sctp_sk_clone() will then
+ * plug this into the new socket.
+ */
+ err = selinux_conn_sid(sksec->sid, asoc->peer_secid, &conn_sid);
+ if (err)
+ return err;
+
+ asoc->secid = conn_sid;
+
+ /* Set any NetLabel labels including CIPSO/CALIPSO options. */
+ return selinux_netlbl_sctp_assoc_request(asoc, skb);
+}
+
+/* Called when SCTP receives a COOKIE ACK chunk as the final
+ * response to an association request (initiated by us).
+ */
+static int selinux_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
+
+ if (!selinux_policycap_extsockclass())
+ return 0;
+
+ /* Inherit secid from the parent socket - this will be picked up
+ * by selinux_sctp_sk_clone() if the association gets peeled off
+ * into a new socket.
+ */
+ asoc->secid = sksec->sid;
+
+ return selinux_sctp_process_new_assoc(asoc, skb);
+}
+
+/* Check if sctp IPv4/IPv6 addresses are valid for binding or connecting
+ * based on their @optname.
+ */
+static int selinux_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address,
+ int addrlen)
+{
+ int len, err = 0, walk_size = 0;
+ void *addr_buf;
+ struct sockaddr *addr;
+ struct socket *sock;
+
+ if (!selinux_policycap_extsockclass())
+ return 0;
+
+ /* Process one or more addresses that may be IPv4 or IPv6 */
+ sock = sk->sk_socket;
+ addr_buf = address;
+
+ while (walk_size < addrlen) {
+ if (walk_size + sizeof(sa_family_t) > addrlen)
+ return -EINVAL;
+
+ addr = addr_buf;
+ switch (addr->sa_family) {
+ case AF_UNSPEC:
+ case AF_INET:
+ len = sizeof(struct sockaddr_in);
+ break;
+ case AF_INET6:
+ len = sizeof(struct sockaddr_in6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (walk_size + len > addrlen)
+ return -EINVAL;
+
+ err = -EINVAL;
+ switch (optname) {
+ /* Bind checks */
+ case SCTP_PRIMARY_ADDR:
+ case SCTP_SET_PEER_PRIMARY_ADDR:
+ case SCTP_SOCKOPT_BINDX_ADD:
+ err = selinux_socket_bind(sock, addr, len);
+ break;
+ /* Connect checks */
+ case SCTP_SOCKOPT_CONNECTX:
+ case SCTP_PARAM_SET_PRIMARY:
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_SENDMSG_CONNECT:
+ err = selinux_socket_connect_helper(sock, addr, len);
+ if (err)
+ return err;
+
+ /* As selinux_sctp_bind_connect() is called by the
+ * SCTP protocol layer, the socket is already locked,
+ * therefore selinux_netlbl_socket_connect_locked()
+ * is called here. The situations handled are:
+ * sctp_connectx(3), sctp_sendmsg(3), sendmsg(2),
+ * whenever a new IP address is added or when a new
+ * primary address is selected.
+ * Note that an SCTP connect(2) call happens before
+ * the SCTP protocol layer and is handled via
+ * selinux_socket_connect().
+ */
+ err = selinux_netlbl_socket_connect_locked(sk, addr);
+ break;
+ }
+
+ if (err)
+ return err;
+
+ addr_buf += len;
+ walk_size += len;
+ }
+
+ return 0;
+}
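+
+/* Illustrative userspace sketch (assumes the lksctp-tools API): the address
+ * walk above covers multi-address calls such as
+ *
+ *	struct sockaddr_in addrs[2];	(filled with AF_INET/AF_INET6 entries)
+ *
+ *	sctp_bindx(sd, (struct sockaddr *)addrs, 2, SCTP_BINDX_ADD_ADDR);
+ *
+ * where each address in the packed array is validated individually against
+ * the bind or connect checks selected by @optname.
+ */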
+
+/* Called whenever a new socket is created by accept(2) or sctp_peeloff(3). */
+static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
+ struct sock *newsk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct sk_security_struct *newsksec = newsk->sk_security;
+
+ /* If policy does not support SECCLASS_SCTP_SOCKET then call
+ * the non-sctp clone version.
+ */
+ if (!selinux_policycap_extsockclass())
+ return selinux_sk_clone_security(sk, newsk);
+
+ newsksec->sid = asoc->secid;
+ newsksec->peer_sid = asoc->peer_secid;
+ newsksec->sclass = sksec->sclass;
+ selinux_netlbl_sctp_sk_clone(sk, newsk);
+}
+
+static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ int err;
+ u16 family = req->rsk_ops->family;
+ u32 connsid;
+ u32 peersid;
+
+ err = selinux_skb_peerlbl_sid(skb, family, &peersid);
+ if (err)
+ return err;
+ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+ if (err)
+ return err;
+ req->secid = connsid;
+ req->peer_secid = peersid;
+
+ return selinux_netlbl_inet_conn_request(req, family);
+}
+
+static void selinux_inet_csk_clone(struct sock *newsk,
+ const struct request_sock *req)
+{
+ struct sk_security_struct *newsksec = newsk->sk_security;
+
+ newsksec->sid = req->secid;
+ newsksec->peer_sid = req->peer_secid;
+ /* NOTE: Ideally, we should also get the isec->sid for the
+ new socket in sync, but we don't have the isec available yet.
+ So we will wait until sock_graft to do it, by which
+ time it will have been created and available. */
+
+ /* We don't need to take any sort of lock here as we are the only
+ * thread with access to newsksec */
+ selinux_netlbl_inet_csk_clone(newsk, req->rsk_ops->family);
+}
+
+static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
+{
+ u16 family = sk->sk_family;
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ /* handle mapped IPv4 packets arriving via IPv6 sockets */
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+
+ selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
+}
+
+static int selinux_secmark_relabel_packet(u32 sid)
+{
+ const struct task_security_struct *__tsec;
+ u32 tsid;
+
+ __tsec = selinux_cred(current_cred());
+ tsid = __tsec->sid;
+
+ return avc_has_perm(&selinux_state,
+ tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO,
+ NULL);
+}
+
+static void selinux_secmark_refcount_inc(void)
+{
+ atomic_inc(&selinux_secmark_refcount);
+}
+
+static void selinux_secmark_refcount_dec(void)
+{
+ atomic_dec(&selinux_secmark_refcount);
+}
+
+static void selinux_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
+{
+ flic->flowic_secid = req->secid;
+}
+
+static int selinux_tun_dev_alloc_security(void **security)
+{
+ struct tun_security_struct *tunsec;
+
+ tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
+ if (!tunsec)
+ return -ENOMEM;
+ tunsec->sid = current_sid();
+
+ *security = tunsec;
+ return 0;
+}
+
+static void selinux_tun_dev_free_security(void *security)
+{
+ kfree(security);
+}
+
+static int selinux_tun_dev_create(void)
+{
+ u32 sid = current_sid();
+
+	/* we aren't taking into account the "sockcreate" SID since the socket
+	 * that is being created here is not a socket in the traditional sense;
+	 * instead it is a private sock, accessible only to the kernel and
+	 * representing a wide range of network traffic spanning multiple
+	 * connections, unlike traditional sockets - check the TUN driver to
+	 * get a better understanding of why this socket is special */
+
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
+ NULL);
+}
+
+static int selinux_tun_dev_attach_queue(void *security)
+{
+ struct tun_security_struct *tunsec = security;
+
+ return avc_has_perm(&selinux_state,
+ current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__ATTACH_QUEUE, NULL);
+}
+
+static int selinux_tun_dev_attach(struct sock *sk, void *security)
+{
+ struct tun_security_struct *tunsec = security;
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ /* we don't currently perform any NetLabel based labeling here and it
+ * isn't clear that we would want to do so anyway; while we could apply
+	 * labeling without the support of the TUN user, the resulting labeled
+ * traffic from the other end of the connection would almost certainly
+ * cause confusion to the TUN user that had no idea network labeling
+ * protocols were being used */
+
+ sksec->sid = tunsec->sid;
+ sksec->sclass = SECCLASS_TUN_SOCKET;
+
+ return 0;
+}
+
+static int selinux_tun_dev_open(void *security)
+{
+ struct tun_security_struct *tunsec = security;
+ u32 sid = current_sid();
+ int err;
+
+ err = avc_has_perm(&selinux_state,
+ sid, tunsec->sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__RELABELFROM, NULL);
+ if (err)
+ return err;
+ err = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__RELABELTO, NULL);
+ if (err)
+ return err;
+ tunsec->sid = sid;
+
+ return 0;
+}
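+
+/* Illustrative userspace sketch (not kernel code): the create/attach/open
+ * checks above are driven by the TUN driver, e.g. when a process sets up a
+ * device:
+ *
+ *	struct ifreq ifr = { 0 };
+ *	int fd = open("/dev/net/tun", O_RDWR);
+ *
+ *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
+ *	ioctl(fd, TUNSETIFF, &ifr);
+ *
+ * TUNSETIFF ends up exercising the tun_dev create/attach permissions against
+ * the caller's SID.
+ */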
+
+#ifdef CONFIG_NETFILTER
+
+static unsigned int selinux_ip_forward(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ int ifindex;
+ u16 family;
+ char *addrp;
+ u32 peer_sid;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ int secmark_active, peerlbl_active;
+
+ if (!selinux_policycap_netpeer())
+ return NF_ACCEPT;
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ family = state->pf;
+ if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
+ return NF_DROP;
+
+ ifindex = state->in->ifindex;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
+ if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
+ return NF_DROP;
+
+ if (peerlbl_active) {
+ int err;
+
+ err = selinux_inet_sys_rcv_skb(state->net, ifindex,
+ addrp, family, peer_sid, &ad);
+ if (err) {
+ selinux_netlbl_err(skb, family, err, 1);
+ return NF_DROP;
+ }
+ }
+
+ if (secmark_active)
+ if (avc_has_perm(&selinux_state,
+ peer_sid, skb->secmark,
+ SECCLASS_PACKET, PACKET__FORWARD_IN, &ad))
+ return NF_DROP;
+
+ if (netlbl_enabled())
+ /* we do this in the FORWARD path and not the POST_ROUTING
+ * path because we want to make sure we apply the necessary
+ * labeling before IPsec is applied so we can leverage AH
+ * protection */
+ if (selinux_netlbl_skbuff_setsid(skb, family, peer_sid) != 0)
+ return NF_DROP;
+
+ return NF_ACCEPT;
+}
+
+static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct sock *sk;
+ u32 sid;
+
+ if (!netlbl_enabled())
+ return NF_ACCEPT;
+
+ /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
+ * because we want to make sure we apply the necessary labeling
+ * before IPsec is applied so we can leverage AH protection */
+ sk = skb->sk;
+ if (sk) {
+ struct sk_security_struct *sksec;
+
+ if (sk_listener(sk))
+ /* if the socket is the listening state then this
+ * packet is a SYN-ACK packet which means it needs to
+ * be labeled based on the connection/request_sock and
+ * not the parent socket. unfortunately, we can't
+			 * look up the request_sock yet as it isn't queued on
+ * the parent socket until after the SYN-ACK is sent.
+ * the "solution" is to simply pass the packet as-is
+ * as any IP option based labeling should be copied
+ * from the initial connection request (in the IP
+ * layer). it is far from ideal, but until we get a
+ * security label in the packet itself this is the
+ * best we can do. */
+ return NF_ACCEPT;
+
+ /* standard practice, label using the parent socket */
+ sksec = sk->sk_security;
+ sid = sksec->sid;
+ } else
+ sid = SECINITSID_KERNEL;
+ if (selinux_netlbl_skbuff_setsid(skb, state->pf, sid) != 0)
+ return NF_DROP;
+
+ return NF_ACCEPT;
+}
+
+
+static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct sock *sk;
+ struct sk_security_struct *sksec;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ u8 proto = 0;
+
+ sk = skb_to_full_sk(skb);
+ if (sk == NULL)
+ return NF_ACCEPT;
+ sksec = sk->sk_security;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = state->out->ifindex;
+ ad.u.net->family = state->pf;
+ if (selinux_parse_skb(skb, &ad, NULL, 0, &proto))
+ return NF_DROP;
+
+ if (selinux_secmark_enabled())
+ if (avc_has_perm(&selinux_state,
+ sksec->sid, skb->secmark,
+ SECCLASS_PACKET, PACKET__SEND, &ad))
+ return NF_DROP_ERR(-ECONNREFUSED);
+
+ if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
+ return NF_DROP_ERR(-ECONNREFUSED);
+
+ return NF_ACCEPT;
+}
+
+static unsigned int selinux_ip_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ u16 family;
+ u32 secmark_perm;
+ u32 peer_sid;
+ int ifindex;
+ struct sock *sk;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ char *addrp;
+ int secmark_active, peerlbl_active;
+
+	/* If any sort of compatibility mode is enabled then hand off processing
+ * to the selinux_ip_postroute_compat() function to deal with the
+ * special handling. We do this in an attempt to keep this function
+ * as fast and as clean as possible. */
+ if (!selinux_policycap_netpeer())
+ return selinux_ip_postroute_compat(skb, state);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ sk = skb_to_full_sk(skb);
+
+#ifdef CONFIG_XFRM
+ /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
+ * packet transformation so allow the packet to pass without any checks
+ * since we'll have another chance to perform access control checks
+	 * when the packet is on its final way out.
+ * NOTE: there appear to be some IPv6 multicast cases where skb->dst
+ * is NULL, in this case go ahead and apply access control.
+ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+ * TCP listening state we cannot wait until the XFRM processing
+ * is done as we will miss out on the SA label if we do;
+ * unfortunately, this means more work, but it is only once per
+ * connection. */
+ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+ !(sk && sk_listener(sk)))
+ return NF_ACCEPT;
+#endif
+
+ family = state->pf;
+ if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+ * to determine which and if the packet is being forwarded
+ * query the packet directly to determine the security label. */
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
+ if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
+ return NF_DROP;
+ } else {
+ secmark_perm = PACKET__SEND;
+ peer_sid = SECINITSID_KERNEL;
+ }
+ } else if (sk_listener(sk)) {
+ /* Locally generated packet but the associated socket is in the
+ * listening state which means this is a SYN-ACK packet. In
+ * this particular case the correct security label is assigned
+ * to the connection/request_sock but unfortunately we can't
+ * query the request_sock as it isn't queued on the parent
+ * socket until after the SYN-ACK packet is sent; the only
+ * viable choice is to regenerate the label like we do in
+ * selinux_inet_conn_request(). See also selinux_ip_output()
+ * for similar problems. */
+ u32 skb_sid;
+ struct sk_security_struct *sksec;
+
+ sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
+ /* At this point, if the returned skb peerlbl is SECSID_NULL
+ * and the packet has been through at least one XFRM
+ * transformation then we must be dealing with the "final"
+ * form of labeled IPsec packet; since we've already applied
+ * all of our access controls on this packet we can safely
+ * pass the packet. */
+ if (skb_sid == SECSID_NULL) {
+ switch (family) {
+ case PF_INET:
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ case PF_INET6:
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ default:
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
+ } else {
+ /* Locally generated packet, fetch the security label from the
+ * associated socket. */
+ struct sk_security_struct *sksec = sk->sk_security;
+ peer_sid = sksec->sid;
+ secmark_perm = PACKET__SEND;
+ }
+
+ ifindex = state->out->ifindex;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
+ if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
+ return NF_DROP;
+
+ if (secmark_active)
+ if (avc_has_perm(&selinux_state,
+ peer_sid, skb->secmark,
+ SECCLASS_PACKET, secmark_perm, &ad))
+ return NF_DROP_ERR(-ECONNREFUSED);
+
+ if (peerlbl_active) {
+ u32 if_sid;
+ u32 node_sid;
+
+ if (sel_netif_sid(state->net, ifindex, &if_sid))
+ return NF_DROP;
+ if (avc_has_perm(&selinux_state,
+ peer_sid, if_sid,
+ SECCLASS_NETIF, NETIF__EGRESS, &ad))
+ return NF_DROP_ERR(-ECONNREFUSED);
+
+ if (sel_netnode_sid(addrp, family, &node_sid))
+ return NF_DROP;
+ if (avc_has_perm(&selinux_state,
+ peer_sid, node_sid,
+ SECCLASS_NODE, NODE__SENDTO, &ad))
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+
+ return NF_ACCEPT;
+}
+#endif /* CONFIG_NETFILTER */
+
+static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ int rc = 0;
+ unsigned int msg_len;
+ unsigned int data_len = skb->len;
+ unsigned char *data = skb->data;
+ struct nlmsghdr *nlh;
+ struct sk_security_struct *sksec = sk->sk_security;
+ u16 sclass = sksec->sclass;
+ u32 perm;
+
+ while (data_len >= nlmsg_total_size(0)) {
+ nlh = (struct nlmsghdr *)data;
+
+		/* NOTE: the nlmsg_len field isn't reliably set by some netlink
+		 *       users, which means we can't reject skbs with bogus
+		 *       length fields; our solution is to follow what
+		 *       netlink_rcv_skb() does and simply skip processing of
+		 *       messages with length fields that are clearly junk
+		 */
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+ return 0;
+
+ rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+ if (rc == 0) {
+ rc = sock_has_perm(sk, perm);
+ if (rc)
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* -EINVAL is a missing msg/perm mapping */
+ pr_warn_ratelimited("SELinux: unrecognized netlink"
+ " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+ " pid=%d comm=%s\n",
+ sk->sk_protocol, nlh->nlmsg_type,
+ secclass_map[sclass - 1].name,
+ task_pid_nr(current), current->comm);
+ if (enforcing_enabled(&selinux_state) &&
+ !security_get_allow_unknown(&selinux_state))
+ return rc;
+ rc = 0;
+ } else if (rc == -ENOENT) {
+ /* -ENOENT is a missing socket/class mapping, ignore */
+ rc = 0;
+ } else {
+ return rc;
+ }
+
+ /* move to the next message after applying netlink padding */
+ msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msg_len >= data_len)
+ return 0;
+ data_len -= msg_len;
+ data += msg_len;
+ }
+
+ return rc;
+}
+
+static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
+{
+ isec->sclass = sclass;
+ isec->sid = current_sid();
+}
+
+static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
+ u32 perms)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(ipc_perms);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = ipc_perms->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, &ad);
+}
+
+static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
+{
+ struct msg_security_struct *msec;
+
+ msec = selinux_msg_msg(msg);
+ msec->sid = SECINITSID_UNLABELED;
+
+ return 0;
+}
+
+/* message queue security operations */
+static int selinux_msg_queue_alloc_security(struct kern_ipc_perm *msq)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(msq);
+ ipc_init_security(isec, SECCLASS_MSGQ);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = msq->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
+ MSGQ__CREATE, &ad);
+}
+
+static int selinux_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(msq);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = msq->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
+ MSGQ__ASSOCIATE, &ad);
+}
+
+static int selinux_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
+{
+	int perms;
+
+ switch (cmd) {
+ case IPC_INFO:
+ case MSG_INFO:
+ /* No specific object, just general system-wide information. */
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
+ case IPC_STAT:
+ case MSG_STAT:
+ case MSG_STAT_ANY:
+ perms = MSGQ__GETATTR | MSGQ__ASSOCIATE;
+ break;
+ case IPC_SET:
+ perms = MSGQ__SETATTR;
+ break;
+ case IPC_RMID:
+ perms = MSGQ__DESTROY;
+ break;
+ default:
+ return 0;
+ }
+
+	return ipc_has_perm(msq, perms);
+}
+
+static int selinux_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg)
+{
+ struct ipc_security_struct *isec;
+ struct msg_security_struct *msec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+ int rc;
+
+ isec = selinux_ipc(msq);
+ msec = selinux_msg_msg(msg);
+
+ /*
+ * First time through, need to assign label to the message
+ */
+ if (msec->sid == SECINITSID_UNLABELED) {
+ /*
+ * Compute new sid based on current process and
+ * message queue this message will be stored in
+ */
+ rc = security_transition_sid(&selinux_state, sid, isec->sid,
+ SECCLASS_MSG, NULL, &msec->sid);
+ if (rc)
+ return rc;
+ }
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = msq->key;
+
+ /* Can this process write to the queue? */
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
+ MSGQ__WRITE, &ad);
+ if (!rc)
+ /* Can this process send the message */
+ rc = avc_has_perm(&selinux_state,
+ sid, msec->sid, SECCLASS_MSG,
+ MSG__SEND, &ad);
+ if (!rc)
+ /* Can the message be put in the queue? */
+ rc = avc_has_perm(&selinux_state,
+ msec->sid, isec->sid, SECCLASS_MSGQ,
+ MSGQ__ENQUEUE, &ad);
+
+ return rc;
+}
+
+static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ struct task_struct *target,
+ long type, int mode)
+{
+ struct ipc_security_struct *isec;
+ struct msg_security_struct *msec;
+ struct common_audit_data ad;
+ u32 sid = task_sid_obj(target);
+ int rc;
+
+ isec = selinux_ipc(msq);
+ msec = selinux_msg_msg(msg);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = msq->key;
+
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid,
+ SECCLASS_MSGQ, MSGQ__READ, &ad);
+ if (!rc)
+ rc = avc_has_perm(&selinux_state,
+ sid, msec->sid,
+ SECCLASS_MSG, MSG__RECEIVE, &ad);
+ return rc;
+}
+
+/* Shared Memory security operations */
+static int selinux_shm_alloc_security(struct kern_ipc_perm *shp)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(shp);
+ ipc_init_security(isec, SECCLASS_SHM);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = shp->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SHM,
+ SHM__CREATE, &ad);
+}
+
+static int selinux_shm_associate(struct kern_ipc_perm *shp, int shmflg)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(shp);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = shp->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SHM,
+ SHM__ASSOCIATE, &ad);
+}
+
+/* Note, at this point, shp is locked down */
+static int selinux_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
+{
+	int perms;
+
+ switch (cmd) {
+ case IPC_INFO:
+ case SHM_INFO:
+ /* No specific object, just general system-wide information. */
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
+ case IPC_STAT:
+ case SHM_STAT:
+ case SHM_STAT_ANY:
+ perms = SHM__GETATTR | SHM__ASSOCIATE;
+ break;
+ case IPC_SET:
+ perms = SHM__SETATTR;
+ break;
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ perms = SHM__LOCK;
+ break;
+ case IPC_RMID:
+ perms = SHM__DESTROY;
+ break;
+ default:
+ return 0;
+ }
+
+	return ipc_has_perm(shp, perms);
+}
+
+static int selinux_shm_shmat(struct kern_ipc_perm *shp,
+ char __user *shmaddr, int shmflg)
+{
+ u32 perms;
+
+ if (shmflg & SHM_RDONLY)
+ perms = SHM__READ;
+ else
+ perms = SHM__READ | SHM__WRITE;
+
+ return ipc_has_perm(shp, perms);
+}
+
+/* Semaphore security operations */
+static int selinux_sem_alloc_security(struct kern_ipc_perm *sma)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(sma);
+ ipc_init_security(isec, SECCLASS_SEM);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = sma->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SEM,
+ SEM__CREATE, &ad);
+}
+
+static int selinux_sem_associate(struct kern_ipc_perm *sma, int semflg)
+{
+ struct ipc_security_struct *isec;
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+
+ isec = selinux_ipc(sma);
+
+ ad.type = LSM_AUDIT_DATA_IPC;
+ ad.u.ipc_id = sma->key;
+
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SEM,
+ SEM__ASSOCIATE, &ad);
+}
+
+/* Note, at this point, sma is locked down */
+static int selinux_sem_semctl(struct kern_ipc_perm *sma, int cmd)
+{
+	u32 perms;
+
+ switch (cmd) {
+ case IPC_INFO:
+ case SEM_INFO:
+ /* No specific object, just general system-wide information. */
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
+ SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ perms = SEM__GETATTR;
+ break;
+ case GETVAL:
+ case GETALL:
+ perms = SEM__READ;
+ break;
+ case SETVAL:
+ case SETALL:
+ perms = SEM__WRITE;
+ break;
+ case IPC_RMID:
+ perms = SEM__DESTROY;
+ break;
+ case IPC_SET:
+ perms = SEM__SETATTR;
+ break;
+ case IPC_STAT:
+ case SEM_STAT:
+ case SEM_STAT_ANY:
+ perms = SEM__GETATTR | SEM__ASSOCIATE;
+ break;
+ default:
+ return 0;
+ }
+
+	return ipc_has_perm(sma, perms);
+}
+
+static int selinux_sem_semop(struct kern_ipc_perm *sma,
+ struct sembuf *sops, unsigned nsops, int alter)
+{
+ u32 perms;
+
+ if (alter)
+ perms = SEM__READ | SEM__WRITE;
+ else
+ perms = SEM__READ;
+
+ return ipc_has_perm(sma, perms);
+}
+
+static int selinux_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
+{
+	u32 av = 0;
+
+ if (flag & S_IRUGO)
+ av |= IPC__UNIX_READ;
+ if (flag & S_IWUGO)
+ av |= IPC__UNIX_WRITE;
+
+ if (av == 0)
+ return 0;
+
+ return ipc_has_perm(ipcp, av);
+}
+
+static void selinux_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+{
+ struct ipc_security_struct *isec = selinux_ipc(ipcp);
+ *secid = isec->sid;
+}
+
+static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+ if (inode)
+ inode_doinit_with_dentry(inode, dentry);
+}
+
+static int selinux_getprocattr(struct task_struct *p,
+ const char *name, char **value)
+{
+ const struct task_security_struct *__tsec;
+ u32 sid;
+ int error;
+ unsigned len;
+
+ rcu_read_lock();
+ __tsec = selinux_cred(__task_cred(p));
+
+ if (current != p) {
+ error = avc_has_perm(&selinux_state,
+ current_sid(), __tsec->sid,
+ SECCLASS_PROCESS, PROCESS__GETATTR, NULL);
+ if (error)
+ goto bad;
+ }
+
+ if (!strcmp(name, "current"))
+ sid = __tsec->sid;
+ else if (!strcmp(name, "prev"))
+ sid = __tsec->osid;
+ else if (!strcmp(name, "exec"))
+ sid = __tsec->exec_sid;
+ else if (!strcmp(name, "fscreate"))
+ sid = __tsec->create_sid;
+ else if (!strcmp(name, "keycreate"))
+ sid = __tsec->keycreate_sid;
+ else if (!strcmp(name, "sockcreate"))
+ sid = __tsec->sockcreate_sid;
+ else {
+ error = -EINVAL;
+ goto bad;
+ }
+ rcu_read_unlock();
+
+ if (!sid)
+ return 0;
+
+ error = security_sid_to_context(&selinux_state, sid, value, &len);
+ if (error)
+ return error;
+ return len;
+
+bad:
+ rcu_read_unlock();
+ return error;
+}
+
+static int selinux_setprocattr(const char *name, void *value, size_t size)
+{
+ struct task_security_struct *tsec;
+ struct cred *new;
+ u32 mysid = current_sid(), sid = 0, ptsid;
+ int error;
+ char *str = value;
+
+ /*
+ * Basic control over ability to set these attributes at all.
+ */
+ if (!strcmp(name, "exec"))
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
+ PROCESS__SETEXEC, NULL);
+ else if (!strcmp(name, "fscreate"))
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
+ PROCESS__SETFSCREATE, NULL);
+ else if (!strcmp(name, "keycreate"))
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
+ PROCESS__SETKEYCREATE, NULL);
+ else if (!strcmp(name, "sockcreate"))
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
+ PROCESS__SETSOCKCREATE, NULL);
+ else if (!strcmp(name, "current"))
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
+ PROCESS__SETCURRENT, NULL);
+ else
+ error = -EINVAL;
+ if (error)
+ return error;
+
+ /* Obtain a SID for the context, if one was specified. */
+ if (size && str[0] && str[0] != '\n') {
+ if (str[size-1] == '\n') {
+ str[size-1] = 0;
+ size--;
+ }
+ error = security_context_to_sid(&selinux_state, value, size,
+ &sid, GFP_KERNEL);
+ if (error == -EINVAL && !strcmp(name, "fscreate")) {
+ if (!has_cap_mac_admin(true)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC,
+ AUDIT_SELINUX_ERR);
+ if (!ab)
+ return error;
+ audit_log_format(ab, "op=fscreate invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
+ return error;
+ }
+ error = security_context_to_sid_force(
+ &selinux_state,
+ value, size, &sid);
+ }
+ if (error)
+ return error;
+ }
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ /* Permission checking based on the specified context is
+ performed during the actual operation (execve,
+ open/mkdir/...), when we know the full context of the
+ operation. See selinux_bprm_creds_for_exec for the execve
+ checks and may_create for the file creation checks. The
+ operation will then fail if the context is not permitted. */
+ tsec = selinux_cred(new);
+ if (!strcmp(name, "exec")) {
+ tsec->exec_sid = sid;
+ } else if (!strcmp(name, "fscreate")) {
+ tsec->create_sid = sid;
+ } else if (!strcmp(name, "keycreate")) {
+ if (sid) {
+ error = avc_has_perm(&selinux_state, mysid, sid,
+ SECCLASS_KEY, KEY__CREATE, NULL);
+ if (error)
+ goto abort_change;
+ }
+ tsec->keycreate_sid = sid;
+ } else if (!strcmp(name, "sockcreate")) {
+ tsec->sockcreate_sid = sid;
+ } else if (!strcmp(name, "current")) {
+ error = -EINVAL;
+ if (sid == 0)
+ goto abort_change;
+
+		/* Only allow single threaded processes to change context
+		 * freely; multithreaded processes must use a bounded
+		 * transition. */
+ if (!current_is_single_threaded()) {
+ error = security_bounded_transition(&selinux_state,
+ tsec->sid, sid);
+ if (error)
+ goto abort_change;
+ }
+
+ /* Check permissions for the transition. */
+ error = avc_has_perm(&selinux_state,
+ tsec->sid, sid, SECCLASS_PROCESS,
+ PROCESS__DYNTRANSITION, NULL);
+ if (error)
+ goto abort_change;
+
+ /* Check for ptracing, and update the task SID if ok.
+ Otherwise, leave SID unchanged and fail. */
+ ptsid = ptrace_parent_sid();
+ if (ptsid != 0) {
+ error = avc_has_perm(&selinux_state,
+ ptsid, sid, SECCLASS_PROCESS,
+ PROCESS__PTRACE, NULL);
+ if (error)
+ goto abort_change;
+ }
+
+ tsec->sid = sid;
+ } else {
+ error = -EINVAL;
+ goto abort_change;
+ }
+
+ commit_creds(new);
+ return size;
+
+abort_change:
+ abort_creds(new);
+ return error;
+}
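+
+/* Illustrative userspace sketch (not kernel code): the attributes handled
+ * above are exposed via /proc/<pid>/attr/; setting the exec context before
+ * an execve(2) amounts to (the context string is a hypothetical example):
+ *
+ *	int fd = open("/proc/self/attr/exec", O_WRONLY);
+ *	const char *ctx = "system_u:system_r:myapp_t:s0";
+ *
+ *	write(fd, ctx, strlen(ctx));
+ *	close(fd);
+ *
+ * libselinux wraps these files with helpers such as setexeccon(3).
+ */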
+
+static int selinux_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
+}
+
+static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ return security_sid_to_context(&selinux_state, secid,
+ secdata, seclen);
+}
+
+static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+ return security_context_to_sid(&selinux_state, secdata, seclen,
+ secid, GFP_KERNEL);
+}
+
+static void selinux_release_secctx(char *secdata, u32 seclen)
+{
+ kfree(secdata);
+}
+
+static void selinux_inode_invalidate_secctx(struct inode *inode)
+{
+ struct inode_security_struct *isec = selinux_inode(inode);
+
+ spin_lock(&isec->lock);
+ isec->initialized = LABEL_INVALID;
+ spin_unlock(&isec->lock);
+}
+
+/*
+ * called with inode->i_mutex locked
+ */
+static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
+ ctx, ctxlen, 0);
+ /* Do not return error when suppressing label (SBLABEL_MNT not set). */
+ return rc == -EOPNOTSUPP ? 0 : rc;
+}
+
+/*
+ * called with inode->i_mutex locked
+ */
+static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return __vfs_setxattr_noperm(&init_user_ns, dentry, XATTR_NAME_SELINUX,
+ ctx, ctxlen, 0);
+}
+
+static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+	int len;
+
+	len = selinux_inode_getsecurity(&init_user_ns, inode,
+ XATTR_SELINUX_SUFFIX, ctx, true);
+ if (len < 0)
+ return len;
+ *ctxlen = len;
+ return 0;
+}
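+
+/* Illustrative userspace sketch (not kernel code): the same context handled
+ * by the secctx hooks above is visible as the "security.selinux" extended
+ * attribute, e.g.:
+ *
+ *	char ctx[256];
+ *	ssize_t n = getxattr("/etc/passwd", "security.selinux",
+ *			     ctx, sizeof(ctx));
+ */
+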
+#ifdef CONFIG_KEYS
+
+static int selinux_key_alloc(struct key *k, const struct cred *cred,
+ unsigned long flags)
+{
+ const struct task_security_struct *tsec;
+ struct key_security_struct *ksec;
+
+ ksec = kzalloc(sizeof(struct key_security_struct), GFP_KERNEL);
+ if (!ksec)
+ return -ENOMEM;
+
+ tsec = selinux_cred(cred);
+ if (tsec->keycreate_sid)
+ ksec->sid = tsec->keycreate_sid;
+ else
+ ksec->sid = tsec->sid;
+
+ k->security = ksec;
+ return 0;
+}
+
+static void selinux_key_free(struct key *k)
+{
+ struct key_security_struct *ksec = k->security;
+
+ k->security = NULL;
+ kfree(ksec);
+}
+
+static int selinux_key_permission(key_ref_t key_ref,
+ const struct cred *cred,
+ enum key_need_perm need_perm)
+{
+ struct key *key;
+ struct key_security_struct *ksec;
+ u32 perm, sid;
+
+ switch (need_perm) {
+ case KEY_NEED_VIEW:
+ perm = KEY__VIEW;
+ break;
+ case KEY_NEED_READ:
+ perm = KEY__READ;
+ break;
+ case KEY_NEED_WRITE:
+ perm = KEY__WRITE;
+ break;
+ case KEY_NEED_SEARCH:
+ perm = KEY__SEARCH;
+ break;
+ case KEY_NEED_LINK:
+ perm = KEY__LINK;
+ break;
+ case KEY_NEED_SETATTR:
+ perm = KEY__SETATTR;
+ break;
+ case KEY_NEED_UNLINK:
+ case KEY_SYSADMIN_OVERRIDE:
+ case KEY_AUTHTOKEN_OVERRIDE:
+ case KEY_DEFER_PERM_CHECK:
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EPERM;
+
+ }
+
+ sid = cred_sid(cred);
+ key = key_ref_to_ptr(key_ref);
+ ksec = key->security;
+
+ return avc_has_perm(&selinux_state,
+ sid, ksec->sid, SECCLASS_KEY, perm, NULL);
+}
+
+static int selinux_key_getsecurity(struct key *key, char **_buffer)
+{
+ struct key_security_struct *ksec = key->security;
+ char *context = NULL;
+ unsigned len;
+ int rc;
+
+ rc = security_sid_to_context(&selinux_state, ksec->sid,
+ &context, &len);
+ if (!rc)
+ rc = len;
+ *_buffer = context;
+ return rc;
+}
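+
+/* Illustrative userspace sketch (assumes libkeyutils): the buffer returned
+ * above is what userspace obtains through the KEYCTL_GET_SECURITY operation,
+ * wrapped as keyctl_get_security(3):
+ *
+ *	char buf[256];
+ *	long n = keyctl_get_security(key, buf, sizeof(buf));
+ */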
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+static int selinux_watch_key(struct key *key)
+{
+ struct key_security_struct *ksec = key->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state,
+ sid, ksec->sid, SECCLASS_KEY, KEY__VIEW, NULL);
+}
+#endif
+#endif
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+static int selinux_ib_pkey_access(void *ib_sec, u64 subnet_prefix, u16 pkey_val)
+{
+ struct common_audit_data ad;
+ int err;
+ u32 sid = 0;
+ struct ib_security_struct *sec = ib_sec;
+ struct lsm_ibpkey_audit ibpkey;
+
+ err = sel_ib_pkey_sid(subnet_prefix, pkey_val, &sid);
+ if (err)
+ return err;
+
+ ad.type = LSM_AUDIT_DATA_IBPKEY;
+ ibpkey.subnet_prefix = subnet_prefix;
+ ibpkey.pkey = pkey_val;
+ ad.u.ibpkey = &ibpkey;
+ return avc_has_perm(&selinux_state,
+ sec->sid, sid,
+ SECCLASS_INFINIBAND_PKEY,
+ INFINIBAND_PKEY__ACCESS, &ad);
+}
+
+static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name,
+ u8 port_num)
+{
+ struct common_audit_data ad;
+ int err;
+ u32 sid = 0;
+ struct ib_security_struct *sec = ib_sec;
+ struct lsm_ibendport_audit ibendport;
+
+ err = security_ib_endport_sid(&selinux_state, dev_name, port_num,
+ &sid);
+
+ if (err)
+ return err;
+
+ ad.type = LSM_AUDIT_DATA_IBENDPORT;
+ ibendport.dev_name = dev_name;
+ ibendport.port = port_num;
+ ad.u.ibendport = &ibendport;
+ return avc_has_perm(&selinux_state,
+ sec->sid, sid,
+ SECCLASS_INFINIBAND_ENDPORT,
+ INFINIBAND_ENDPORT__MANAGE_SUBNET, &ad);
+}
+
+static int selinux_ib_alloc_security(void **ib_sec)
+{
+ struct ib_security_struct *sec;
+
+ sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+ sec->sid = current_sid();
+
+ *ib_sec = sec;
+ return 0;
+}
+
+static void selinux_ib_free_security(void *ib_sec)
+{
+ kfree(ib_sec);
+}
+#endif
+
+#ifdef CONFIG_BPF_SYSCALL
+static int selinux_bpf(int cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ u32 sid = current_sid();
+ int ret;
+
+ switch (cmd) {
+ case BPF_MAP_CREATE:
+ ret = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_BPF, BPF__MAP_CREATE,
+ NULL);
+ break;
+ case BPF_PROG_LOAD:
+ ret = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_BPF, BPF__PROG_LOAD,
+ NULL);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static u32 bpf_map_fmode_to_av(fmode_t fmode)
+{
+ u32 av = 0;
+
+ if (fmode & FMODE_READ)
+ av |= BPF__MAP_READ;
+ if (fmode & FMODE_WRITE)
+ av |= BPF__MAP_WRITE;
+ return av;
+}
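+
+/* Illustrative example (not part of the hook logic): a map fd opened
+ * read-write has both FMODE_READ and FMODE_WRITE set, so
+ * bpf_map_fmode_to_av(FMODE_READ | FMODE_WRITE) yields
+ * BPF__MAP_READ | BPF__MAP_WRITE, and policy must grant both map_read
+ * and map_write for the access to pass.
+ */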
+
+/* This function checks a file passed through a unix socket or binder to see
+ * if it is a bpf-related object, and if so applies the corresponding checks
+ * based on the object's type. Unlike other files and sockets, bpf maps and
+ * programs use a shared anonymous inode inside the kernel as their inode,
+ * so checking that inode cannot tell whether the process has the privilege
+ * to access the bpf object. That is why this additional check is needed in
+ * selinux_file_receive and selinux_binder_transfer_files.
+ */
+static int bpf_fd_pass(struct file *file, u32 sid)
+{
+ struct bpf_security_struct *bpfsec;
+ struct bpf_prog *prog;
+ struct bpf_map *map;
+ int ret;
+
+ if (file->f_op == &bpf_map_fops) {
+ map = file->private_data;
+ bpfsec = map->security;
+ ret = avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
+ bpf_map_fmode_to_av(file->f_mode), NULL);
+ if (ret)
+ return ret;
+ } else if (file->f_op == &bpf_prog_fops) {
+ prog = file->private_data;
+ bpfsec = prog->aux->security;
+ ret = avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
+ BPF__PROG_RUN, NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode)
+{
+ u32 sid = current_sid();
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = map->security;
+ return avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
+ bpf_map_fmode_to_av(fmode), NULL);
+}
+
+static int selinux_bpf_prog(struct bpf_prog *prog)
+{
+ u32 sid = current_sid();
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = prog->aux->security;
+ return avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
+ BPF__PROG_RUN, NULL);
+}
+
+static int selinux_bpf_map_alloc(struct bpf_map *map)
+{
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
+ if (!bpfsec)
+ return -ENOMEM;
+
+ bpfsec->sid = current_sid();
+ map->security = bpfsec;
+
+ return 0;
+}
+
+static void selinux_bpf_map_free(struct bpf_map *map)
+{
+ struct bpf_security_struct *bpfsec = map->security;
+
+ map->security = NULL;
+ kfree(bpfsec);
+}
+
+static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
+{
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
+ if (!bpfsec)
+ return -ENOMEM;
+
+ bpfsec->sid = current_sid();
+ aux->security = bpfsec;
+
+ return 0;
+}
+
+static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
+{
+ struct bpf_security_struct *bpfsec = aux->security;
+
+ aux->security = NULL;
+ kfree(bpfsec);
+}
+#endif
+
+struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct task_security_struct),
+ .lbs_file = sizeof(struct file_security_struct),
+ .lbs_inode = sizeof(struct inode_security_struct),
+ .lbs_ipc = sizeof(struct ipc_security_struct),
+ .lbs_msg_msg = sizeof(struct msg_security_struct),
+ .lbs_superblock = sizeof(struct superblock_security_struct),
+};
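+
+/* Note: these sizes tell the LSM framework how much room to reserve for
+ * SELinux in each shared security blob (cred, file, inode, ipc, msg_msg,
+ * superblock), which is what lets accessors such as selinux_cred() and
+ * selinux_inode() carve out the SELinux-owned slice of each object.
+ */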
+
+#ifdef CONFIG_PERF_EVENTS
+static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ u32 requested, sid = current_sid();
+
+ if (type == PERF_SECURITY_OPEN)
+ requested = PERF_EVENT__OPEN;
+ else if (type == PERF_SECURITY_CPU)
+ requested = PERF_EVENT__CPU;
+ else if (type == PERF_SECURITY_KERNEL)
+ requested = PERF_EVENT__KERNEL;
+ else if (type == PERF_SECURITY_TRACEPOINT)
+ requested = PERF_EVENT__TRACEPOINT;
+ else
+ return -EINVAL;
+
+ return avc_has_perm(&selinux_state, sid, sid, SECCLASS_PERF_EVENT,
+ requested, NULL);
+}
+
+static int selinux_perf_event_alloc(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec;
+
+ perfsec = kzalloc(sizeof(*perfsec), GFP_KERNEL);
+ if (!perfsec)
+ return -ENOMEM;
+
+ perfsec->sid = current_sid();
+ event->security = perfsec;
+
+ return 0;
+}
+
+static void selinux_perf_event_free(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+
+ event->security = NULL;
+ kfree(perfsec);
+}
+
+static int selinux_perf_event_read(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
+}
+
+static int selinux_perf_event_write(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__WRITE, NULL);
+}
+#endif
+
+#ifdef CONFIG_IO_URING
+/**
+ * selinux_uring_override_creds - check the requested cred override
+ * @new: the target creds
+ *
+ * Check to see if the current task is allowed to override its credentials
+ * to service an io_uring operation.
+ */
+static int selinux_uring_override_creds(const struct cred *new)
+{
+ return avc_has_perm(&selinux_state, current_sid(), cred_sid(new),
+ SECCLASS_IO_URING, IO_URING__OVERRIDE_CREDS, NULL);
+}
+
+/**
+ * selinux_uring_sqpoll - check if an io_uring polling thread can be created
+ *
+ * Check to see if the current task is allowed to create a new io_uring
+ * kernel polling thread.
+ */
+static int selinux_uring_sqpoll(void)
+{
+ int sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, sid,
+ SECCLASS_IO_URING, IO_URING__SQPOLL, NULL);
+}
+
+/**
+ * selinux_uring_cmd - check if IORING_OP_URING_CMD is allowed
+ * @ioucmd: the io_uring command structure
+ *
+ * Check to see if the current domain is allowed to execute an
+ * IORING_OP_URING_CMD against the device/file specified in @ioucmd.
+ */
+static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct inode *inode = file_inode(file);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ return avc_has_perm(&selinux_state, current_sid(), isec->sid,
+ SECCLASS_IO_URING, IO_URING__CMD, &ad);
+}
+#endif /* CONFIG_IO_URING */
+
+/*
+ * IMPORTANT NOTE: When adding new hooks, please be careful to keep this order:
+ * 1. any hooks that don't belong to (2.) or (3.) below,
+ * 2. hooks that both access structures allocated by other hooks, and allocate
+ * structures that can be later accessed by other hooks (mostly "cloning"
+ * hooks),
+ * 3. hooks that only allocate structures that can be later accessed by other
+ * hooks ("allocating" hooks).
+ *
+ * Please follow block comment delimiters in the list to keep this order.
+ *
+ * This ordering is needed for SELinux runtime disable to work at least somewhat
+ * safely. Breaking the ordering rules above might lead to NULL pointer derefs
+ * when disabling SELinux at runtime.
+ */
+static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
+ LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
+ LSM_HOOK_INIT(binder_transfer_binder, selinux_binder_transfer_binder),
+ LSM_HOOK_INIT(binder_transfer_file, selinux_binder_transfer_file),
+
+ LSM_HOOK_INIT(ptrace_access_check, selinux_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, selinux_ptrace_traceme),
+ LSM_HOOK_INIT(capget, selinux_capget),
+ LSM_HOOK_INIT(capset, selinux_capset),
+ LSM_HOOK_INIT(capable, selinux_capable),
+ LSM_HOOK_INIT(quotactl, selinux_quotactl),
+ LSM_HOOK_INIT(quota_on, selinux_quota_on),
+ LSM_HOOK_INIT(syslog, selinux_syslog),
+ LSM_HOOK_INIT(vm_enough_memory, selinux_vm_enough_memory),
+
+ LSM_HOOK_INIT(netlink_send, selinux_netlink_send),
+
+ LSM_HOOK_INIT(bprm_creds_for_exec, selinux_bprm_creds_for_exec),
+ LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
+ LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
+
+ LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts),
+ LSM_HOOK_INIT(sb_mnt_opts_compat, selinux_sb_mnt_opts_compat),
+ LSM_HOOK_INIT(sb_remount, selinux_sb_remount),
+ LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount),
+ LSM_HOOK_INIT(sb_show_options, selinux_sb_show_options),
+ LSM_HOOK_INIT(sb_statfs, selinux_sb_statfs),
+ LSM_HOOK_INIT(sb_mount, selinux_mount),
+ LSM_HOOK_INIT(sb_umount, selinux_umount),
+ LSM_HOOK_INIT(sb_set_mnt_opts, selinux_set_mnt_opts),
+ LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
+
+ LSM_HOOK_INIT(move_mount, selinux_move_mount),
+
+ LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
+ LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
+
+ LSM_HOOK_INIT(inode_free_security, selinux_inode_free_security),
+ LSM_HOOK_INIT(inode_init_security, selinux_inode_init_security),
+ LSM_HOOK_INIT(inode_init_security_anon, selinux_inode_init_security_anon),
+ LSM_HOOK_INIT(inode_create, selinux_inode_create),
+ LSM_HOOK_INIT(inode_link, selinux_inode_link),
+ LSM_HOOK_INIT(inode_unlink, selinux_inode_unlink),
+ LSM_HOOK_INIT(inode_symlink, selinux_inode_symlink),
+ LSM_HOOK_INIT(inode_mkdir, selinux_inode_mkdir),
+ LSM_HOOK_INIT(inode_rmdir, selinux_inode_rmdir),
+ LSM_HOOK_INIT(inode_mknod, selinux_inode_mknod),
+ LSM_HOOK_INIT(inode_rename, selinux_inode_rename),
+ LSM_HOOK_INIT(inode_readlink, selinux_inode_readlink),
+ LSM_HOOK_INIT(inode_follow_link, selinux_inode_follow_link),
+ LSM_HOOK_INIT(inode_permission, selinux_inode_permission),
+ LSM_HOOK_INIT(inode_setattr, selinux_inode_setattr),
+ LSM_HOOK_INIT(inode_getattr, selinux_inode_getattr),
+ LSM_HOOK_INIT(inode_setxattr, selinux_inode_setxattr),
+ LSM_HOOK_INIT(inode_post_setxattr, selinux_inode_post_setxattr),
+ LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr),
+ LSM_HOOK_INIT(inode_listxattr, selinux_inode_listxattr),
+ LSM_HOOK_INIT(inode_removexattr, selinux_inode_removexattr),
+ LSM_HOOK_INIT(inode_getsecurity, selinux_inode_getsecurity),
+ LSM_HOOK_INIT(inode_setsecurity, selinux_inode_setsecurity),
+ LSM_HOOK_INIT(inode_listsecurity, selinux_inode_listsecurity),
+ LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
+ LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up),
+ LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr),
+ LSM_HOOK_INIT(path_notify, selinux_path_notify),
+
+ LSM_HOOK_INIT(kernfs_init_security, selinux_kernfs_init_security),
+
+ LSM_HOOK_INIT(file_permission, selinux_file_permission),
+ LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
+ LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
+ LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+ LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+ LSM_HOOK_INIT(file_lock, selinux_file_lock),
+ LSM_HOOK_INIT(file_fcntl, selinux_file_fcntl),
+ LSM_HOOK_INIT(file_set_fowner, selinux_file_set_fowner),
+ LSM_HOOK_INIT(file_send_sigiotask, selinux_file_send_sigiotask),
+ LSM_HOOK_INIT(file_receive, selinux_file_receive),
+
+ LSM_HOOK_INIT(file_open, selinux_file_open),
+
+ LSM_HOOK_INIT(task_alloc, selinux_task_alloc),
+ LSM_HOOK_INIT(cred_prepare, selinux_cred_prepare),
+ LSM_HOOK_INIT(cred_transfer, selinux_cred_transfer),
+ LSM_HOOK_INIT(cred_getsecid, selinux_cred_getsecid),
+ LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as),
+ LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as),
+ LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request),
+ LSM_HOOK_INIT(kernel_load_data, selinux_kernel_load_data),
+ LSM_HOOK_INIT(kernel_read_file, selinux_kernel_read_file),
+ LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
+ LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
+ LSM_HOOK_INIT(task_getsid, selinux_task_getsid),
+ LSM_HOOK_INIT(current_getsecid_subj, selinux_current_getsecid_subj),
+ LSM_HOOK_INIT(task_getsecid_obj, selinux_task_getsecid_obj),
+ LSM_HOOK_INIT(task_setnice, selinux_task_setnice),
+ LSM_HOOK_INIT(task_setioprio, selinux_task_setioprio),
+ LSM_HOOK_INIT(task_getioprio, selinux_task_getioprio),
+ LSM_HOOK_INIT(task_prlimit, selinux_task_prlimit),
+ LSM_HOOK_INIT(task_setrlimit, selinux_task_setrlimit),
+ LSM_HOOK_INIT(task_setscheduler, selinux_task_setscheduler),
+ LSM_HOOK_INIT(task_getscheduler, selinux_task_getscheduler),
+ LSM_HOOK_INIT(task_movememory, selinux_task_movememory),
+ LSM_HOOK_INIT(task_kill, selinux_task_kill),
+ LSM_HOOK_INIT(task_to_inode, selinux_task_to_inode),
+ LSM_HOOK_INIT(userns_create, selinux_userns_create),
+
+ LSM_HOOK_INIT(ipc_permission, selinux_ipc_permission),
+ LSM_HOOK_INIT(ipc_getsecid, selinux_ipc_getsecid),
+
+ LSM_HOOK_INIT(msg_queue_associate, selinux_msg_queue_associate),
+ LSM_HOOK_INIT(msg_queue_msgctl, selinux_msg_queue_msgctl),
+ LSM_HOOK_INIT(msg_queue_msgsnd, selinux_msg_queue_msgsnd),
+ LSM_HOOK_INIT(msg_queue_msgrcv, selinux_msg_queue_msgrcv),
+
+ LSM_HOOK_INIT(shm_associate, selinux_shm_associate),
+ LSM_HOOK_INIT(shm_shmctl, selinux_shm_shmctl),
+ LSM_HOOK_INIT(shm_shmat, selinux_shm_shmat),
+
+ LSM_HOOK_INIT(sem_associate, selinux_sem_associate),
+ LSM_HOOK_INIT(sem_semctl, selinux_sem_semctl),
+ LSM_HOOK_INIT(sem_semop, selinux_sem_semop),
+
+ LSM_HOOK_INIT(d_instantiate, selinux_d_instantiate),
+
+ LSM_HOOK_INIT(getprocattr, selinux_getprocattr),
+ LSM_HOOK_INIT(setprocattr, selinux_setprocattr),
+
+ LSM_HOOK_INIT(ismaclabel, selinux_ismaclabel),
+ LSM_HOOK_INIT(secctx_to_secid, selinux_secctx_to_secid),
+ LSM_HOOK_INIT(release_secctx, selinux_release_secctx),
+ LSM_HOOK_INIT(inode_invalidate_secctx, selinux_inode_invalidate_secctx),
+ LSM_HOOK_INIT(inode_notifysecctx, selinux_inode_notifysecctx),
+ LSM_HOOK_INIT(inode_setsecctx, selinux_inode_setsecctx),
+
+ LSM_HOOK_INIT(unix_stream_connect, selinux_socket_unix_stream_connect),
+ LSM_HOOK_INIT(unix_may_send, selinux_socket_unix_may_send),
+
+ LSM_HOOK_INIT(socket_create, selinux_socket_create),
+ LSM_HOOK_INIT(socket_post_create, selinux_socket_post_create),
+ LSM_HOOK_INIT(socket_socketpair, selinux_socket_socketpair),
+ LSM_HOOK_INIT(socket_bind, selinux_socket_bind),
+ LSM_HOOK_INIT(socket_connect, selinux_socket_connect),
+ LSM_HOOK_INIT(socket_listen, selinux_socket_listen),
+ LSM_HOOK_INIT(socket_accept, selinux_socket_accept),
+ LSM_HOOK_INIT(socket_sendmsg, selinux_socket_sendmsg),
+ LSM_HOOK_INIT(socket_recvmsg, selinux_socket_recvmsg),
+ LSM_HOOK_INIT(socket_getsockname, selinux_socket_getsockname),
+ LSM_HOOK_INIT(socket_getpeername, selinux_socket_getpeername),
+ LSM_HOOK_INIT(socket_getsockopt, selinux_socket_getsockopt),
+ LSM_HOOK_INIT(socket_setsockopt, selinux_socket_setsockopt),
+ LSM_HOOK_INIT(socket_shutdown, selinux_socket_shutdown),
+ LSM_HOOK_INIT(socket_sock_rcv_skb, selinux_socket_sock_rcv_skb),
+ LSM_HOOK_INIT(socket_getpeersec_stream,
+ selinux_socket_getpeersec_stream),
+ LSM_HOOK_INIT(socket_getpeersec_dgram, selinux_socket_getpeersec_dgram),
+ LSM_HOOK_INIT(sk_free_security, selinux_sk_free_security),
+ LSM_HOOK_INIT(sk_clone_security, selinux_sk_clone_security),
+ LSM_HOOK_INIT(sk_getsecid, selinux_sk_getsecid),
+ LSM_HOOK_INIT(sock_graft, selinux_sock_graft),
+ LSM_HOOK_INIT(sctp_assoc_request, selinux_sctp_assoc_request),
+ LSM_HOOK_INIT(sctp_sk_clone, selinux_sctp_sk_clone),
+ LSM_HOOK_INIT(sctp_bind_connect, selinux_sctp_bind_connect),
+ LSM_HOOK_INIT(sctp_assoc_established, selinux_sctp_assoc_established),
+ LSM_HOOK_INIT(inet_conn_request, selinux_inet_conn_request),
+ LSM_HOOK_INIT(inet_csk_clone, selinux_inet_csk_clone),
+ LSM_HOOK_INIT(inet_conn_established, selinux_inet_conn_established),
+ LSM_HOOK_INIT(secmark_relabel_packet, selinux_secmark_relabel_packet),
+ LSM_HOOK_INIT(secmark_refcount_inc, selinux_secmark_refcount_inc),
+ LSM_HOOK_INIT(secmark_refcount_dec, selinux_secmark_refcount_dec),
+ LSM_HOOK_INIT(req_classify_flow, selinux_req_classify_flow),
+ LSM_HOOK_INIT(tun_dev_free_security, selinux_tun_dev_free_security),
+ LSM_HOOK_INIT(tun_dev_create, selinux_tun_dev_create),
+ LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue),
+ LSM_HOOK_INIT(tun_dev_attach, selinux_tun_dev_attach),
+ LSM_HOOK_INIT(tun_dev_open, selinux_tun_dev_open),
+#ifdef CONFIG_SECURITY_INFINIBAND
+ LSM_HOOK_INIT(ib_pkey_access, selinux_ib_pkey_access),
+ LSM_HOOK_INIT(ib_endport_manage_subnet,
+ selinux_ib_endport_manage_subnet),
+ LSM_HOOK_INIT(ib_free_security, selinux_ib_free_security),
+#endif
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ LSM_HOOK_INIT(xfrm_policy_free_security, selinux_xfrm_policy_free),
+ LSM_HOOK_INIT(xfrm_policy_delete_security, selinux_xfrm_policy_delete),
+ LSM_HOOK_INIT(xfrm_state_free_security, selinux_xfrm_state_free),
+ LSM_HOOK_INIT(xfrm_state_delete_security, selinux_xfrm_state_delete),
+ LSM_HOOK_INIT(xfrm_policy_lookup, selinux_xfrm_policy_lookup),
+ LSM_HOOK_INIT(xfrm_state_pol_flow_match,
+ selinux_xfrm_state_pol_flow_match),
+ LSM_HOOK_INIT(xfrm_decode_session, selinux_xfrm_decode_session),
+#endif
+
+#ifdef CONFIG_KEYS
+ LSM_HOOK_INIT(key_free, selinux_key_free),
+ LSM_HOOK_INIT(key_permission, selinux_key_permission),
+ LSM_HOOK_INIT(key_getsecurity, selinux_key_getsecurity),
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ LSM_HOOK_INIT(watch_key, selinux_watch_key),
+#endif
+#endif
+
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(audit_rule_known, selinux_audit_rule_known),
+ LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match),
+ LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free),
+#endif
+
+#ifdef CONFIG_BPF_SYSCALL
+ LSM_HOOK_INIT(bpf, selinux_bpf),
+ LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
+ LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
+ LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
+ LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+ LSM_HOOK_INIT(perf_event_open, selinux_perf_event_open),
+ LSM_HOOK_INIT(perf_event_free, selinux_perf_event_free),
+ LSM_HOOK_INIT(perf_event_read, selinux_perf_event_read),
+ LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
+#endif
+
+#ifdef CONFIG_IO_URING
+ LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds),
+ LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd),
+#endif
+
+ /*
+ * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
+ */
+ LSM_HOOK_INIT(fs_context_submount, selinux_fs_context_submount),
+ LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+ LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+ LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
+#endif
+
+ /*
+ * PUT "ALLOCATING" HOOKS HERE
+ */
+ LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
+ LSM_HOOK_INIT(msg_queue_alloc_security,
+ selinux_msg_queue_alloc_security),
+ LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
+ LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
+ LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
+ LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
+ LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
+ LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
+ LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
+ LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
+#ifdef CONFIG_SECURITY_INFINIBAND
+ LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
+#endif
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
+ LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
+ LSM_HOOK_INIT(xfrm_state_alloc_acquire,
+ selinux_xfrm_state_alloc_acquire),
+#endif
+#ifdef CONFIG_KEYS
+ LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
+#endif
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
+#endif
+#ifdef CONFIG_BPF_SYSCALL
+ LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
+ LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
+#endif
+};
+
+static __init int selinux_init(void)
+{
+ pr_info("SELinux: Initializing.\n");
+
+ memset(&selinux_state, 0, sizeof(selinux_state));
+ enforcing_set(&selinux_state, selinux_enforcing_boot);
+ if (CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE)
+ pr_err("SELinux: CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE is non-zero. This is deprecated and will be rejected in a future kernel release.\n");
+ checkreqprot_set(&selinux_state, selinux_checkreqprot_boot);
+ selinux_avc_init(&selinux_state.avc);
+ mutex_init(&selinux_state.status_lock);
+ mutex_init(&selinux_state.policy_mutex);
+
+ /* Set the security state for the initial task. */
+ cred_init_security();
+
+ default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
+
+ avc_init();
+
+ avtab_cache_init();
+
+ ebitmap_cache_init();
+
+ hashtab_cache_init();
+
+ security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks), "selinux");
+
+ if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
+ panic("SELinux: Unable to register AVC netcache callback\n");
+
+ if (avc_add_callback(selinux_lsm_notifier_avc_callback, AVC_CALLBACK_RESET))
+ panic("SELinux: Unable to register AVC LSM notifier callback\n");
+
+ if (selinux_enforcing_boot)
+ pr_debug("SELinux: Starting in enforcing mode\n");
+ else
+ pr_debug("SELinux: Starting in permissive mode\n");
+
+ fs_validate_description("selinux", selinux_fs_parameters);
+
+ return 0;
+}
+
+static void delayed_superblock_init(struct super_block *sb, void *unused)
+{
+ selinux_set_mnt_opts(sb, NULL, 0, NULL);
+}
+
+void selinux_complete_init(void)
+{
+ pr_debug("SELinux: Completing initialization.\n");
+
+ /* Set up any superblocks initialized prior to the policy load. */
+ pr_debug("SELinux: Setting up existing superblocks.\n");
+ iterate_supers(delayed_superblock_init, NULL);
+}
+
+/* SELinux requires early initialization in order to label
+ all processes and objects when they are created. */
+DEFINE_LSM(selinux) = {
+ .name = "selinux",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .enabled = &selinux_enabled_boot,
+ .blobs = &selinux_blob_sizes,
+ .init = selinux_init,
+};
+
+#if defined(CONFIG_NETFILTER)
+
+static const struct nf_hook_ops selinux_nf_ops[] = {
+ {
+ .hook = selinux_ip_postroute,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_SELINUX_LAST,
+ },
+ {
+ .hook = selinux_ip_forward,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+ {
+ .hook = selinux_ip_output,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .hook = selinux_ip_postroute,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_SELINUX_LAST,
+ },
+ {
+ .hook = selinux_ip_forward,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+ {
+ .hook = selinux_ip_output,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+#endif /* IPV6 */
+};
+
+static int __net_init selinux_nf_register(struct net *net)
+{
+ return nf_register_net_hooks(net, selinux_nf_ops,
+ ARRAY_SIZE(selinux_nf_ops));
+}
+
+static void __net_exit selinux_nf_unregister(struct net *net)
+{
+ nf_unregister_net_hooks(net, selinux_nf_ops,
+ ARRAY_SIZE(selinux_nf_ops));
+}
+
+static struct pernet_operations selinux_net_ops = {
+ .init = selinux_nf_register,
+ .exit = selinux_nf_unregister,
+};
+
+static int __init selinux_nf_ip_init(void)
+{
+ int err;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ pr_debug("SELinux: Registering netfilter hooks\n");
+
+ err = register_pernet_subsys(&selinux_net_ops);
+ if (err)
+ panic("SELinux: register_pernet_subsys: error %d\n", err);
+
+ return 0;
+}
+__initcall(selinux_nf_ip_init);
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static void selinux_nf_ip_exit(void)
+{
+ pr_debug("SELinux: Unregistering netfilter hooks\n");
+
+ unregister_pernet_subsys(&selinux_net_ops);
+}
+#endif
+
+#else /* CONFIG_NETFILTER */
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+#define selinux_nf_ip_exit()
+#endif
+
+#endif /* CONFIG_NETFILTER */
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+int selinux_disable(struct selinux_state *state)
+{
+ if (selinux_initialized(state)) {
+ /* Not permitted after initial policy load. */
+ return -EINVAL;
+ }
+
+ if (selinux_disabled(state)) {
+ /* Only do this once. */
+ return -EINVAL;
+ }
+
+ selinux_mark_disabled(state);
+
+ pr_info("SELinux: Disabled at runtime.\n");
+
+ /*
+ * Unregister netfilter hooks.
+ * Must be done before security_delete_hooks() to avoid breaking
+ * runtime disable.
+ */
+ selinux_nf_ip_exit();
+
+ security_delete_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
+
+ /* Try to destroy the avc node cache */
+ avc_disable();
+
+ /* Unregister selinuxfs. */
+ exit_sel_fs();
+
+ return 0;
+}
+#endif
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
new file mode 100644
index 000000000..5839ca7bb
--- /dev/null
+++ b/security/selinux/ibpkey.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pkey table
+ *
+ * SELinux must keep a mapping of InfiniBand PKEYs to labels/SIDs. This
+ * mapping is maintained as part of the normal policy, but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * This code is heavily based on the "netif" and "netport" concept originally
+ * developed by
+ * James Morris <jmorris@redhat.com> and
+ * Paul Moore <paul@paul-moore.com>
+ * (see security/selinux/netif.c and security/selinux/netport.c for more
+ * information)
+ */
+
+/*
+ * (c) Mellanox Technologies, 2016
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "ibpkey.h"
+#include "objsec.h"
+
+#define SEL_PKEY_HASH_SIZE 256
+#define SEL_PKEY_HASH_BKT_LIMIT 16
+
+struct sel_ib_pkey_bkt {
+ int size;
+ struct list_head list;
+};
+
+struct sel_ib_pkey {
+ struct pkey_security_struct psec;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+static DEFINE_SPINLOCK(sel_ib_pkey_lock);
+static struct sel_ib_pkey_bkt sel_ib_pkey_hash[SEL_PKEY_HASH_SIZE];
+
+/**
+ * sel_ib_pkey_hashfn - Hashing function for the pkey table
+ * @pkey: pkey number
+ *
+ * Description:
+ * This is the hashing function for the pkey table; it returns the bucket
+ * number for the given pkey.
+ *
+ */
+static unsigned int sel_ib_pkey_hashfn(u16 pkey)
+{
+ return (pkey & (SEL_PKEY_HASH_SIZE - 1));
+}
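+
+/* Worked example (illustrative): with SEL_PKEY_HASH_SIZE == 256 the mask
+ * is 0xff, so sel_ib_pkey_hashfn(0x8012) == 0x12 (bucket 18); only the low
+ * byte of the pkey selects the bucket.
+ */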
+
+/**
+ * sel_ib_pkey_find - Search for a pkey record
+ * @subnet_prefix: subnet prefix
+ * @pkey_num: pkey number
+ *
+ * Description:
+ * Search the pkey table and return the matching record. If an entry
+ * cannot be found in the table, return NULL.
+ *
+ */
+static struct sel_ib_pkey *sel_ib_pkey_find(u64 subnet_prefix, u16 pkey_num)
+{
+ unsigned int idx;
+ struct sel_ib_pkey *pkey;
+
+ idx = sel_ib_pkey_hashfn(pkey_num);
+ list_for_each_entry_rcu(pkey, &sel_ib_pkey_hash[idx].list, list) {
+ if (pkey->psec.pkey == pkey_num &&
+ pkey->psec.subnet_prefix == subnet_prefix)
+ return pkey;
+ }
+
+ return NULL;
+}
+
+/**
+ * sel_ib_pkey_insert - Insert a new pkey into the table
+ * @pkey: the new pkey record
+ *
+ * Description:
+ * Add a new pkey record to the hash table.
+ *
+ */
+static void sel_ib_pkey_insert(struct sel_ib_pkey *pkey)
+{
+ unsigned int idx;
+
+ /* we need to impose a limit on the growth of the hash table, so check
+ * this bucket to make sure it is within the specified bounds
+ */
+ idx = sel_ib_pkey_hashfn(pkey->psec.pkey);
+ list_add_rcu(&pkey->list, &sel_ib_pkey_hash[idx].list);
+ if (sel_ib_pkey_hash[idx].size == SEL_PKEY_HASH_BKT_LIMIT) {
+ struct sel_ib_pkey *tail;
+
+ tail = list_entry(
+ rcu_dereference_protected(
+ list_tail_rcu(&sel_ib_pkey_hash[idx].list),
+ lockdep_is_held(&sel_ib_pkey_lock)),
+ struct sel_ib_pkey, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+ } else {
+ sel_ib_pkey_hash[idx].size++;
+ }
+}
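+
+/* Note: list_add_rcu() above inserts at the head of the bucket, so once a
+ * bucket holds SEL_PKEY_HASH_BKT_LIMIT entries the evicted tail entry is
+ * the least recently inserted one.
+ */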
+
+/**
+ * sel_ib_pkey_sid_slow - Lookup the SID of a pkey using the policy
+ * @subnet_prefix: subnet prefix
+ * @pkey_num: pkey number
+ * @sid: pkey SID
+ *
+ * Description:
+ * This function determines the SID of a pkey by querying the security
+ * policy. The result is added to the pkey table to speed up future
+ * queries. Returns zero on success, negative values on failure.
+ *
+ */
+static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
+{
+ int ret;
+ struct sel_ib_pkey *pkey;
+ struct sel_ib_pkey *new = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sel_ib_pkey_lock, flags);
+ pkey = sel_ib_pkey_find(subnet_prefix, pkey_num);
+ if (pkey) {
+ *sid = pkey->psec.sid;
+ spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+ return 0;
+ }
+
+ ret = security_ib_pkey_sid(&selinux_state, subnet_prefix, pkey_num,
+ sid);
+ if (ret)
+ goto out;
+
+ /* If this memory allocation fails, return -ENOMEM; the SID lookup
+ * itself succeeded, but the result will not be added to the cache.
+ */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new->psec.subnet_prefix = subnet_prefix;
+ new->psec.pkey = pkey_num;
+ new->psec.sid = *sid;
+ sel_ib_pkey_insert(new);
+
+out:
+ spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+ return ret;
+}
+
+/**
+ * sel_ib_pkey_sid - Lookup the SID of a PKEY
+ * @subnet_prefix: subnet prefix
+ * @pkey_num: pkey number
+ * @sid: pkey SID
+ *
+ * Description:
+ * This function determines the SID of a PKEY using the fastest method
+ * possible. First the pkey table is queried, but if an entry can't be found
+ * then the policy is queried and the result is added to the table to speed up
+ * future queries. Returns zero on success, negative values on failure.
+ *
+ */
+int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *sid)
+{
+ struct sel_ib_pkey *pkey;
+
+ rcu_read_lock();
+ pkey = sel_ib_pkey_find(subnet_prefix, pkey_num);
+ if (pkey) {
+ *sid = pkey->psec.sid;
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ return sel_ib_pkey_sid_slow(subnet_prefix, pkey_num, sid);
+}
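+
+/* Note: the fast path above is lockless under rcu_read_lock(); only a cache
+ * miss falls back to sel_ib_pkey_sid_slow(), which takes sel_ib_pkey_lock
+ * and may allocate a new cache entry with GFP_ATOMIC.
+ */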
+
+/**
+ * sel_ib_pkey_flush - Flush the entire pkey table
+ *
+ * Description:
+ * Remove all entries from the pkey table.
+ *
+ */
+void sel_ib_pkey_flush(void)
+{
+ unsigned int idx;
+ struct sel_ib_pkey *pkey, *pkey_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sel_ib_pkey_lock, flags);
+ for (idx = 0; idx < SEL_PKEY_HASH_SIZE; idx++) {
+ list_for_each_entry_safe(pkey, pkey_tmp,
+ &sel_ib_pkey_hash[idx].list, list) {
+ list_del_rcu(&pkey->list);
+ kfree_rcu(pkey, rcu);
+ }
+ sel_ib_pkey_hash[idx].size = 0;
+ }
+ spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+}
+
+static __init int sel_ib_pkey_init(void)
+{
+ int iter;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
+ INIT_LIST_HEAD(&sel_ib_pkey_hash[iter].list);
+ sel_ib_pkey_hash[iter].size = 0;
+ }
+
+ return 0;
+}
+
+subsys_initcall(sel_ib_pkey_init);
diff --git a/security/selinux/ima.c b/security/selinux/ima.c
new file mode 100644
index 000000000..a915b89d5
--- /dev/null
+++ b/security/selinux/ima.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * Measure critical data structures maintained by SELinux
+ * using the IMA subsystem.
+ */
+#include <linux/vmalloc.h>
+#include <linux/ima.h>
+#include "security.h"
+#include "ima.h"
+
+/*
+ * selinux_ima_collect_state - Read SELinux configuration settings
+ *
+ * @state: selinux_state
+ *
+ * On success, returns the configuration settings string.
+ * On error, returns NULL.
+ */
+static char *selinux_ima_collect_state(struct selinux_state *state)
+{
+ const char *on = "=1;", *off = "=0;";
+ char *buf;
+ int buf_len, len, i, rc;
+
+ buf_len = strlen("initialized=0;enforcing=0;checkreqprot=0;") + 1;
+
+ len = strlen(on);
+ for (i = 0; i < __POLICYDB_CAP_MAX; i++)
+ buf_len += strlen(selinux_policycap_names[i]) + len;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ rc = strscpy(buf, "initialized", buf_len);
+ WARN_ON(rc < 0);
+
+ rc = strlcat(buf, selinux_initialized(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, "enforcing", buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, enforcing_enabled(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, "checkreqprot", buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, checkreqprot_get(state) ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+
+ for (i = 0; i < __POLICYDB_CAP_MAX; i++) {
+ rc = strlcat(buf, selinux_policycap_names[i], buf_len);
+ WARN_ON(rc >= buf_len);
+
+ rc = strlcat(buf, state->policycap[i] ? on : off, buf_len);
+ WARN_ON(rc >= buf_len);
+ }
+
+ return buf;
+}
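+
+/* For illustration only: the returned string has the shape
+ * "initialized=1;enforcing=0;checkreqprot=0;<capname>=<0|1>;..." with one
+ * entry per capability in selinux_policycap_names[]; the exact capability
+ * names depend on the kernel version.
+ */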
+
+/*
+ * selinux_ima_measure_state_locked - Measure SELinux state and hash of policy
+ *
+ * @state: selinux state struct
+ */
+void selinux_ima_measure_state_locked(struct selinux_state *state)
+{
+ char *state_str = NULL;
+ void *policy = NULL;
+ size_t policy_len;
+ int rc = 0;
+
+ lockdep_assert_held(&state->policy_mutex);
+
+ state_str = selinux_ima_collect_state(state);
+ if (!state_str) {
+ pr_err("SELinux: %s: failed to read state.\n", __func__);
+ return;
+ }
+
+ ima_measure_critical_data("selinux", "selinux-state",
+ state_str, strlen(state_str), false,
+ NULL, 0);
+
+ kfree(state_str);
+
+ /*
+ * Measure SELinux policy only after initialization is completed.
+ */
+ if (!selinux_initialized(state))
+ return;
+
+ rc = security_read_state_kernel(state, &policy, &policy_len);
+ if (rc) {
+ pr_err("SELinux: %s: failed to read policy %d.\n", __func__, rc);
+ return;
+ }
+
+ ima_measure_critical_data("selinux", "selinux-policy-hash",
+ policy, policy_len, true,
+ NULL, 0);
+
+ vfree(policy);
+}
+
+/*
+ * selinux_ima_measure_state - Measure SELinux state and hash of policy
+ *
+ * @state: selinux state struct
+ */
+void selinux_ima_measure_state(struct selinux_state *state)
+{
+ lockdep_assert_not_held(&state->policy_mutex);
+
+ mutex_lock(&state->policy_mutex);
+ selinux_ima_measure_state_locked(state);
+ mutex_unlock(&state->policy_mutex);
+}
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
new file mode 100644
index 000000000..406bceb90
--- /dev/null
+++ b/security/selinux/include/audit.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SELinux support for the Audit LSM hooks
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
+ */
+
+#ifndef _SELINUX_AUDIT_H
+#define _SELINUX_AUDIT_H
+
+#include <linux/audit.h>
+#include <linux/types.h>
+
+/**
+ * selinux_audit_rule_init - allocate/initialize an SELinux audit rule structure.
+ * @field: the field this rule refers to
+ * @op: the operator the rule uses
+ * @rulestr: the text "target" of the rule
+ * @rule: pointer to the new rule structure returned via this
+ *
+ * Returns 0 if successful, -errno if not. On success, the rule structure
+ * will be allocated internally. The caller must free this structure with
+ * selinux_audit_rule_free() after use.
+ */
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
+
+/**
+ * selinux_audit_rule_free - free an SELinux audit rule structure.
+ * @rule: pointer to the audit rule to be freed
+ *
+ * This will free all memory associated with the given rule.
+ * If @rule is NULL, no operation is performed.
+ */
+void selinux_audit_rule_free(void *rule);
+
+/**
+ * selinux_audit_rule_match - determine if a context ID matches a rule.
+ * @sid: the context ID to check
+ * @field: the field this rule refers to
+ * @op: the operator the rule uses
+ * @rule: pointer to the audit rule to check against
+ *
+ * Returns 1 if the context ID matches the rule, 0 if it does not, and
+ * -errno on failure.
+ */
+int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *rule);
+
+/**
+ * selinux_audit_rule_known - check to see if a rule contains SELinux fields.
+ * @rule: rule to be checked
+ * Returns 1 if there are SELinux fields specified in the rule, 0 otherwise.
+ */
+int selinux_audit_rule_known(struct audit_krule *rule);
+
+#endif /* _SELINUX_AUDIT_H */
+
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
new file mode 100644
index 000000000..5525b94fd
--- /dev/null
+++ b/security/selinux/include/avc.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access vector cache interface for object managers.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SELINUX_AVC_H_
+#define _SELINUX_AVC_H_
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/audit.h>
+#include <linux/lsm_audit.h>
+#include <linux/in6.h>
+#include "flask.h"
+#include "av_permissions.h"
+#include "security.h"
+
+/*
+ * An entry in the AVC.
+ */
+struct avc_entry;
+
+struct task_struct;
+struct inode;
+struct sock;
+struct sk_buff;
+
+/*
+ * AVC statistics
+ */
+struct avc_cache_stats {
+ unsigned int lookups;
+ unsigned int misses;
+ unsigned int allocations;
+ unsigned int reclaims;
+ unsigned int frees;
+};
+
+/*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_audit_data {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ u32 denied;
+ int result;
+ struct selinux_state *state;
+} __randomize_layout;
+
+/*
+ * AVC operations
+ */
+
+void __init avc_init(void);
+
+static inline u32 avc_audit_required(u32 requested,
+ struct av_decision *avd,
+ int result,
+ u32 auditdeny,
+ u32 *deniedp)
+{
+ u32 denied, audited;
+
+ denied = requested & ~avd->allowed;
+ if (unlikely(denied)) {
+ audited = denied & avd->auditdeny;
+ /*
+ * auditdeny is TRICKY! Setting a bit in
+ * this field means that ANY denials should NOT be audited if
+ * the policy contains an explicit dontaudit rule for that
+ * permission. Take notice that this is unrelated to the
+ * actual permissions that were denied. As an example let's
+ * assume:
+ *
+ * denied == READ
+ * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
+ * auditdeny & ACCESS == 1
+ *
+ * We will NOT audit the denial even though the denied
+ * permission was READ and the auditdeny checks were for
+ * ACCESS
+ */
+ if (auditdeny && !(auditdeny & avd->auditdeny))
+ audited = 0;
+ } else if (result)
+ audited = denied = requested;
+ else
+ audited = requested & avd->auditallow;
+ *deniedp = denied;
+ return audited;
+}
+
+int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a);
+
+/**
+ * avc_audit - Audit the granting or denial of permissions.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions
+ * @avd: access vector decisions
+ * @result: result from avc_has_perm_noaudit
+ * @a: auxiliary audit data
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy. This function is typically called by
+ * avc_has_perm() after a permission check, but can also be
+ * called directly by callers who use avc_has_perm_noaudit()
+ * in order to separate the permission check from the auditing.
+ * For example, this separation is useful when the permission check must
+ * be performed under a lock, to allow the lock to be released
+ * before calling the auditing code.
+ */
+static inline int avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct av_decision *avd,
+ int result,
+ struct common_audit_data *a)
+{
+ u32 audited, denied;
+
+ audited = avc_audit_required(requested, avd, result, 0, &denied);
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(state, ssid, tsid, tclass,
+ requested, audited, denied, result,
+ a);
+}
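+
+/* Usage sketch (illustrative only): a caller that must perform the
+ * permission check under a lock can split the two phases, e.g.
+ *
+ *    rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
+ *                              0, &avd);
+ *    ... release the lock ...
+ *    rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc, &ad);
+ */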
+
+#define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
+int avc_has_perm_noaudit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned flags,
+ struct av_decision *avd);
+
+int avc_has_perm(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata);
+
+int avc_has_extended_perms(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 perm, struct common_audit_data *ad);
+
+
+u32 avc_policy_seqno(struct selinux_state *state);
+
+#define AVC_CALLBACK_GRANT 1
+#define AVC_CALLBACK_TRY_REVOKE 2
+#define AVC_CALLBACK_REVOKE 4
+#define AVC_CALLBACK_RESET 8
+#define AVC_CALLBACK_AUDITALLOW_ENABLE 16
+#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
+#define AVC_CALLBACK_AUDITDENY_ENABLE 64
+#define AVC_CALLBACK_AUDITDENY_DISABLE 128
+#define AVC_CALLBACK_ADD_XPERMS 256
+
+int avc_add_callback(int (*callback)(u32 event), u32 events);
+
+/* Exported to selinuxfs */
+struct selinux_avc;
+int avc_get_hash_stats(struct selinux_avc *avc, char *page);
+unsigned int avc_get_cache_threshold(struct selinux_avc *avc);
+void avc_set_cache_threshold(struct selinux_avc *avc,
+ unsigned int cache_threshold);
+
+/* Attempt to free avc node cache */
+void avc_disable(void);
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
+#endif
+
+#endif /* _SELINUX_AVC_H_ */
+
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
new file mode 100644
index 000000000..42912c917
--- /dev/null
+++ b/security/selinux/include/avc_ss.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access vector cache interface for the security server.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SELINUX_AVC_SS_H_
+#define _SELINUX_AVC_SS_H_
+
+#include <linux/types.h>
+
+struct selinux_avc;
+int avc_ss_reset(struct selinux_avc *avc, u32 seqno);
+
+/* Class/perm mapping support */
+struct security_class_mapping {
+ const char *name;
+ const char *perms[sizeof(u32) * 8 + 1];
+};
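+
+/* Note: perms[] provides one slot per bit of a 32-bit access vector plus a
+ * terminating NULL, i.e. at most 33 strings per class.
+ */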
+
+extern const struct security_class_mapping secclass_map[];
+
+#endif /* _SELINUX_AVC_SS_H_ */
+
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
new file mode 100644
index 000000000..a3c380775
--- /dev/null
+++ b/security/selinux/include/classmap.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/capability.h>
+#include <linux/socket.h>
+
+#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
+ "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
+
+#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
+ "rename", "execute", "quotaon", "mounton", "audit_access", \
+ "open", "execmod", "watch", "watch_mount", "watch_sb", \
+ "watch_with_perm", "watch_reads"
+
+#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
+ "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
+ "sendto", "name_bind"
+
+#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
+ "write", "associate", "unix_read", "unix_write"
+
+#define COMMON_CAP_PERMS "chown", "dac_override", "dac_read_search", \
+ "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap", \
+ "linux_immutable", "net_bind_service", "net_broadcast", \
+ "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module", \
+ "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin", \
+ "sys_boot", "sys_nice", "sys_resource", "sys_time", \
+ "sys_tty_config", "mknod", "lease", "audit_write", \
+ "audit_control", "setfcap"
+
+#define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \
+ "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf", \
+ "checkpoint_restore"
+
+#if CAP_LAST_CAP > CAP_CHECKPOINT_RESTORE
+#error New capability defined, please update COMMON_CAP2_PERMS.
+#endif
+
+/*
+ * Note: The name for any socket class should be suffixed by "socket",
+ * and must not contain more than one occurrence of the substring "socket".
+ */
+const struct security_class_mapping secclass_map[] = {
+ { "security",
+ { "compute_av", "compute_create", "compute_member",
+ "check_context", "load_policy", "compute_relabel",
+ "compute_user", "setenforce", "setbool", "setsecparam",
+ "setcheckreqprot", "read_policy", "validate_trans", NULL } },
+ { "process",
+ { "fork", "transition", "sigchld", "sigkill",
+ "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
+ "getsession", "getpgid", "setpgid", "getcap", "setcap", "share",
+ "getattr", "setexec", "setfscreate", "noatsecure", "siginh",
+ "setrlimit", "rlimitinh", "dyntransition", "setcurrent",
+ "execmem", "execstack", "execheap", "setkeycreate",
+ "setsockcreate", "getrlimit", NULL } },
+ { "process2",
+ { "nnp_transition", "nosuid_transition", NULL } },
+ { "system",
+ { "ipc_info", "syslog_read", "syslog_mod",
+ "syslog_console", "module_request", "module_load", NULL } },
+ { "capability",
+ { COMMON_CAP_PERMS, NULL } },
+ { "filesystem",
+ { "mount", "remount", "unmount", "getattr",
+ "relabelfrom", "relabelto", "associate", "quotamod",
+ "quotaget", "watch", NULL } },
+ { "file",
+ { COMMON_FILE_PERMS,
+ "execute_no_trans", "entrypoint", NULL } },
+ { "dir",
+ { COMMON_FILE_PERMS, "add_name", "remove_name",
+ "reparent", "search", "rmdir", NULL } },
+ { "fd", { "use", NULL } },
+ { "lnk_file",
+ { COMMON_FILE_PERMS, NULL } },
+ { "chr_file",
+ { COMMON_FILE_PERMS, NULL } },
+ { "blk_file",
+ { COMMON_FILE_PERMS, NULL } },
+ { "sock_file",
+ { COMMON_FILE_PERMS, NULL } },
+ { "fifo_file",
+ { COMMON_FILE_PERMS, NULL } },
+ { "socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "tcp_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", "name_connect",
+ NULL } },
+ { "udp_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", NULL } },
+ { "rawip_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", NULL } },
+ { "node",
+ { "recvfrom", "sendto", NULL } },
+ { "netif",
+ { "ingress", "egress", NULL } },
+ { "netlink_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "packet_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "key_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "unix_stream_socket",
+ { COMMON_SOCK_PERMS, "connectto", NULL } },
+ { "unix_dgram_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "sem",
+ { COMMON_IPC_PERMS, NULL } },
+ { "msg", { "send", "receive", NULL } },
+ { "msgq",
+ { COMMON_IPC_PERMS, "enqueue", NULL } },
+ { "shm",
+ { COMMON_IPC_PERMS, "lock", NULL } },
+ { "ipc",
+ { COMMON_IPC_PERMS, NULL } },
+ { "netlink_route_socket",
+ { COMMON_SOCK_PERMS,
+ "nlmsg_read", "nlmsg_write", NULL } },
+ { "netlink_tcpdiag_socket",
+ { COMMON_SOCK_PERMS,
+ "nlmsg_read", "nlmsg_write", NULL } },
+ { "netlink_nflog_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_xfrm_socket",
+ { COMMON_SOCK_PERMS,
+ "nlmsg_read", "nlmsg_write", NULL } },
+ { "netlink_selinux_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_iscsi_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_audit_socket",
+ { COMMON_SOCK_PERMS,
+ "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv",
+ "nlmsg_tty_audit", NULL } },
+ { "netlink_fib_lookup_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_connector_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_netfilter_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_dnrt_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "association",
+ { "sendto", "recvfrom", "setcontext", "polmatch", NULL } },
+ { "netlink_kobject_uevent_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_generic_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_scsitransport_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_rdma_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netlink_crypto_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "appletalk_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "packet",
+ { "send", "recv", "relabelto", "forward_in", "forward_out", NULL } },
+ { "key",
+ { "view", "read", "write", "search", "link", "setattr", "create",
+ NULL } },
+ { "dccp_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", "name_connect", NULL } },
+ { "memprotect", { "mmap_zero", NULL } },
+ { "peer", { "recv", NULL } },
+ { "capability2",
+ { COMMON_CAP2_PERMS, NULL } },
+ { "kernel_service", { "use_as_override", "create_files_as", NULL } },
+ { "tun_socket",
+ { COMMON_SOCK_PERMS, "attach_queue", NULL } },
+ { "binder", { "impersonate", "call", "set_context_mgr", "transfer",
+ NULL } },
+ { "cap_userns",
+ { COMMON_CAP_PERMS, NULL } },
+ { "cap2_userns",
+ { COMMON_CAP2_PERMS, NULL } },
+ { "sctp_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", "name_connect", "association", NULL } },
+ { "icmp_socket",
+ { COMMON_SOCK_PERMS,
+ "node_bind", NULL } },
+ { "ax25_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "ipx_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "netrom_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "atmpvc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "x25_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "rose_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "decnet_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "atmsvc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "rds_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "irda_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "pppox_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "llc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "can_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "tipc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "bluetooth_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "iucv_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "rxrpc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "isdn_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "phonet_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "ieee802154_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "caif_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "alg_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "nfc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "vsock_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "kcm_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "qipcrtr_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "smc_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "infiniband_pkey",
+ { "access", NULL } },
+ { "infiniband_endport",
+ { "manage_subnet", NULL } },
+ { "bpf",
+ { "map_create", "map_read", "map_write", "prog_load", "prog_run",
+ NULL } },
+ { "xdp_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "mctp_socket",
+ { COMMON_SOCK_PERMS, NULL } },
+ { "perf_event",
+ { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
+ { "anon_inode",
+ { COMMON_FILE_PERMS, NULL } },
+ { "io_uring",
+ { "override_creds", "sqpoll", "cmd", NULL } },
+ { "user_namespace",
+ { "create", NULL } },
+ { NULL }
+ };
+
+#if PF_MAX > 46
+#error New address family defined, please update secclass_map.
+#endif
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
new file mode 100644
index 000000000..b09343346
--- /dev/null
+++ b/security/selinux/include/conditional.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface to booleans in the security server. This is exported
+ * for the selinuxfs.
+ *
+ * Author: Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ */
+
+#ifndef _SELINUX_CONDITIONAL_H_
+#define _SELINUX_CONDITIONAL_H_
+
+#include "security.h"
+
+int security_get_bools(struct selinux_policy *policy,
+ u32 *len, char ***names, int **values);
+
+int security_set_bools(struct selinux_state *state, u32 len, int *values);
+
+int security_get_bool_value(struct selinux_state *state, u32 index);
+
+#endif
diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h
new file mode 100644
index 000000000..c992f83b0
--- /dev/null
+++ b/security/selinux/include/ibpkey.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * pkey table
+ *
+ * SELinux must keep a mapping of pkeys to labels/SIDs. This
+ * mapping is maintained as part of the normal policy, but a fast cache is
+ * needed to reduce the lookup overhead.
+ */
+
+/*
+ * (c) Mellanox Technologies, 2016
+ */
+
+#ifndef _SELINUX_IB_PKEY_H
+#define _SELINUX_IB_PKEY_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+void sel_ib_pkey_flush(void);
+int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid);
+#else
+static inline void sel_ib_pkey_flush(void)
+{
+ return;
+}
+static inline int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid)
+{
+ *sid = SECINITSID_UNLABELED;
+ return 0;
+}
+#endif
+
+#endif
diff --git a/security/selinux/include/ima.h b/security/selinux/include/ima.h
new file mode 100644
index 000000000..75ca92b4a
--- /dev/null
+++ b/security/selinux/include/ima.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * Measure critical data structures maintained by SELinux
+ * using the IMA subsystem.
+ */
+
+#ifndef _SELINUX_IMA_H_
+#define _SELINUX_IMA_H_
+
+#include "security.h"
+
+#ifdef CONFIG_IMA
+extern void selinux_ima_measure_state(struct selinux_state *selinux_state);
+extern void selinux_ima_measure_state_locked(
+ struct selinux_state *selinux_state);
+#else
+static inline void selinux_ima_measure_state(struct selinux_state *selinux_state)
+{
+}
+static inline void selinux_ima_measure_state_locked(
+ struct selinux_state *selinux_state)
+{
+}
+#endif
+
+#endif /* _SELINUX_IMA_H_ */
diff --git a/security/selinux/include/initial_sid_to_string.h b/security/selinux/include/initial_sid_to_string.h
new file mode 100644
index 000000000..60820517a
--- /dev/null
+++ b/security/selinux/include/initial_sid_to_string.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+static const char *const initial_sid_to_string[] = {
+ NULL,
+ "kernel",
+ "security",
+ "unlabeled",
+ NULL,
+ "file",
+ NULL,
+ NULL,
+ "any_socket",
+ "port",
+ "netif",
+ "netmsg",
+ "node",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "devnull",
+};
+
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
new file mode 100644
index 000000000..85ec30d11
--- /dev/null
+++ b/security/selinux/include/netif.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Network interface table.
+ *
+ * Network interfaces (devices) do not have a security field, so we
+ * maintain a table associating each interface with a SID.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Paul Moore <paul@paul-moore.com>
+ */
+#ifndef _SELINUX_NETIF_H_
+#define _SELINUX_NETIF_H_
+
+#include <net/net_namespace.h>
+
+void sel_netif_flush(void);
+
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
+
+#endif /* _SELINUX_NETIF_H_ */
+
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
new file mode 100644
index 000000000..4d0456d3d
--- /dev/null
+++ b/security/selinux/include/netlabel.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SELinux interface to the NetLabel subsystem
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#ifndef _SELINUX_NETLABEL_H_
+#define _SELINUX_NETLABEL_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/request_sock.h>
+#include <net/sctp/structs.h>
+
+#include "avc.h"
+#include "objsec.h"
+
+#ifdef CONFIG_NETLABEL
+void selinux_netlbl_cache_invalidate(void);
+
+void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error,
+ int gateway);
+
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec);
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec);
+
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+ u16 family,
+ u32 *type,
+ u32 *sid);
+int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+ u16 family,
+ u32 sid);
+int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb);
+int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
+void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
+void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk);
+int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
+int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ struct sk_buff *skb,
+ u16 family,
+ struct common_audit_data *ad);
+int selinux_netlbl_socket_setsockopt(struct socket *sock,
+ int level,
+ int optname);
+int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr);
+int selinux_netlbl_socket_connect_locked(struct sock *sk,
+ struct sockaddr *addr);
+
+#else
+static inline void selinux_netlbl_cache_invalidate(void)
+{
+ return;
+}
+
+static inline void selinux_netlbl_err(struct sk_buff *skb,
+ u16 family,
+ int error,
+ int gateway)
+{
+ return;
+}
+
+static inline void selinux_netlbl_sk_security_free(
+ struct sk_security_struct *sksec)
+{
+ return;
+}
+
+static inline void selinux_netlbl_sk_security_reset(
+ struct sk_security_struct *sksec)
+{
+ return;
+}
+
+static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+ u16 family,
+ u32 *type,
+ u32 *sid)
+{
+ *type = NETLBL_NLTYPE_NONE;
+ *sid = SECSID_NULL;
+ return 0;
+}
+static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+ u16 family,
+ u32 sid)
+{
+ return 0;
+}
+
+static inline int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+static inline int selinux_netlbl_inet_conn_request(struct request_sock *req,
+ u16 family)
+{
+ return 0;
+}
+static inline void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+{
+ return;
+}
+static inline void selinux_netlbl_sctp_sk_clone(struct sock *sk,
+ struct sock *newsk)
+{
+ return;
+}
+static inline int selinux_netlbl_socket_post_create(struct sock *sk,
+ u16 family)
+{
+ return 0;
+}
+static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ struct sk_buff *skb,
+ u16 family,
+ struct common_audit_data *ad)
+{
+ return 0;
+}
+static inline int selinux_netlbl_socket_setsockopt(struct socket *sock,
+ int level,
+ int optname)
+{
+ return 0;
+}
+static inline int selinux_netlbl_socket_connect(struct sock *sk,
+ struct sockaddr *addr)
+{
+ return 0;
+}
+static inline int selinux_netlbl_socket_connect_locked(struct sock *sk,
+ struct sockaddr *addr)
+{
+ return 0;
+}
+#endif /* CONFIG_NETLABEL */
+
+#endif
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
new file mode 100644
index 000000000..9b8b655a8
--- /dev/null
+++ b/security/selinux/include/netnode.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Network node table
+ *
+ * SELinux must keep a mapping of network nodes to labels/SIDs. This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead since most of these queries happen on
+ * a per-packet basis.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
+ */
+
+#ifndef _SELINUX_NETNODE_H
+#define _SELINUX_NETNODE_H
+
+#include <linux/types.h>
+
+void sel_netnode_flush(void);
+
+int sel_netnode_sid(void *addr, u16 family, u32 *sid);
+
+#endif
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
new file mode 100644
index 000000000..9096a8289
--- /dev/null
+++ b/security/selinux/include/netport.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs. This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ */
+
+#ifndef _SELINUX_NETPORT_H
+#define _SELINUX_NETPORT_H
+
+#include <linux/types.h>
+
+void sel_netport_flush(void);
+
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
+
+#endif
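
netif.h, netnode.h and netport.h deliberately share the same two-call shape: a per-object SID lookup backed by a cache, plus a flush for when the loaded policy changes. A hedged sketch of the flush side, in the spirit of the AVC reset callback (the handler name is illustrative):

static void on_policy_reload(void)
{
	/* cached SIDs must not outlive the policy that produced them */
	sel_netif_flush();
	sel_netnode_flush();
	sel_netport_flush();
}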
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
new file mode 100644
index 000000000..295313240
--- /dev/null
+++ b/security/selinux/include/objsec.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * NSA Security-Enhanced Linux (SELinux) security module
+ *
+ * This file contains the SELinux security data structures for kernel objects.
+ *
+ * Author(s): Stephen Smalley, <sds@tycho.nsa.gov>
+ * Chris Vance, <cvance@nai.com>
+ * Wayne Salamon, <wsalamon@nai.com>
+ * James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2001,2002 Networks Associates Technology, Inc.
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ */
+#ifndef _SELINUX_OBJSEC_H_
+#define _SELINUX_OBJSEC_H_
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/binfmts.h>
+#include <linux/in.h>
+#include <linux/spinlock.h>
+#include <linux/lsm_hooks.h>
+#include <linux/msg.h>
+#include <net/net_namespace.h>
+#include "flask.h"
+#include "avc.h"
+
+struct task_security_struct {
+ u32 osid; /* SID prior to last execve */
+ u32 sid; /* current SID */
+ u32 exec_sid; /* exec SID */
+ u32 create_sid; /* fscreate SID */
+ u32 keycreate_sid; /* keycreate SID */
+ u32 sockcreate_sid; /* sockcreate SID */
+} __randomize_layout;
+
+enum label_initialized {
+ LABEL_INVALID, /* invalid or not initialized */
+ LABEL_INITIALIZED, /* initialized */
+ LABEL_PENDING
+};
+
+struct inode_security_struct {
+ struct inode *inode; /* back pointer to inode object */
+ struct list_head list; /* list of inode_security_struct */
+ u32 task_sid; /* SID of creating task */
+ u32 sid; /* SID of this object */
+ u16 sclass; /* security class of this object */
+ unsigned char initialized; /* initialization flag */
+ spinlock_t lock;
+};
+
+struct file_security_struct {
+ u32 sid; /* SID of open file description */
+ u32 fown_sid; /* SID of file owner (for SIGIO) */
+ u32 isid; /* SID of inode at the time of file open */
+ u32 pseqno; /* Policy seqno at the time of file open */
+};
+
+struct superblock_security_struct {
+ u32 sid; /* SID of file system superblock */
+ u32 def_sid; /* default SID for labeling */
+ u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
+ struct mutex lock;
+ struct list_head isec_head;
+ spinlock_t isec_lock;
+};
+
+struct msg_security_struct {
+ u32 sid; /* SID of message */
+};
+
+struct ipc_security_struct {
+ u16 sclass; /* security class of this object */
+ u32 sid; /* SID of IPC resource */
+};
+
+struct netif_security_struct {
+ struct net *ns; /* network namespace */
+ int ifindex; /* device index */
+ u32 sid; /* SID for this interface */
+};
+
+struct netnode_security_struct {
+ union {
+ __be32 ipv4; /* IPv4 node address */
+ struct in6_addr ipv6; /* IPv6 node address */
+ } addr;
+ u32 sid; /* SID for this node */
+ u16 family; /* address family */
+};
+
+struct netport_security_struct {
+ u32 sid; /* SID for this port */
+ u16 port; /* port number */
+ u8 protocol; /* transport protocol */
+};
+
+struct sk_security_struct {
+#ifdef CONFIG_NETLABEL
+ enum { /* NetLabel state */
+ NLBL_UNSET = 0,
+ NLBL_REQUIRE,
+ NLBL_LABELED,
+ NLBL_REQSKB,
+ NLBL_CONNLABELED,
+ } nlbl_state;
+ struct netlbl_lsm_secattr *nlbl_secattr; /* NetLabel sec attributes */
+#endif
+ u32 sid; /* SID of this object */
+ u32 peer_sid; /* SID of peer */
+ u16 sclass; /* sock security class */
+ enum { /* SCTP association state */
+ SCTP_ASSOC_UNSET = 0,
+ SCTP_ASSOC_SET,
+ } sctp_assoc_state;
+};
+
+struct tun_security_struct {
+ u32 sid; /* SID for the tun device sockets */
+};
+
+struct key_security_struct {
+ u32 sid; /* SID of key */
+};
+
+struct ib_security_struct {
+ u32 sid; /* SID of the queue pair or MAD agent */
+};
+
+struct pkey_security_struct {
+ u64 subnet_prefix; /* Port subnet prefix */
+ u16 pkey; /* PKey number */
+ u32 sid; /* SID of pkey */
+};
+
+struct bpf_security_struct {
+ u32 sid; /* SID of bpf obj creator */
+};
+
+struct perf_event_security_struct {
+ u32 sid; /* SID of perf_event obj creator */
+};
+
+extern struct lsm_blob_sizes selinux_blob_sizes;
+static inline struct task_security_struct *selinux_cred(const struct cred *cred)
+{
+ return cred->security + selinux_blob_sizes.lbs_cred;
+}
+
+static inline struct file_security_struct *selinux_file(const struct file *file)
+{
+ return file->f_security + selinux_blob_sizes.lbs_file;
+}
+
+static inline struct inode_security_struct *selinux_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+ return inode->i_security + selinux_blob_sizes.lbs_inode;
+}
+
+static inline struct msg_security_struct *selinux_msg_msg(
+ const struct msg_msg *msg_msg)
+{
+ return msg_msg->security + selinux_blob_sizes.lbs_msg_msg;
+}
+
+static inline struct ipc_security_struct *selinux_ipc(
+ const struct kern_ipc_perm *ipc)
+{
+ return ipc->security + selinux_blob_sizes.lbs_ipc;
+}
+
+/*
+ * get the subjective security ID of the current task
+ */
+static inline u32 current_sid(void)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+
+ return tsec->sid;
+}
+
+static inline struct superblock_security_struct *selinux_superblock(
+ const struct super_block *superblock)
+{
+ return superblock->s_security + selinux_blob_sizes.lbs_superblock;
+}
+
+#endif /* _SELINUX_OBJSEC_H_ */
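
The selinux_cred()/selinux_file()/selinux_inode() accessors above all follow the LSM blob pattern: the framework allocates one shared security blob per object and each module reads at its own registered offset within it. A layout sketch with made-up numbers; the 24-byte slice and the struct below are illustrative only:

/* If an earlier LSM registered 24 bytes of cred blob, then
 * selinux_blob_sizes.lbs_cred == 24 and cred->security points at:
 *
 *   [ 24 bytes owned by the other LSM | struct task_security_struct ]
 *
 * so selinux_cred() resolves to cred->security + 24. */
struct cred_blob_layout {
	char other_lsm[24];			/* hypothetical earlier module */
	struct task_security_struct selinux;	/* SELinux's slice */
};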
diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h
new file mode 100644
index 000000000..f35d3458e
--- /dev/null
+++ b/security/selinux/include/policycap.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SELINUX_POLICYCAP_H_
+#define _SELINUX_POLICYCAP_H_
+
+/* Policy capabilities */
+enum {
+ POLICYDB_CAP_NETPEER,
+ POLICYDB_CAP_OPENPERM,
+ POLICYDB_CAP_EXTSOCKCLASS,
+ POLICYDB_CAP_ALWAYSNETWORK,
+ POLICYDB_CAP_CGROUPSECLABEL,
+ POLICYDB_CAP_NNP_NOSUID_TRANSITION,
+ POLICYDB_CAP_GENFS_SECLABEL_SYMLINKS,
+ POLICYDB_CAP_IOCTL_SKIP_CLOEXEC,
+ __POLICYDB_CAP_MAX
+};
+#define POLICYDB_CAP_MAX (__POLICYDB_CAP_MAX - 1)
+
+extern const char *const selinux_policycap_names[__POLICYDB_CAP_MAX];
+
+#endif /* _SELINUX_POLICYCAP_H_ */
diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h
new file mode 100644
index 000000000..2a87fc370
--- /dev/null
+++ b/security/selinux/include/policycap_names.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SELINUX_POLICYCAP_NAMES_H_
+#define _SELINUX_POLICYCAP_NAMES_H_
+
+#include "policycap.h"
+
+/* Policy capability names */
+const char *const selinux_policycap_names[__POLICYDB_CAP_MAX] = {
+ "network_peer_controls",
+ "open_perms",
+ "extended_socket_class",
+ "always_check_network",
+ "cgroup_seclabel",
+ "nnp_nosuid_transition",
+ "genfs_seclabel_symlinks",
+ "ioctl_skip_cloexec"
+};
+
+#endif /* _SELINUX_POLICYCAP_NAMES_H_ */
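
The array is index-aligned with the enum in policycap.h, so translating in either direction is a table walk. A small sketch of the reverse lookup; the helper is illustrative only:

static int policycap_from_name(const char *name)
{
	unsigned int i;

	for (i = 0; i < __POLICYDB_CAP_MAX; i++)
		if (!strcmp(selinux_policycap_names[i], name))
			return i;	/* matches the enum value */
	return -1;			/* unknown capability name */
}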
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
new file mode 100644
index 000000000..393aff41d
--- /dev/null
+++ b/security/selinux/include/security.h
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Security server interface.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ *
+ */
+
+#ifndef _SELINUX_SECURITY_H_
+#define _SELINUX_SECURITY_H_
+
+#include <linux/compiler.h>
+#include <linux/dcache.h>
+#include <linux/magic.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/printk.h>
+#include "flask.h"
+#include "policycap.h"
+
+#define SECSID_NULL 0x00000000 /* unspecified SID */
+#define SECSID_WILD 0xffffffff /* wildcard SID */
+#define SECCLASS_NULL 0x0000 /* no class */
+
+/* Identify specific policy version changes */
+#define POLICYDB_VERSION_BASE 15
+#define POLICYDB_VERSION_BOOL 16
+#define POLICYDB_VERSION_IPV6 17
+#define POLICYDB_VERSION_NLCLASS 18
+#define POLICYDB_VERSION_VALIDATETRANS 19
+#define POLICYDB_VERSION_MLS 19
+#define POLICYDB_VERSION_AVTAB 20
+#define POLICYDB_VERSION_RANGETRANS 21
+#define POLICYDB_VERSION_POLCAP 22
+#define POLICYDB_VERSION_PERMISSIVE 23
+#define POLICYDB_VERSION_BOUNDARY 24
+#define POLICYDB_VERSION_FILENAME_TRANS 25
+#define POLICYDB_VERSION_ROLETRANS 26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
+#define POLICYDB_VERSION_DEFAULT_TYPE 28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
+#define POLICYDB_VERSION_XPERMS_IOCTL 30
+#define POLICYDB_VERSION_INFINIBAND 31
+#define POLICYDB_VERSION_GLBLUB 32
+#define POLICYDB_VERSION_COMP_FTRANS 33 /* compressed filename transitions */
+
+/* Range of policy versions we understand */
+#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_COMP_FTRANS
+
+/* Mask for just the mount related flags */
+#define SE_MNTMASK 0x0f
+/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
+#define CONTEXT_MNT 0x01
+#define FSCONTEXT_MNT 0x02
+#define ROOTCONTEXT_MNT 0x04
+#define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT 0x10
+/* Non-mount related flags */
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
+#define SE_SBGENFS 0x0400
+#define SE_SBGENFS_XATTR 0x0800
+
+#define CONTEXT_STR "context"
+#define FSCONTEXT_STR "fscontext"
+#define ROOTCONTEXT_STR "rootcontext"
+#define DEFCONTEXT_STR "defcontext"
+#define SECLABEL_STR "seclabel"
+
+struct netlbl_lsm_secattr;
+
+extern int selinux_enabled_boot;
+
+/*
+ * type_datum properties
+ * available when the kernel policy version is >= POLICYDB_VERSION_BOUNDARY
+ */
+#define TYPEDATUM_PROPERTY_PRIMARY 0x0001
+#define TYPEDATUM_PROPERTY_ATTRIBUTE 0x0002
+
+/* limitation of boundary depth */
+#define POLICYDB_BOUNDS_MAXDEPTH 4
+
+struct selinux_avc;
+struct selinux_policy;
+
+struct selinux_state {
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+ bool disabled;
+#endif
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+ bool enforcing;
+#endif
+ bool checkreqprot;
+ bool initialized;
+ bool policycap[__POLICYDB_CAP_MAX];
+
+ struct page *status_page;
+ struct mutex status_lock;
+
+ struct selinux_avc *avc;
+ struct selinux_policy __rcu *policy;
+ struct mutex policy_mutex;
+} __randomize_layout;
+
+void selinux_avc_init(struct selinux_avc **avc);
+
+extern struct selinux_state selinux_state;
+
+static inline bool selinux_initialized(const struct selinux_state *state)
+{
+ /* do a synchronized load to avoid race conditions */
+ return smp_load_acquire(&state->initialized);
+}
+
+static inline void selinux_mark_initialized(struct selinux_state *state)
+{
+ /* do a synchronized write to avoid race conditions */
+ smp_store_release(&state->initialized, true);
+}
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+static inline bool enforcing_enabled(struct selinux_state *state)
+{
+ return READ_ONCE(state->enforcing);
+}
+
+static inline void enforcing_set(struct selinux_state *state, bool value)
+{
+ WRITE_ONCE(state->enforcing, value);
+}
+#else
+static inline bool enforcing_enabled(struct selinux_state *state)
+{
+ return true;
+}
+
+static inline void enforcing_set(struct selinux_state *state, bool value)
+{
+}
+#endif
+
+static inline bool checkreqprot_get(const struct selinux_state *state)
+{
+ return READ_ONCE(state->checkreqprot);
+}
+
+static inline void checkreqprot_set(struct selinux_state *state, bool value)
+{
+ if (value)
+ pr_err("SELinux: https://github.com/SELinuxProject/selinux-kernel/wiki/DEPRECATE-checkreqprot\n");
+ WRITE_ONCE(state->checkreqprot, value);
+}
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+ return READ_ONCE(state->disabled);
+}
+
+static inline void selinux_mark_disabled(struct selinux_state *state)
+{
+ WRITE_ONCE(state->disabled, true);
+}
+#else
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+ return false;
+}
+#endif
+
+static inline bool selinux_policycap_netpeer(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_NETPEER]);
+}
+
+static inline bool selinux_policycap_openperm(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_OPENPERM]);
+}
+
+static inline bool selinux_policycap_extsockclass(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_EXTSOCKCLASS]);
+}
+
+static inline bool selinux_policycap_alwaysnetwork(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_ALWAYSNETWORK]);
+}
+
+static inline bool selinux_policycap_cgroupseclabel(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_CGROUPSECLABEL]);
+}
+
+static inline bool selinux_policycap_nnp_nosuid_transition(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_NNP_NOSUID_TRANSITION]);
+}
+
+static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_GENFS_SECLABEL_SYMLINKS]);
+}
+
+static inline bool selinux_policycap_ioctl_skip_cloexec(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return READ_ONCE(state->policycap[POLICYDB_CAP_IOCTL_SKIP_CLOEXEC]);
+}
+
+struct selinux_policy_convert_data;
+
+struct selinux_load_state {
+ struct selinux_policy *policy;
+ struct selinux_policy_convert_data *convert_data;
+};
+
+int security_mls_enabled(struct selinux_state *state);
+int security_load_policy(struct selinux_state *state,
+ void *data, size_t len,
+ struct selinux_load_state *load_state);
+void selinux_policy_commit(struct selinux_state *state,
+ struct selinux_load_state *load_state);
+void selinux_policy_cancel(struct selinux_state *state,
+ struct selinux_load_state *load_state);
+int security_read_policy(struct selinux_state *state,
+ void **data, size_t *len);
+int security_read_state_kernel(struct selinux_state *state,
+ void **data, size_t *len);
+int security_policycap_supported(struct selinux_state *state,
+ unsigned int req_cap);
+
+#define SEL_VEC_MAX 32
+struct av_decision {
+ u32 allowed;
+ u32 auditallow;
+ u32 auditdeny;
+ u32 seqno;
+ u32 flags;
+};
+
+#define XPERMS_ALLOWED 1
+#define XPERMS_AUDITALLOW 2
+#define XPERMS_DONTAUDIT 4
+
+#define security_xperm_set(perms, x) ((perms)[(x) >> 5] |= 1 << ((x) & 0x1f))
+#define security_xperm_test(perms, x) (1 & ((perms)[(x) >> 5] >> ((x) & 0x1f)))
+struct extended_perms_data {
+ u32 p[8];
+};
+
+struct extended_perms_decision {
+ u8 used;
+ u8 driver;
+ struct extended_perms_data *allowed;
+ struct extended_perms_data *auditallow;
+ struct extended_perms_data *dontaudit;
+};
+
+struct extended_perms {
+ u16 len; /* length of the associated decision chain */
+ struct extended_perms_data drivers; /* flag drivers that are used */
+};
+
+/* definitions of av_decision.flags */
+#define AVD_FLAGS_PERMISSIVE 0x0001
+
+void security_compute_av(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd,
+ struct extended_perms *xperms);
+
+void security_compute_xperms_decision(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u8 driver,
+ struct extended_perms_decision *xpermd);
+
+void security_compute_av_user(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
+
+int security_transition_sid(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid);
+
+int security_transition_sid_user(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ const char *objname, u32 *out_sid);
+
+int security_member_sid(struct selinux_state *state, u32 ssid, u32 tsid,
+ u16 tclass, u32 *out_sid);
+
+int security_change_sid(struct selinux_state *state, u32 ssid, u32 tsid,
+ u16 tclass, u32 *out_sid);
+
+int security_sid_to_context(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len);
+
+int security_sid_to_context_force(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len);
+
+int security_sid_to_context_inval(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len);
+
+int security_context_to_sid(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *out_sid, gfp_t gfp);
+
+int security_context_str_to_sid(struct selinux_state *state,
+ const char *scontext, u32 *out_sid, gfp_t gfp);
+
+int security_context_to_sid_default(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
+
+int security_context_to_sid_force(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *sid);
+
+int security_get_user_sids(struct selinux_state *state,
+ u32 callsid, char *username,
+ u32 **sids, u32 *nel);
+
+int security_port_sid(struct selinux_state *state,
+ u8 protocol, u16 port, u32 *out_sid);
+
+int security_ib_pkey_sid(struct selinux_state *state,
+ u64 subnet_prefix, u16 pkey_num, u32 *out_sid);
+
+int security_ib_endport_sid(struct selinux_state *state,
+ const char *dev_name, u8 port_num, u32 *out_sid);
+
+int security_netif_sid(struct selinux_state *state,
+ char *name, u32 *if_sid);
+
+int security_node_sid(struct selinux_state *state,
+ u16 domain, void *addr, u32 addrlen,
+ u32 *out_sid);
+
+int security_validate_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 tclass);
+
+int security_validate_transition_user(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 tclass);
+
+int security_bounded_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid);
+
+int security_sid_mls_copy(struct selinux_state *state,
+ u32 sid, u32 mls_sid, u32 *new_sid);
+
+int security_net_peersid_resolve(struct selinux_state *state,
+ u32 nlbl_sid, u32 nlbl_type,
+ u32 xfrm_sid,
+ u32 *peer_sid);
+
+int security_get_classes(struct selinux_policy *policy,
+ char ***classes, int *nclasses);
+int security_get_permissions(struct selinux_policy *policy,
+ char *class, char ***perms, int *nperms);
+int security_get_reject_unknown(struct selinux_state *state);
+int security_get_allow_unknown(struct selinux_state *state);
+
+#define SECURITY_FS_USE_XATTR 1 /* use xattr */
+#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
+#define SECURITY_FS_USE_TASK 3 /* use task SIDs, e.g. pipefs/sockfs */
+#define SECURITY_FS_USE_GENFS 4 /* use the genfs support */
+#define SECURITY_FS_USE_NONE 5 /* no labeling support */
+#define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */
+#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
+#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
+
+int security_fs_use(struct selinux_state *state, struct super_block *sb);
+
+int security_genfs_sid(struct selinux_state *state,
+ const char *fstype, const char *path, u16 sclass,
+ u32 *sid);
+
+int selinux_policy_genfs_sid(struct selinux_policy *policy,
+ const char *fstype, const char *path, u16 sclass,
+ u32 *sid);
+
+#ifdef CONFIG_NETLABEL
+int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ struct netlbl_lsm_secattr *secattr,
+ u32 *sid);
+
+int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid,
+ struct netlbl_lsm_secattr *secattr);
+#else
+static inline int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ struct netlbl_lsm_secattr *secattr,
+ u32 *sid)
+{
+ return -EIDRM;
+}
+
+static inline int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_NETLABEL */
+
+const char *security_get_initial_sid_context(u32 sid);
+
+/*
+ * status notifier using mmap interface
+ */
+extern struct page *selinux_kernel_status_page(struct selinux_state *state);
+
+#define SELINUX_KERNEL_STATUS_VERSION 1
+struct selinux_kernel_status {
+ u32 version; /* version number of the structure */
+ u32 sequence; /* sequence number of seqlock logic */
+ u32 enforcing; /* current setting of enforcing mode */
+ u32 policyload; /* times of policy reloaded */
+ u32 deny_unknown; /* current setting of deny_unknown */
+ /*
+ * Any version > 0 of the structure supports the members above.
+ */
+} __packed;
+
+extern void selinux_status_update_setenforce(struct selinux_state *state,
+ int enforcing);
+extern void selinux_status_update_policyload(struct selinux_state *state,
+ int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(struct selinux_state *state);
+extern void exit_sel_fs(void);
+extern struct path selinux_null;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
+
+extern void avtab_cache_init(void);
+extern void ebitmap_cache_init(void);
+extern void hashtab_cache_init(void);
+extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
+
+#endif /* _SELINUX_SECURITY_H_ */
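
The security_xperm_set()/security_xperm_test() macros above pack 256 extended-permission bits into the eight u32 words of struct extended_perms_data: x >> 5 selects the word and x & 0x1f the bit within it. A worked example as a self-checking sketch (the demo function is illustrative only):

static void xperm_bitmath_demo(void)
{
	struct extended_perms_data xp = {};

	/* for x = 0x42: 0x42 >> 5 == 2 and 0x42 & 0x1f == 2 */
	security_xperm_set(xp.p, 0x42);		/* xp.p[2] becomes 0x00000004 */
	WARN_ON(!security_xperm_test(xp.p, 0x42));
	WARN_ON(security_xperm_test(xp.p, 0x43));	/* neighbouring bit stays clear */
}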
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
new file mode 100644
index 000000000..c75839860
--- /dev/null
+++ b/security/selinux/include/xfrm.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SELinux support for the XFRM LSM hooks
+ *
+ * Author : Trent Jaeger, <jaegert@us.ibm.com>
+ * Updated : Venkat Yekkirala, <vyekkirala@TrustedCS.com>
+ */
+#ifndef _SELINUX_XFRM_H_
+#define _SELINUX_XFRM_H_
+
+#include <linux/lsm_audit.h>
+#include <net/flow.h>
+#include <net/xfrm.h>
+
+int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp);
+int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctxp);
+void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
+int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
+void selinux_xfrm_state_free(struct xfrm_state *x);
+int selinux_xfrm_state_delete(struct xfrm_state *x);
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic);
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+extern atomic_t selinux_xfrm_refcount;
+
+static inline int selinux_xfrm_enabled(void)
+{
+ return (atomic_read(&selinux_xfrm_refcount) > 0);
+}
+
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
+
+static inline void selinux_xfrm_notify_policyload(void)
+{
+ struct net *net;
+
+ down_read(&net_rwsem);
+ for_each_net(net)
+ rt_genid_bump_all(net);
+ up_read(&net_rwsem);
+}
+#else
+static inline int selinux_xfrm_enabled(void)
+{
+ return 0;
+}
+
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
+{
+ return 0;
+}
+
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad,
+ u8 proto)
+{
+ return 0;
+}
+
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+ int ckall)
+{
+ *sid = SECSID_NULL;
+ return 0;
+}
+
+static inline void selinux_xfrm_notify_policyload(void)
+{
+}
+
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ *sid = SECSID_NULL;
+ return 0;
+}
+#endif
+
+#endif /* _SELINUX_XFRM_H_ */
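
selinux_xfrm_enabled() exists so hot paths can skip labeled-IPsec work entirely while selinux_xfrm_refcount is zero, i.e. while no labeled policies or states are loaded. A hedged sketch of the gating pattern; maybe_check_xfrm() is illustrative, not a kernel function:

static int maybe_check_xfrm(u32 sk_sid, struct sk_buff *skb,
			    struct common_audit_data *ad)
{
	if (!selinux_xfrm_enabled())	/* no labeled IPsec in use */
		return 0;
	return selinux_xfrm_sock_rcv_skb(sk_sid, skb, ad);
}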
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
new file mode 100644
index 000000000..1ab03efe7
--- /dev/null
+++ b/security/selinux/netif.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Network interface table.
+ *
+ * Network interfaces (devices) do not have a security field, so we
+ * maintain a table associating each interface with a SID.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Paul Moore <paul@paul-moore.com>
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
+
+#include "security.h"
+#include "objsec.h"
+#include "netif.h"
+
+#define SEL_NETIF_HASH_SIZE 64
+#define SEL_NETIF_HASH_MAX 1024
+
+struct sel_netif {
+ struct list_head list;
+ struct netif_security_struct nsec;
+ struct rcu_head rcu_head;
+};
+
+static u32 sel_netif_total;
+static DEFINE_SPINLOCK(sel_netif_lock);
+static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
+
+/**
+ * sel_netif_hashfn - Hashing function for the interface table
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * This is the hashing function for the network interface table; it returns the
+ * bucket number for the given interface.
+ *
+ */
+static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
+{
+ return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netif_find - Search for an interface record
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * Search the network interface table and return the record matching @ifindex.
+ * If an entry cannot be found in the table, return NULL.
+ *
+ */
+static inline struct sel_netif *sel_netif_find(const struct net *ns,
+ int ifindex)
+{
+ int idx = sel_netif_hashfn(ns, ifindex);
+ struct sel_netif *netif;
+
+ list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
+ if (net_eq(netif->nsec.ns, ns) &&
+ netif->nsec.ifindex == ifindex)
+ return netif;
+
+ return NULL;
+}
+
+/**
+ * sel_netif_insert - Insert a new interface into the table
+ * @netif: the new interface record
+ *
+ * Description:
+ * Add a new interface record to the network interface hash table. Returns
+ * zero on success, negative values on failure.
+ *
+ */
+static int sel_netif_insert(struct sel_netif *netif)
+{
+ int idx;
+
+ if (sel_netif_total >= SEL_NETIF_HASH_MAX)
+ return -ENOSPC;
+
+ idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
+ list_add_rcu(&netif->list, &sel_netif_hash[idx]);
+ sel_netif_total++;
+
+ return 0;
+}
+
+/**
+ * sel_netif_destroy - Remove an interface record from the table
+ * @netif: the existing interface record
+ *
+ * Description:
+ * Remove an existing interface record from the network interface table.
+ *
+ */
+static void sel_netif_destroy(struct sel_netif *netif)
+{
+ list_del_rcu(&netif->list);
+ sel_netif_total--;
+ kfree_rcu(netif, rcu_head);
+}
+
+/**
+ * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ * @sid: interface SID
+ *
+ * Description:
+ * This function determines the SID of a network interface by querying the
+ * security policy. The result is added to the network interface table to
+ * speed up future queries. Returns zero on success, negative values on
+ * failure.
+ *
+ */
+static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
+{
+ int ret = 0;
+ struct sel_netif *netif;
+ struct sel_netif *new;
+ struct net_device *dev;
+
+ /* NOTE: we always use init's network namespace since we don't
+ * currently support containers */
+
+ dev = dev_get_by_index(ns, ifindex);
+ if (unlikely(dev == NULL)) {
+ pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
+ __func__, ifindex);
+ return -ENOENT;
+ }
+
+ spin_lock_bh(&sel_netif_lock);
+ netif = sel_netif_find(ns, ifindex);
+ if (netif != NULL) {
+ *sid = netif->nsec.sid;
+ goto out;
+ }
+
+ ret = security_netif_sid(&selinux_state, dev->name, sid);
+ if (ret != 0)
+ goto out;
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->nsec.ns = ns;
+ new->nsec.ifindex = ifindex;
+ new->nsec.sid = *sid;
+ if (sel_netif_insert(new))
+ kfree(new);
+ }
+
+out:
+ spin_unlock_bh(&sel_netif_lock);
+ dev_put(dev);
+ if (unlikely(ret))
+ pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
+ __func__, ifindex);
+ return ret;
+}
+
+/**
+ * sel_netif_sid - Lookup the SID of a network interface
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ * @sid: interface SID
+ *
+ * Description:
+ * This function determines the SID of a network interface using the fastest
+ * method possible. First the interface table is queried, but if an entry
+ * can't be found then the policy is queried and the result is added to the
+ * table to speed up future queries. Returns zero on success, negative values
+ * on failure.
+ *
+ */
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
+{
+ struct sel_netif *netif;
+
+ rcu_read_lock();
+ netif = sel_netif_find(ns, ifindex);
+ if (likely(netif != NULL)) {
+ *sid = netif->nsec.sid;
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ return sel_netif_sid_slow(ns, ifindex, sid);
+}
+
+/**
+ * sel_netif_kill - Remove an entry from the network interface table
+ * @ns: the network namespace
+ * @ifindex: the network interface
+ *
+ * Description:
+ * This function removes the entry matching @ifindex from the network interface
+ * table if it exists.
+ *
+ */
+static void sel_netif_kill(const struct net *ns, int ifindex)
+{
+ struct sel_netif *netif;
+
+ rcu_read_lock();
+ spin_lock_bh(&sel_netif_lock);
+ netif = sel_netif_find(ns, ifindex);
+ if (netif)
+ sel_netif_destroy(netif);
+ spin_unlock_bh(&sel_netif_lock);
+ rcu_read_unlock();
+}
+
+/**
+ * sel_netif_flush - Flush the entire network interface table
+ *
+ * Description:
+ * Remove all entries from the network interface table.
+ *
+ */
+void sel_netif_flush(void)
+{
+ int idx;
+ struct sel_netif *netif;
+
+ spin_lock_bh(&sel_netif_lock);
+ for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
+ list_for_each_entry(netif, &sel_netif_hash[idx], list)
+ sel_netif_destroy(netif);
+ spin_unlock_bh(&sel_netif_lock);
+}
+
+static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_DOWN)
+ sel_netif_kill(dev_net(dev), dev->ifindex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sel_netif_netdev_notifier = {
+ .notifier_call = sel_netif_netdev_notifier_handler,
+};
+
+static __init int sel_netif_init(void)
+{
+ int i;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&sel_netif_hash[i]);
+
+ register_netdevice_notifier(&sel_netif_netdev_notifier);
+
+ return 0;
+}
+
+__initcall(sel_netif_init);
+
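sel_netif_hashfn() folds the namespace pointer and ifindex together and masks with SEL_NETIF_HASH_SIZE - 1, which is only a valid modulo while the table size stays a power of two. A guard along these lines would make the assumption explicit (illustrative; not in the file):

static_assert((SEL_NETIF_HASH_SIZE & (SEL_NETIF_HASH_SIZE - 1)) == 0,
	      "SEL_NETIF_HASH_SIZE must be a power of two");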
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
new file mode 100644
index 000000000..1321f1579
--- /dev/null
+++ b/security/selinux/netlabel.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SELinux NetLabel Support
+ *
+ * This file provides the necessary glue to tie NetLabel into the SELinux
+ * subsystem.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007, 2008
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/gfp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/sock.h>
+#include <net/netlabel.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "objsec.h"
+#include "security.h"
+#include "netlabel.h"
+
+/**
+ * selinux_netlbl_sidlookup_cached - Cache a SID lookup
+ * @skb: the packet
+ * @family: the packet's address family
+ * @secattr: the NetLabel security attributes
+ * @sid: the SID
+ *
+ * Description:
+ * Query the SELinux security server to lookup the correct SID for the given
+ * security attributes. If the query is successful, cache the result to speed
+ * up future lookups. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
+ u16 family,
+ struct netlbl_lsm_secattr *secattr,
+ u32 *sid)
+{
+ int rc;
+
+ rc = security_netlbl_secattr_to_sid(&selinux_state, secattr, sid);
+ if (rc == 0 &&
+ (secattr->flags & NETLBL_SECATTR_CACHEABLE) &&
+ (secattr->flags & NETLBL_SECATTR_CACHE))
+ netlbl_cache_add(skb, family, secattr);
+
+ return rc;
+}
+
+/**
+ * selinux_netlbl_sock_genattr - Generate the NetLabel socket secattr
+ * @sk: the socket
+ *
+ * Description:
+ * Generate the NetLabel security attributes for a socket, making full use of
+ * the socket's attribute cache. Returns a pointer to the security attributes
+ * on success, NULL on failure.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
+{
+ int rc;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr;
+
+ if (sksec->nlbl_secattr != NULL)
+ return sksec->nlbl_secattr;
+
+ secattr = netlbl_secattr_alloc(GFP_ATOMIC);
+ if (secattr == NULL)
+ return NULL;
+ rc = security_netlbl_sid_to_secattr(&selinux_state, sksec->sid,
+ secattr);
+ if (rc != 0) {
+ netlbl_secattr_free(secattr);
+ return NULL;
+ }
+ sksec->nlbl_secattr = secattr;
+
+ return secattr;
+}
+
+/**
+ * selinux_netlbl_sock_getattr - Get the cached NetLabel secattr
+ * @sk: the socket
+ * @sid: the SID
+ *
+ * Query the socket's cached secattr; if the SID matches the cached value,
+ * return the cache, otherwise return NULL.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
+ const struct sock *sk,
+ u32 sid)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
+
+ if (secattr == NULL)
+ return NULL;
+
+ if ((secattr->flags & NETLBL_SECATTR_SECID) &&
+ (secattr->attr.secid == sid))
+ return secattr;
+
+ return NULL;
+}
+
+/**
+ * selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
+ *
+ * Description:
+ * Invalidate the NetLabel security attribute mapping cache.
+ *
+ */
+void selinux_netlbl_cache_invalidate(void)
+{
+ netlbl_cache_invalidate();
+}
+
+/**
+ * selinux_netlbl_err - Handle a NetLabel packet error
+ * @skb: the packet
+ * @family: the packet's address family
+ * @error: the error code
+ * @gateway: true if host is acting as a gateway, false otherwise
+ *
+ * Description:
+ * When a packet is dropped due to a call to avc_has_perm(), pass the error
+ * code to the NetLabel subsystem so any protocol-specific processing can be
+ * done. This is safe to call even if you are unsure whether NetLabel labeling
+ * is present on the packet; NetLabel is smart enough to only act when it should.
+ *
+ */
+void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error, int gateway)
+{
+ netlbl_skbuff_err(skb, family, error, gateway);
+}
+
+/**
+ * selinux_netlbl_sk_security_free - Free the NetLabel fields
+ * @sksec: the sk_security_struct
+ *
+ * Description:
+ * Free all of the memory in the NetLabel fields of a sk_security_struct.
+ *
+ */
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
+{
+ if (sksec->nlbl_secattr != NULL)
+ netlbl_secattr_free(sksec->nlbl_secattr);
+}
+
+/**
+ * selinux_netlbl_sk_security_reset - Reset the NetLabel fields
+ * @sksec: the sk_security_struct
+ *
+ * Description:
+ * Called when the NetLabel state of a sk_security_struct needs to be reset.
+ * The caller is responsible for all the NetLabel sk_security_struct locking.
+ *
+ */
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
+{
+ sksec->nlbl_state = NLBL_UNSET;
+}
+
+/**
+ * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
+ * @skb: the packet
+ * @family: protocol family
+ * @type: NetLabel labeling protocol type
+ * @sid: the SID
+ *
+ * Description:
+ * Call the NetLabel mechanism to get the security attributes of the given
+ * packet and use those attributes to determine the correct context/SID to
+ * assign to the packet. Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+ u16 family,
+ u32 *type,
+ u32 *sid)
+{
+ int rc;
+ struct netlbl_lsm_secattr secattr;
+
+ if (!netlbl_enabled()) {
+ *sid = SECSID_NULL;
+ return 0;
+ }
+
+ netlbl_secattr_init(&secattr);
+ rc = netlbl_skbuff_getattr(skb, family, &secattr);
+ if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
+ rc = selinux_netlbl_sidlookup_cached(skb, family,
+ &secattr, sid);
+ else
+ *sid = SECSID_NULL;
+ *type = secattr.type;
+ netlbl_secattr_destroy(&secattr);
+
+ return rc;
+}
+
+/**
+ * selinux_netlbl_skbuff_setsid - Set the NetLabel on a packet given a sid
+ * @skb: the packet
+ * @family: protocol family
+ * @sid: the SID
+ *
+ * Description:
+ * Call the NetLabel mechanism to set the label of a packet using @sid.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+ u16 family,
+ u32 sid)
+{
+ int rc;
+ struct netlbl_lsm_secattr secattr_storage;
+ struct netlbl_lsm_secattr *secattr = NULL;
+ struct sock *sk;
+
+ /* if this is a locally generated packet, check to see if it is already
+ * being labeled by its parent socket; if it is, just exit */
+ sk = skb_to_full_sk(skb);
+ if (sk != NULL) {
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ if (sksec->nlbl_state != NLBL_REQSKB)
+ return 0;
+ secattr = selinux_netlbl_sock_getattr(sk, sid);
+ }
+ if (secattr == NULL) {
+ secattr = &secattr_storage;
+ netlbl_secattr_init(secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state, sid,
+ secattr);
+ if (rc != 0)
+ goto skbuff_setsid_return;
+ }
+
+ rc = netlbl_skbuff_setattr(skb, family, secattr);
+
+skbuff_setsid_return:
+ if (secattr == &secattr_storage)
+ netlbl_secattr_destroy(secattr);
+ return rc;
+}
+
+/**
+ * selinux_netlbl_sctp_assoc_request - Label an incoming sctp association.
+ * @asoc: incoming association.
+ * @skb: the packet.
+ *
+ * Description:
+ * A new incoming connection is represented by @asoc; label it via
+ * netlbl_conn_setattr() using the peer address taken from @skb.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ int rc;
+ struct netlbl_lsm_secattr secattr;
+ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+
+ if (asoc->base.sk->sk_family != PF_INET &&
+ asoc->base.sk->sk_family != PF_INET6)
+ return 0;
+
+ netlbl_secattr_init(&secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state,
+ asoc->secid, &secattr);
+ if (rc != 0)
+ goto assoc_request_return;
+
+ /* Move skb hdr address info to a struct sockaddr and then call
+ * netlbl_conn_setattr().
+ */
+ if (ip_hdr(skb)->version == 4) {
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr4, &secattr);
+ } else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+ rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr6, &secattr);
+ } else {
+ rc = -EAFNOSUPPORT;
+ }
+
+ if (rc == 0)
+ sksec->nlbl_state = NLBL_LABELED;
+
+assoc_request_return:
+ netlbl_secattr_destroy(&secattr);
+ return rc;
+}
+
+/**
+ * selinux_netlbl_inet_conn_request - Label an incoming stream connection
+ * @req: incoming connection request socket
+ * @family: the request socket's address family
+ *
+ * Description:
+ * A new incoming connection request is represented by @req; we need to label
+ * the new request_sock here and the stack will ensure the on-the-wire label
+ * will get preserved when a full sock is created once the connection handshake
+ * is complete. Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
+{
+ int rc;
+ struct netlbl_lsm_secattr secattr;
+
+ if (family != PF_INET && family != PF_INET6)
+ return 0;
+
+ netlbl_secattr_init(&secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state, req->secid,
+ &secattr);
+ if (rc != 0)
+ goto inet_conn_request_return;
+ rc = netlbl_req_setattr(req, &secattr);
+inet_conn_request_return:
+ netlbl_secattr_destroy(&secattr);
+ return rc;
+}
+
+/**
+ * selinux_netlbl_inet_csk_clone - Initialize the newly created sock
+ * @sk: the new sock
+ * @family: the sock's address family
+ *
+ * Description:
+ * A new connection has been established using @sk; we've already labeled the
+ * socket via the request_sock struct in selinux_netlbl_inet_conn_request() but
+ * we need to set the NetLabel state here since we now have a sock structure.
+ *
+ */
+void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ if (family == PF_INET)
+ sksec->nlbl_state = NLBL_LABELED;
+ else
+ sksec->nlbl_state = NLBL_UNSET;
+}
+
+/**
+ * selinux_netlbl_sctp_sk_clone - Copy state to the newly created sock
+ * @sk: current sock
+ * @newsk: the new sock
+ *
+ * Description:
+ * Called whenever a new socket is created by accept(2) or sctp_peeloff(3).
+ */
+void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct sk_security_struct *newsksec = newsk->sk_security;
+
+ newsksec->nlbl_state = sksec->nlbl_state;
+}
+
+/**
+ * selinux_netlbl_socket_post_create - Label a socket using NetLabel
+ * @sk: the sock to label
+ * @family: protocol family
+ *
+ * Description:
+ * Attempt to label a socket with the NetLabel mechanism using the given
+ * SID. Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
+{
+ int rc;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr;
+
+ if (family != PF_INET && family != PF_INET6)
+ return 0;
+
+ secattr = selinux_netlbl_sock_genattr(sk);
+ if (secattr == NULL)
+ return -ENOMEM;
+ rc = netlbl_sock_setattr(sk, family, secattr);
+ switch (rc) {
+ case 0:
+ sksec->nlbl_state = NLBL_LABELED;
+ break;
+ case -EDESTADDRREQ:
+ sksec->nlbl_state = NLBL_REQSKB;
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel
+ * @sksec: the sock's sk_security_struct
+ * @skb: the packet
+ * @family: protocol family
+ * @ad: the audit data
+ *
+ * Description:
+ * Fetch the NetLabel security attributes from @skb and perform an access check
+ * against the receiving socket. Returns zero on success, negative values on
+ * error.
+ *
+ */
+int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ struct sk_buff *skb,
+ u16 family,
+ struct common_audit_data *ad)
+{
+ int rc;
+ u32 nlbl_sid;
+ u32 perm;
+ struct netlbl_lsm_secattr secattr;
+
+ if (!netlbl_enabled())
+ return 0;
+
+ netlbl_secattr_init(&secattr);
+ rc = netlbl_skbuff_getattr(skb, family, &secattr);
+ if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
+ rc = selinux_netlbl_sidlookup_cached(skb, family,
+ &secattr, &nlbl_sid);
+ else
+ nlbl_sid = SECINITSID_UNLABELED;
+ netlbl_secattr_destroy(&secattr);
+ if (rc != 0)
+ return rc;
+
+ switch (sksec->sclass) {
+ case SECCLASS_UDP_SOCKET:
+ perm = UDP_SOCKET__RECVFROM;
+ break;
+ case SECCLASS_TCP_SOCKET:
+ perm = TCP_SOCKET__RECVFROM;
+ break;
+ default:
+ perm = RAWIP_SOCKET__RECVFROM;
+ }
+
+ rc = avc_has_perm(&selinux_state,
+ sksec->sid, nlbl_sid, sksec->sclass, perm, ad);
+ if (rc == 0)
+ return 0;
+
+ if (nlbl_sid != SECINITSID_UNLABELED)
+ netlbl_skbuff_err(skb, family, rc, 0);
+ return rc;
+}
+
+/**
+ * selinux_netlbl_option - Is this a NetLabel option
+ * @level: the socket level or protocol
+ * @optname: the socket option name
+ *
+ * Description:
+ * Returns true if @level and @optname refer to a NetLabel option.
+ * Helper for selinux_netlbl_socket_setsockopt().
+ */
+static inline int selinux_netlbl_option(int level, int optname)
+{
+ return (level == IPPROTO_IP && optname == IP_OPTIONS) ||
+ (level == IPPROTO_IPV6 && optname == IPV6_HOPOPTS);
+}
+
+/**
+ * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel
+ * @sock: the socket
+ * @level: the socket level or protocol
+ * @optname: the socket option name
+ *
+ * Description:
+ * Check the setsockopt() call; if the user is trying to replace the IP
+ * options on a socket that has a NetLabel in place, deny the access,
+ * otherwise allow it. Returns zero when the access is
+ * allowed, -EACCES when denied, and other negative values on error.
+ *
+ */
+int selinux_netlbl_socket_setsockopt(struct socket *sock,
+ int level,
+ int optname)
+{
+ int rc = 0;
+ struct sock *sk = sock->sk;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr secattr;
+
+ if (selinux_netlbl_option(level, optname) &&
+ (sksec->nlbl_state == NLBL_LABELED ||
+ sksec->nlbl_state == NLBL_CONNLABELED)) {
+ netlbl_secattr_init(&secattr);
+ lock_sock(sk);
+ /* call the netlabel function directly as we want to see the
+ * on-the-wire label that is assigned via the socket's options
+ * and not the cached netlabel/lsm attributes */
+ rc = netlbl_sock_getattr(sk, &secattr);
+ release_sock(sk);
+ if (rc == 0)
+ rc = -EACCES;
+ else if (rc == -ENOMSG)
+ rc = 0;
+ netlbl_secattr_destroy(&secattr);
+ }
+
+ return rc;
+}
+
+/**
+ * selinux_netlbl_socket_connect_helper - Help label a client-side socket on
+ * connect
+ * @sk: the socket to label
+ * @addr: the destination address
+ *
+ * Description:
+ * Attempt to label a connected socket with NetLabel using the given address.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_netlbl_socket_connect_helper(struct sock *sk,
+ struct sockaddr *addr)
+{
+ int rc;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr;
+
+ /* connected sockets are allowed to disconnect when the address family
+ * is set to AF_UNSPEC; if that is what is happening, we want to reset
+ * the socket */
+ if (addr->sa_family == AF_UNSPEC) {
+ netlbl_sock_delattr(sk);
+ sksec->nlbl_state = NLBL_REQSKB;
+ return 0;
+ }
+ secattr = selinux_netlbl_sock_genattr(sk);
+ if (secattr == NULL) {
+ return -ENOMEM;
+ }
+ rc = netlbl_conn_setattr(sk, addr, secattr);
+ if (rc == 0)
+ sksec->nlbl_state = NLBL_CONNLABELED;
+
+ return rc;
+}
+
+/**
+ * selinux_netlbl_socket_connect_locked - Label a client-side socket on
+ * connect
+ * @sk: the socket to label
+ * @addr: the destination address
+ *
+ * Description:
+ * Attempt to label an already locked, connected socket with NetLabel
+ * using the given address. Returns zero on success, negative values on
+ * failure.
+ *
+ */
+int selinux_netlbl_socket_connect_locked(struct sock *sk,
+ struct sockaddr *addr)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ if (sksec->nlbl_state != NLBL_REQSKB &&
+ sksec->nlbl_state != NLBL_CONNLABELED)
+ return 0;
+
+ return selinux_netlbl_socket_connect_helper(sk, addr);
+}
+
+/**
+ * selinux_netlbl_socket_connect - Label a client-side socket on connect
+ * @sk: the socket to label
+ * @addr: the destination address
+ *
+ * Description:
+ * Attempt to label a connected socket with NetLabel using the given address.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+{
+ int rc;
+
+ lock_sock(sk);
+ rc = selinux_netlbl_socket_connect_locked(sk, addr);
+ release_sock(sk);
+
+ return rc;
+}
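
One pattern worth calling out in the file above: on-stack struct netlbl_lsm_secattr values pair netlbl_secattr_init() with netlbl_secattr_destroy(), while heap ones pair netlbl_secattr_alloc() with netlbl_secattr_free(); destroy() releases only the internal allocations, never the struct itself. A condensed sketch of the stack variant (the wrapper function is illustrative):

static int secattr_stack_pattern(struct sk_buff *skb, u16 family)
{
	struct netlbl_lsm_secattr secattr;
	int rc;

	netlbl_secattr_init(&secattr);		/* zero the on-stack struct */
	rc = netlbl_skbuff_getattr(skb, family, &secattr);
	/* ... inspect secattr.flags / secattr.type here ... */
	netlbl_secattr_destroy(&secattr);	/* frees internals, not &secattr */
	return rc;
}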
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
new file mode 100644
index 000000000..1760aee71
--- /dev/null
+++ b/security/selinux/netlink.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Netlink event notifications for SELinux.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
+#include <linux/selinux_netlink.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+
+#include "security.h"
+
+static struct sock *selnl __ro_after_init;
+
+static int selnl_msglen(int msgtype)
+{
+ int ret = 0;
+
+ switch (msgtype) {
+ case SELNL_MSG_SETENFORCE:
+ ret = sizeof(struct selnl_msg_setenforce);
+ break;
+
+ case SELNL_MSG_POLICYLOAD:
+ ret = sizeof(struct selnl_msg_policyload);
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *data)
+{
+ switch (msgtype) {
+ case SELNL_MSG_SETENFORCE: {
+ struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
+
+ memset(msg, 0, len);
+ msg->val = *((int *)data);
+ break;
+ }
+
+ case SELNL_MSG_POLICYLOAD: {
+ struct selnl_msg_policyload *msg = nlmsg_data(nlh);
+
+ memset(msg, 0, len);
+ msg->seqno = *((u32 *)data);
+ break;
+ }
+
+ default:
+ BUG();
+ }
+}
+
+static void selnl_notify(int msgtype, void *data)
+{
+ int len;
+ sk_buff_data_t tmp;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+
+ len = selnl_msglen(msgtype);
+
+ skb = nlmsg_new(len, GFP_USER);
+ if (!skb)
+ goto oom;
+
+ tmp = skb->tail;
+ nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
+ if (!nlh)
+ goto out_kfree_skb;
+ selnl_add_payload(nlh, len, msgtype, data);
+ nlh->nlmsg_len = skb->tail - tmp;
+ NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
+ netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER);
+out:
+ return;
+
+out_kfree_skb:
+ kfree_skb(skb);
+oom:
+ pr_err("SELinux: OOM in %s\n", __func__);
+ goto out;
+}
+
+void selnl_notify_setenforce(int val)
+{
+ selnl_notify(SELNL_MSG_SETENFORCE, &val);
+}
+
+void selnl_notify_policyload(u32 seqno)
+{
+ selnl_notify(SELNL_MSG_POLICYLOAD, &seqno);
+}
+
+static int __init selnl_init(void)
+{
+ struct netlink_kernel_cfg cfg = {
+ .groups = SELNLGRP_MAX,
+ .flags = NL_CFG_F_NONROOT_RECV,
+ };
+
+ selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
+ if (selnl == NULL)
+ panic("SELinux: Cannot create netlink socket.");
+ return 0;
+}
+
+__initcall(selnl_init);
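
For context, the receiving side of these broadcasts lives in userspace. A minimal listener sketch in the spirit of libselinux's avc_netlink code, using the uapi definitions from linux/selinux_netlink.h; error handling is omitted and the program is illustrative only:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/selinux_netlink.h>

int main(void)
{
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
				    .nl_groups = SELNL_GRP_AVC };
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SELINUX);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len > 0 && nlh->nlmsg_type == SELNL_MSG_SETENFORCE)
			printf("enforcing=%d\n",
			       ((struct selnl_msg_setenforce *)NLMSG_DATA(nlh))->val);
	}
}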
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
new file mode 100644
index 000000000..0ac7df9a9
--- /dev/null
+++ b/security/selinux/netnode.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Network node table
+ *
+ * SELinux must keep a mapping of network nodes to labels/SIDs. This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead since most of these queries happen on
+ * a per-packet basis.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ * This code is heavily based on the "netif" concept originally developed by
+ * James Morris <jmorris@redhat.com>
+ * (see security/selinux/netif.c for more information)
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "netnode.h"
+#include "objsec.h"
+
+#define SEL_NETNODE_HASH_SIZE 256
+#define SEL_NETNODE_HASH_BKT_LIMIT 16
+
+struct sel_netnode_bkt {
+ unsigned int size;
+ struct list_head list;
+};
+
+struct sel_netnode {
+ struct netnode_security_struct nsec;
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+/* NOTE: we are using a combined hash table for both IPv4 and IPv6; the
+ * reason is that I suspect most users will not make heavy use of both
+ * address families at the same time, so one of two separate tables would
+ * usually end up wasted.  If this becomes a problem we can always add a
+ * hash table for each address family later. */
+
+static DEFINE_SPINLOCK(sel_netnode_lock);
+static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
+
+/**
+ * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
+ * @addr: IPv4 address
+ *
+ * Description:
+ * This is the IPv4 hashing function for the node table; it returns the
+ * bucket number for the given IP address.
+ *
+ */
+static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
+{
+ /* at some point we should determine if the mismatch in byte order
+ * affects the hash function dramatically */
+ return (addr & (SEL_NETNODE_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
+ * @addr: IPv6 address
+ *
+ * Description:
+ * This is the IPv6 hashing function for the node table; it returns the
+ * bucket number for the given IP address.
+ *
+ */
+static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
+{
+ /* just hash the least significant 32 bits to keep things fast (they
+ * are the most likely to be different anyway); we can revisit this
+ * later if needed */
+ return (addr->s6_addr32[3] & (SEL_NETNODE_HASH_SIZE - 1));
+}
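
Both hash functions reduce an address to a bucket index with a bitwise AND, which only works because SEL_NETNODE_HASH_SIZE is a power of two. A small standalone sketch of the IPv4 case, illustrating the byte-order caveat noted above:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define HASH_SIZE 256  /* must stay a power of two for the mask to work */

    static unsigned int bucket_ipv4(uint32_t addr_be)
    {
        return addr_be & (HASH_SIZE - 1);
    }

    int main(void)
    {
        uint32_t a = inet_addr("192.0.2.1");
        uint32_t b = inet_addr("192.0.2.77");

        /* On a little-endian host the low byte of a big-endian address is
         * the *first* octet, so both of these land in bucket 192 -- the
         * byte-order mismatch the comment above alludes to. */
        printf("bucket(192.0.2.1)  = %u\n", bucket_ipv4(a));
        printf("bucket(192.0.2.77) = %u\n", bucket_ipv4(b));
        return 0;
    }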
+
+/**
+ * sel_netnode_find - Search for a node record
+ * @addr: IP address
+ * @family: address family
+ *
+ * Description:
+ * Search the network node table and return the record matching @addr. If an
+ * entry cannot be found in the table, return NULL.
+ *
+ */
+static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
+{
+ unsigned int idx;
+ struct sel_netnode *node;
+
+ switch (family) {
+ case PF_INET:
+ idx = sel_netnode_hashfn_ipv4(*(const __be32 *)addr);
+ break;
+ case PF_INET6:
+ idx = sel_netnode_hashfn_ipv6(addr);
+ break;
+ default:
+ BUG();
+ return NULL;
+ }
+
+ list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
+ if (node->nsec.family == family)
+ switch (family) {
+ case PF_INET:
+ if (node->nsec.addr.ipv4 == *(const __be32 *)addr)
+ return node;
+ break;
+ case PF_INET6:
+ if (ipv6_addr_equal(&node->nsec.addr.ipv6,
+ addr))
+ return node;
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * sel_netnode_insert - Insert a new node into the table
+ * @node: the new node record
+ *
+ * Description:
+ * Add a new node record to the network address hash table.
+ *
+ */
+static void sel_netnode_insert(struct sel_netnode *node)
+{
+ unsigned int idx;
+
+ switch (node->nsec.family) {
+ case PF_INET:
+ idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
+ break;
+ case PF_INET6:
+ idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ /* we need to impose a limit on the growth of the hash table so check
+ * this bucket to make sure it is within the specified bounds */
+ list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
+ if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+ struct sel_netnode *tail;
+ tail = list_entry(
+ rcu_dereference_protected(
+ list_tail_rcu(&sel_netnode_hash[idx].list),
+ lockdep_is_held(&sel_netnode_lock)),
+ struct sel_netnode, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+ } else
+ sel_netnode_hash[idx].size++;
+}
+
+/**
+ * sel_netnode_sid_slow - Lookup the SID of a network address using the policy
+ * @addr: the IP address
+ * @family: the address family
+ * @sid: node SID
+ *
+ * Description:
+ * This function determines the SID of a network address by querying the
+ * security policy. The result is added to the network address table to
+ * speed up future queries. Returns zero on success, negative values on
+ * failure.
+ *
+ */
+static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
+{
+ int ret;
+ struct sel_netnode *node;
+ struct sel_netnode *new;
+
+ spin_lock_bh(&sel_netnode_lock);
+ node = sel_netnode_find(addr, family);
+ if (node != NULL) {
+ *sid = node->nsec.sid;
+ spin_unlock_bh(&sel_netnode_lock);
+ return 0;
+ }
+
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ switch (family) {
+ case PF_INET:
+ ret = security_node_sid(&selinux_state, PF_INET,
+ addr, sizeof(struct in_addr), sid);
+ if (new)
+ new->nsec.addr.ipv4 = *(__be32 *)addr;
+ break;
+ case PF_INET6:
+ ret = security_node_sid(&selinux_state, PF_INET6,
+ addr, sizeof(struct in6_addr), sid);
+ if (new)
+ new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
+ break;
+ default:
+ BUG();
+ ret = -EINVAL;
+ }
+ if (ret == 0 && new) {
+ new->nsec.family = family;
+ new->nsec.sid = *sid;
+ sel_netnode_insert(new);
+ } else
+ kfree(new);
+
+ spin_unlock_bh(&sel_netnode_lock);
+ if (unlikely(ret))
+ pr_warn("SELinux: failure in %s(), unable to determine network node label\n",
+ __func__);
+ return ret;
+}
+
+/**
+ * sel_netnode_sid - Lookup the SID of a network address
+ * @addr: the IP address
+ * @family: the address family
+ * @sid: node SID
+ *
+ * Description:
+ * This function determines the SID of a network address using the fastest
+ * method possible. First the address table is queried, but if an entry
+ * can't be found then the policy is queried and the result is added to the
+ * table to speed up future queries. Returns zero on success, negative values
+ * on failure.
+ *
+ */
+int sel_netnode_sid(void *addr, u16 family, u32 *sid)
+{
+ struct sel_netnode *node;
+
+ rcu_read_lock();
+ node = sel_netnode_find(addr, family);
+ if (node != NULL) {
+ *sid = node->nsec.sid;
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ return sel_netnode_sid_slow(addr, family, sid);
+}
+
+/**
+ * sel_netnode_flush - Flush the entire network address table
+ *
+ * Description:
+ * Remove all entries from the network address table.
+ *
+ */
+void sel_netnode_flush(void)
+{
+ unsigned int idx;
+ struct sel_netnode *node, *node_tmp;
+
+ spin_lock_bh(&sel_netnode_lock);
+ for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
+ list_for_each_entry_safe(node, node_tmp,
+ &sel_netnode_hash[idx].list, list) {
+ list_del_rcu(&node->list);
+ kfree_rcu(node, rcu);
+ }
+ sel_netnode_hash[idx].size = 0;
+ }
+ spin_unlock_bh(&sel_netnode_lock);
+}
+
+static __init int sel_netnode_init(void)
+{
+ int iter;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
+ INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
+ sel_netnode_hash[iter].size = 0;
+ }
+
+ return 0;
+}
+
+__initcall(sel_netnode_init);
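
The insert path above implements a simple bounded cache: new entries go to the head of a bucket and, once the bucket reaches SEL_NETNODE_HASH_BKT_LIMIT, the tail (oldest) entry is evicted. A single-threaded sketch of the same policy, with the RCU and locking stripped out and illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    #define BKT_LIMIT 4  /* stands in for SEL_NETNODE_HASH_BKT_LIMIT */

    struct entry {
        unsigned int key;
        struct entry *next;
    };

    struct bucket {
        unsigned int size;
        struct entry *head;
    };

    static void bucket_insert(struct bucket *bkt, unsigned int key)
    {
        struct entry *e = malloc(sizeof(*e));

        e->key = key;
        e->next = bkt->head;
        bkt->head = e;            /* newest entry goes to the front */
        if (bkt->size == BKT_LIMIT) {
            struct entry *p = bkt->head;

            while (p->next->next) /* walk to the next-to-last entry */
                p = p->next;
            free(p->next);        /* ... and evict the oldest */
            p->next = NULL;
        } else {
            bkt->size++;
        }
    }

    int main(void)
    {
        struct bucket bkt = { 0, NULL };
        struct entry *e;
        unsigned int i;

        for (i = 0; i < 10; i++)
            bucket_insert(&bkt, i);
        for (e = bkt.head; e; e = e->next)
            printf("%u ", e->key);  /* prints 9 8 7 6: the limit held */
        printf("\n");
        return 0;
    }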
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
new file mode 100644
index 000000000..8eec6347c
--- /dev/null
+++ b/security/selinux/netport.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Network port table
+ *
+ * SELinux must keep a mapping of network ports to labels/SIDs. This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * Author: Paul Moore <paul@paul-moore.com>
+ *
+ * This code is heavily based on the "netif" concept originally developed by
+ * James Morris <jmorris@redhat.com>
+ * (see security/selinux/netif.c for more information)
+ */
+
+/*
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "netport.h"
+#include "objsec.h"
+
+#define SEL_NETPORT_HASH_SIZE 256
+#define SEL_NETPORT_HASH_BKT_LIMIT 16
+
+struct sel_netport_bkt {
+ int size;
+ struct list_head list;
+};
+
+struct sel_netport {
+ struct netport_security_struct psec;
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+/* NOTE: we are using a combined hash table for both IPv4 and IPv6; the
+ * reason is that I suspect most users will not make heavy use of both
+ * address families at the same time, so one of two separate tables would
+ * usually end up wasted.  If this becomes a problem we can always add a
+ * hash table for each address family later. */
+
+static DEFINE_SPINLOCK(sel_netport_lock);
+static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
+
+/**
+ * sel_netport_hashfn - Hashing function for the port table
+ * @pnum: port number
+ *
+ * Description:
+ * This is the hashing function for the port table; it returns the bucket
+ * number for the given port.
+ *
+ */
+static unsigned int sel_netport_hashfn(u16 pnum)
+{
+ return (pnum & (SEL_NETPORT_HASH_SIZE - 1));
+}
+
+/**
+ * sel_netport_find - Search for a port record
+ * @protocol: protocol
+ * @pnum: port
+ *
+ * Description:
+ * Search the network port table and return the matching record. If an entry
+ * cannot be found in the table, return NULL.
+ *
+ */
+static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
+{
+ unsigned int idx;
+ struct sel_netport *port;
+
+ idx = sel_netport_hashfn(pnum);
+ list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
+ if (port->psec.port == pnum && port->psec.protocol == protocol)
+ return port;
+
+ return NULL;
+}
+
+/**
+ * sel_netport_insert - Insert a new port into the table
+ * @port: the new port record
+ *
+ * Description:
+ * Add a new port record to the network port hash table.
+ *
+ */
+static void sel_netport_insert(struct sel_netport *port)
+{
+ unsigned int idx;
+
+ /* we need to impose a limit on the growth of the hash table so check
+ * this bucket to make sure it is within the specified bounds */
+ idx = sel_netport_hashfn(port->psec.port);
+ list_add_rcu(&port->list, &sel_netport_hash[idx].list);
+ if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
+ struct sel_netport *tail;
+ tail = list_entry(
+ rcu_dereference_protected(
+ list_tail_rcu(&sel_netport_hash[idx].list),
+ lockdep_is_held(&sel_netport_lock)),
+ struct sel_netport, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+ } else
+ sel_netport_hash[idx].size++;
+}
+
+/**
+ * sel_netport_sid_slow - Lookup the SID of a network address using the policy
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port by querying the security
+ * policy. The result is added to the network port table to speed up future
+ * queries. Returns zero on success, negative values on failure.
+ *
+ */
+static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
+{
+ int ret;
+ struct sel_netport *port;
+ struct sel_netport *new;
+
+ spin_lock_bh(&sel_netport_lock);
+ port = sel_netport_find(protocol, pnum);
+ if (port != NULL) {
+ *sid = port->psec.sid;
+ spin_unlock_bh(&sel_netport_lock);
+ return 0;
+ }
+
+ ret = security_port_sid(&selinux_state, protocol, pnum, sid);
+ if (ret != 0)
+ goto out;
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->psec.port = pnum;
+ new->psec.protocol = protocol;
+ new->psec.sid = *sid;
+ sel_netport_insert(new);
+ }
+
+out:
+ spin_unlock_bh(&sel_netport_lock);
+ if (unlikely(ret))
+ pr_warn("SELinux: failure in %s(), unable to determine network port label\n",
+ __func__);
+ return ret;
+}
+
+/**
+ * sel_netport_sid - Lookup the SID of a network port
+ * @protocol: protocol
+ * @pnum: port
+ * @sid: port SID
+ *
+ * Description:
+ * This function determines the SID of a network port using the fastest method
+ * possible. First the port table is queried, but if an entry can't be found
+ * then the policy is queried and the result is added to the table to speed
+ * up future queries. Returns zero on success, negative values on failure.
+ *
+ */
+int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
+{
+ struct sel_netport *port;
+
+ rcu_read_lock();
+ port = sel_netport_find(protocol, pnum);
+ if (port != NULL) {
+ *sid = port->psec.sid;
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ return sel_netport_sid_slow(protocol, pnum, sid);
+}
+
+/**
+ * sel_netport_flush - Flush the entire network port table
+ *
+ * Description:
+ * Remove all entries from the network port table.
+ *
+ */
+void sel_netport_flush(void)
+{
+ unsigned int idx;
+ struct sel_netport *port, *port_tmp;
+
+ spin_lock_bh(&sel_netport_lock);
+ for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
+ list_for_each_entry_safe(port, port_tmp,
+ &sel_netport_hash[idx].list, list) {
+ list_del_rcu(&port->list);
+ kfree_rcu(port, rcu);
+ }
+ sel_netport_hash[idx].size = 0;
+ }
+ spin_unlock_bh(&sel_netport_lock);
+}
+
+static __init int sel_netport_init(void)
+{
+ int iter;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
+ INIT_LIST_HEAD(&sel_netport_hash[iter].list);
+ sel_netport_hash[iter].size = 0;
+ }
+
+ return 0;
+}
+
+__initcall(sel_netport_init);
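
With a 256-entry table the mask in sel_netport_hashfn() keeps only the low byte of the port number, so ports 256 apart share a bucket, and the list walk in sel_netport_find() disambiguates by port and protocol. A quick standalone illustration:

    #include <stdio.h>

    #define SEL_NETPORT_HASH_SIZE 256

    static unsigned int sel_netport_hashfn(unsigned short pnum)
    {
        return pnum & (SEL_NETPORT_HASH_SIZE - 1);
    }

    int main(void)
    {
        printf("bucket(80)  = %u\n", sel_netport_hashfn(80));   /* 80 */
        printf("bucket(336) = %u\n", sel_netport_hashfn(336));  /* 80 */
        return 0;
    }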
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
new file mode 100644
index 000000000..2ee7b4ed4
--- /dev/null
+++ b/security/selinux/nlmsgtab.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Netlink message type permission tables, for user generated messages.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if.h>
+#include <linux/inet_diag.h>
+#include <linux/xfrm.h>
+#include <linux/audit.h>
+#include <linux/sock_diag.h>
+
+#include "flask.h"
+#include "av_permissions.h"
+#include "security.h"
+
+struct nlmsg_perm {
+ u16 nlmsg_type;
+ u32 perm;
+};
+
+static const struct nlmsg_perm nlmsg_route_perms[] = {
+ { RTM_NEWLINK, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELLINK, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETLINK, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETLINK, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWADDR, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELADDR, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETADDR, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWROUTE, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELROUTE, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETROUTE, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEIGH, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEIGH, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEIGH, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWRULE, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELRULE, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETRULE, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWQDISC, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELQDISC, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETQDISC, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWTCLASS, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELTCLASS, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETTCLASS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWTFILTER, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELTFILTER, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETTFILTER, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWACTION, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELACTION, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETACTION, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWPREFIX, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETMULTICAST, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETANYCAST, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETNEIGHTBL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETNEIGHTBL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNSID, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETVLAN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+};
+
+static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = {
+ { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
+};
+
+static const struct nlmsg_perm nlmsg_xfrm_perms[] = {
+ { XFRM_MSG_NEWSA, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_DELSA, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETSA, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_NEWPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_DELPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETPOLICY, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_ALLOCSPI, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_ACQUIRE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_EXPIRE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_UPDPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_UPDSA, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_POLEXPIRE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_FLUSHSA, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_FLUSHPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_NEWAE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETAE, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_REPORT, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MIGRATE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_NEWSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_GETSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_NEWSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_SETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_READ },
+};
+
+static const struct nlmsg_perm nlmsg_audit_perms[] = {
+ { AUDIT_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_SET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_LIST, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
+ { AUDIT_ADD, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_DEL, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_LIST_RULES, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
+ { AUDIT_ADD_RULE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_DEL_RULE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_USER, NETLINK_AUDIT_SOCKET__NLMSG_RELAY },
+ { AUDIT_SIGNAL_INFO, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_TRIM, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+ { AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
+ { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
+};
+
+
+static int nlmsg_perm(u16 nlmsg_type, u32 *perm, const struct nlmsg_perm *tab, size_t tabsize)
+{
+ int i, err = -EINVAL;
+
+ for (i = 0; i < tabsize/sizeof(struct nlmsg_perm); i++)
+ if (nlmsg_type == tab[i].nlmsg_type) {
+ *perm = tab[i].perm;
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
+{
+ int err = 0;
+
+ switch (sclass) {
+ case SECCLASS_NETLINK_ROUTE_SOCKET:
+ /* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3.
+ * If the BUILD_BUG_ON() below fails you must update the
+ * structures at the top of this file with the new mappings
+ * before updating the BUILD_BUG_ON() macro!
+ */
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWTUNNEL + 3));
+ err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
+ sizeof(nlmsg_route_perms));
+ break;
+
+ case SECCLASS_NETLINK_TCPDIAG_SOCKET:
+ err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
+ sizeof(nlmsg_tcpdiag_perms));
+ break;
+
+ case SECCLASS_NETLINK_XFRM_SOCKET:
+ /* If the BUILD_BUG_ON() below fails you must update the
+ * structures at the top of this file with the new mappings
+ * before updating the BUILD_BUG_ON() macro!
+ */
+ BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_GETDEFAULT);
+ err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
+ sizeof(nlmsg_xfrm_perms));
+ break;
+
+ case SECCLASS_NETLINK_AUDIT_SOCKET:
+ if ((nlmsg_type >= AUDIT_FIRST_USER_MSG &&
+ nlmsg_type <= AUDIT_LAST_USER_MSG) ||
+ (nlmsg_type >= AUDIT_FIRST_USER_MSG2 &&
+ nlmsg_type <= AUDIT_LAST_USER_MSG2)) {
+ *perm = NETLINK_AUDIT_SOCKET__NLMSG_RELAY;
+ } else {
+ err = nlmsg_perm(nlmsg_type, perm, nlmsg_audit_perms,
+ sizeof(nlmsg_audit_perms));
+ }
+ break;
+
+ /* No messaging from userspace, or class unknown/unhandled */
+ default:
+ err = -ENOENT;
+ break;
+ }
+
+ return err;
+}
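
nlmsg_perm() is a straight linear scan of the per-class table. A userspace sketch of the same lookup shape, with placeholder message types and permission bits standing in for the real tables:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    struct nlmsg_perm {
        uint16_t nlmsg_type;
        uint32_t perm;
    };

    /* placeholder values; the kernel tables map real RTM_* constants to
     * NETLINK_*_SOCKET__NLMSG_* permission bits */
    static const struct nlmsg_perm table[] = {
        { 16 /* RTM_NEWLINK */, 0x2 /* nlmsg_write */ },
        { 18 /* RTM_GETLINK */, 0x1 /* nlmsg_read  */ },
    };

    static int nlmsg_perm_lookup(uint16_t type, uint32_t *perm)
    {
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].nlmsg_type == type) {
                *perm = table[i].perm;
                return 0;
            }
        return -EINVAL;  /* unknown type: no permission mapping */
    }

    int main(void)
    {
        uint32_t perm;

        if (!nlmsg_perm_lookup(18, &perm))
            printf("RTM_GETLINK -> perm 0x%x\n", perm);
        return 0;
    }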
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
new file mode 100644
index 000000000..a00d19139
--- /dev/null
+++ b/security/selinux/selinuxfs.c
@@ -0,0 +1,2261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Updated: Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support for the policy capability bitmap
+ *
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/mount.h>
+#include <linux/mutex.h>
+#include <linux/namei.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/security.h>
+#include <linux/major.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/audit.h>
+#include <linux/uaccess.h>
+#include <linux/kobject.h>
+#include <linux/ctype.h>
+
+/* selinuxfs pseudo filesystem for exporting the security policy API.
+ Based on the proc code and the fs/nfsd/nfsctl.c code. */
+
+#include "flask.h"
+#include "avc.h"
+#include "avc_ss.h"
+#include "security.h"
+#include "objsec.h"
+#include "conditional.h"
+#include "ima.h"
+
+enum sel_inos {
+ SEL_ROOT_INO = 2,
+ SEL_LOAD, /* load policy */
+ SEL_ENFORCE, /* get or set enforcing status */
+ SEL_CONTEXT, /* validate context */
+ SEL_ACCESS, /* compute access decision */
+ SEL_CREATE, /* compute create labeling decision */
+ SEL_RELABEL, /* compute relabeling decision */
+ SEL_USER, /* compute reachable user contexts */
+ SEL_POLICYVERS, /* return policy version for this kernel */
+ SEL_COMMIT_BOOLS, /* commit new boolean values */
+ SEL_MLS, /* return if MLS policy is enabled */
+ SEL_DISABLE, /* disable SELinux until next reboot */
+ SEL_MEMBER, /* compute polyinstantiation membership decision */
+ SEL_CHECKREQPROT, /* check requested protection, not kernel-applied one */
+ SEL_COMPAT_NET, /* whether to use old compat network packet controls */
+ SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
+ SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
+ SEL_STATUS, /* export current status using mmap() */
+ SEL_POLICY, /* allow userspace to read the in kernel policy */
+ SEL_VALIDATE_TRANS, /* compute validatetrans decision */
+ SEL_INO_NEXT, /* The next inode number to use */
+};
+
+struct selinux_fs_info {
+ struct dentry *bool_dir;
+ unsigned int bool_num;
+ char **bool_pending_names;
+ int *bool_pending_values;
+ struct dentry *class_dir;
+ unsigned long last_class_ino;
+ bool policy_opened;
+ struct dentry *policycap_dir;
+ unsigned long last_ino;
+ struct selinux_state *state;
+ struct super_block *sb;
+};
+
+static int selinux_fs_info_create(struct super_block *sb)
+{
+ struct selinux_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->last_ino = SEL_INO_NEXT - 1;
+ fsi->state = &selinux_state;
+ fsi->sb = sb;
+ sb->s_fs_info = fsi;
+ return 0;
+}
+
+static void selinux_fs_info_free(struct super_block *sb)
+{
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+
+ if (fsi) {
+ for (i = 0; i < fsi->bool_num; i++)
+ kfree(fsi->bool_pending_names[i]);
+ kfree(fsi->bool_pending_names);
+ kfree(fsi->bool_pending_values);
+ }
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+}
+
+#define SEL_INITCON_INO_OFFSET 0x01000000
+#define SEL_BOOL_INO_OFFSET 0x02000000
+#define SEL_CLASS_INO_OFFSET 0x04000000
+#define SEL_POLICYCAP_INO_OFFSET 0x08000000
+#define SEL_INO_MASK 0x00ffffff
+
+#define BOOL_DIR_NAME "booleans"
+#define CLASS_DIR_NAME "class"
+#define POLICYCAP_DIR_NAME "policy_capabilities"
+
+#define TMPBUFLEN 12
+static ssize_t sel_read_enforce(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ enforcing_enabled(fsi->state));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page = NULL;
+ ssize_t length;
+ int old_value, new_value;
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
+
+ new_value = !!new_value;
+
+ old_value = enforcing_enabled(state);
+ if (new_value != old_value) {
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__SETENFORCE,
+ NULL);
+ if (length)
+ goto out;
+ audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
+ "enforcing=%d old_enforcing=%d auid=%u ses=%u"
+ " enabled=1 old-enabled=1 lsm=selinux res=1",
+ new_value, old_value,
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+ enforcing_set(state, new_value);
+ if (new_value)
+ avc_ss_reset(state->avc, 0);
+ selnl_notify_setenforce(new_value);
+ selinux_status_update_setenforce(state, new_value);
+ if (!new_value)
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+
+ selinux_ima_measure_state(state);
+ }
+ length = count;
+out:
+ kfree(page);
+ return length;
+}
+#else
+#define sel_write_enforce NULL
+#endif
+
+static const struct file_operations sel_enforce_ops = {
+ .read = sel_read_enforce,
+ .write = sel_write_enforce,
+ .llseek = generic_file_llseek,
+};
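
From userspace this node appears as /sys/fs/selinux/enforce (assuming the default selinuxfs mount point); reading it yields "0" or "1", and writing requires the setenforce permission checked above. A minimal reader sketch:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4] = "";
        int fd = open("/sys/fs/selinux/enforce", O_RDONLY);

        if (fd < 0)
            return 1;
        if (read(fd, buf, sizeof(buf) - 1) > 0)
            printf("enforcing=%s\n", buf);
        close(fd);
        return 0;
    }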
+
+static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+ ino_t ino = file_inode(filp)->i_ino;
+ int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
+ security_get_reject_unknown(state) :
+ !security_get_allow_unknown(state);
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown);
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_handle_unknown_ops = {
+ .read = sel_read_handle_unknown,
+ .llseek = generic_file_llseek,
+};
+
+static int sel_open_handle_status(struct inode *inode, struct file *filp)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct page *status = selinux_kernel_status_page(fsi->state);
+
+ if (!status)
+ return -ENOMEM;
+
+ filp->private_data = status;
+
+ return 0;
+}
+
+static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct page *status = filp->private_data;
+
+ BUG_ON(!status);
+
+ return simple_read_from_buffer(buf, count, ppos,
+ page_address(status),
+ sizeof(struct selinux_kernel_status));
+}
+
+static int sel_mmap_handle_status(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct page *status = filp->private_data;
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ BUG_ON(!status);
+
+ /* only allow mapping a single page, from the head */
+ if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
+ return -EIO;
+ /* disallow writable mapping */
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ /* disallow mprotect() from making it writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ page_to_pfn(status),
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations sel_handle_status_ops = {
+ .open = sel_open_handle_status,
+ .read = sel_read_handle_status,
+ .mmap = sel_mmap_handle_status,
+ .llseek = generic_file_llseek,
+};
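
The status node is meant to be mmap()ed read-only and polled with a seqlock-style retry loop: the kernel holds the sequence at an odd value while an update is in flight. A userspace sketch, with the structure layout assumed from the kernel's struct selinux_kernel_status (version 1) and without the memory barriers a production reader would want; libselinux wraps this as selinux_status_open():

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>

    struct selinux_kernel_status {  /* assumed layout, version 1 */
        uint32_t version;
        uint32_t sequence;          /* odd while an update is in flight */
        uint32_t enforcing;
        uint32_t policyload;        /* policy load count */
        uint32_t deny_unknown;
    };

    int main(void)
    {
        volatile struct selinux_kernel_status *st;
        uint32_t seq, enforcing;
        int fd = open("/sys/fs/selinux/status", O_RDONLY);

        if (fd < 0)
            return 1;
        st = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        if (st == MAP_FAILED)
            return 1;
        do {                        /* seqlock-style read: retry if torn */
            seq = st->sequence;
            enforcing = st->enforcing;
        } while ((seq & 1) || seq != st->sequence);
        printf("enforcing=%u policyload=%u\n", enforcing, st->policyload);
        return 0;
    }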
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static ssize_t sel_write_disable(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ char *page;
+ ssize_t length;
+ int new_value;
+ int enforcing;
+
+ /* NOTE: we are now officially considering runtime disable as
+ * deprecated, and using it will become increasingly painful
+ * (e.g. sleeping/blocking) as we progress through future
+ * kernel releases until eventually it is removed
+ */
+ pr_err("SELinux: Runtime disable is deprecated, use selinux=0 on the kernel cmdline.\n");
+ pr_err("SELinux: https://github.com/SELinuxProject/selinux-kernel/wiki/DEPRECATE-runtime-disable\n");
+ ssleep(5);
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
+
+ if (new_value) {
+ enforcing = enforcing_enabled(fsi->state);
+ length = selinux_disable(fsi->state);
+ if (length)
+ goto out;
+ audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
+ "enforcing=%d old_enforcing=%d auid=%u ses=%u"
+ " enabled=0 old-enabled=1 lsm=selinux res=1",
+ enforcing, enforcing,
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+ }
+
+ length = count;
+out:
+ kfree(page);
+ return length;
+}
+#else
+#define sel_write_disable NULL
+#endif
+
+static const struct file_operations sel_disable_ops = {
+ .write = sel_write_disable,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t sel_read_policyvers(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%u", POLICYDB_VERSION_MAX);
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_policyvers_ops = {
+ .read = sel_read_policyvers,
+ .llseek = generic_file_llseek,
+};
+
+/* declaration for sel_write_load */
+static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_dir,
+ unsigned int *bool_num, char ***bool_pending_names,
+ int **bool_pending_values);
+static int sel_make_classes(struct selinux_policy *newpolicy,
+ struct dentry *class_dir,
+ unsigned long *last_class_ino);
+
+/* declaration for sel_make_class_dirs */
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
+ unsigned long *ino);
+
+/* declaration for sel_make_policy_nodes */
+static struct dentry *sel_make_disconnected_dir(struct super_block *sb,
+ unsigned long *ino);
+
+/* declaration for sel_make_policy_nodes */
+static void sel_remove_entries(struct dentry *de);
+
+static ssize_t sel_read_mls(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ security_mls_enabled(fsi->state));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_mls_ops = {
+ .read = sel_read_mls,
+ .llseek = generic_file_llseek,
+};
+
+struct policy_load_memory {
+ size_t len;
+ void *data;
+};
+
+static int sel_open_policy(struct inode *inode, struct file *filp)
+{
+ struct selinux_fs_info *fsi = inode->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ struct policy_load_memory *plm = NULL;
+ int rc;
+
+ BUG_ON(filp->private_data);
+
+ mutex_lock(&fsi->state->policy_mutex);
+
+ rc = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
+ if (rc)
+ goto err;
+
+ rc = -EBUSY;
+ if (fsi->policy_opened)
+ goto err;
+
+ rc = -ENOMEM;
+ plm = kzalloc(sizeof(*plm), GFP_KERNEL);
+ if (!plm)
+ goto err;
+
+ rc = security_read_policy(state, &plm->data, &plm->len);
+ if (rc)
+ goto err;
+
+ if ((size_t)i_size_read(inode) != plm->len) {
+ inode_lock(inode);
+ i_size_write(inode, plm->len);
+ inode_unlock(inode);
+ }
+
+ fsi->policy_opened = 1;
+
+ filp->private_data = plm;
+
+ mutex_unlock(&fsi->state->policy_mutex);
+
+ return 0;
+err:
+ mutex_unlock(&fsi->state->policy_mutex);
+
+ if (plm)
+ vfree(plm->data);
+ kfree(plm);
+ return rc;
+}
+
+static int sel_release_policy(struct inode *inode, struct file *filp)
+{
+ struct selinux_fs_info *fsi = inode->i_sb->s_fs_info;
+ struct policy_load_memory *plm = filp->private_data;
+
+ BUG_ON(!plm);
+
+ fsi->policy_opened = 0;
+
+ vfree(plm->data);
+ kfree(plm);
+
+ return 0;
+}
+
+static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct policy_load_memory *plm = filp->private_data;
+ int ret;
+
+ ret = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
+ if (ret)
+ return ret;
+
+ return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+}
+
+static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
+{
+ struct policy_load_memory *plm = vmf->vma->vm_file->private_data;
+ unsigned long offset;
+ struct page *page;
+
+ if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+ if (offset >= roundup(plm->len, PAGE_SIZE))
+ return VM_FAULT_SIGBUS;
+
+ page = vmalloc_to_page(plm->data + offset);
+ get_page(page);
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct sel_mmap_policy_ops = {
+ .fault = sel_mmap_policy_fault,
+ .page_mkwrite = sel_mmap_policy_fault,
+};
+
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHARED) {
+ /* do not allow mprotect to make mapping writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EACCES;
+ }
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &sel_mmap_policy_ops;
+
+ return 0;
+}
+
+static const struct file_operations sel_policy_ops = {
+ .open = sel_open_policy,
+ .read = sel_read_policy,
+ .mmap = sel_mmap_policy,
+ .release = sel_release_policy,
+ .llseek = generic_file_llseek,
+};
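
Userspace can pull the in-kernel binary policy back out through this node, given the read_policy permission. A sketch that dumps /sys/fs/selinux/policy to stdout (error handling abbreviated):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/fs/selinux/policy", O_RDONLY);

        if (fd < 0)
            return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, (size_t)n, stdout);  /* raw binary policy */
        close(fd);
        return 0;
    }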
+
+static void sel_remove_old_bool_data(unsigned int bool_num, char **bool_names,
+ int *bool_values)
+{
+ u32 i;
+
+ /* bool_dir cleanup */
+ for (i = 0; i < bool_num; i++)
+ kfree(bool_names[i]);
+ kfree(bool_names);
+ kfree(bool_values);
+}
+
+static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
+ struct selinux_policy *newpolicy)
+{
+ int ret = 0;
+ struct dentry *tmp_parent, *tmp_bool_dir, *tmp_class_dir, *old_dentry;
+ unsigned int tmp_bool_num, old_bool_num;
+ char **tmp_bool_names, **old_bool_names;
+ int *tmp_bool_values, *old_bool_values;
+ unsigned long tmp_ino = fsi->last_ino; /* Don't increment last_ino in this function */
+
+ tmp_parent = sel_make_disconnected_dir(fsi->sb, &tmp_ino);
+ if (IS_ERR(tmp_parent))
+ return PTR_ERR(tmp_parent);
+
+ tmp_ino = fsi->bool_dir->d_inode->i_ino - 1; /* sel_make_dir will increment and set */
+ tmp_bool_dir = sel_make_dir(tmp_parent, BOOL_DIR_NAME, &tmp_ino);
+ if (IS_ERR(tmp_bool_dir)) {
+ ret = PTR_ERR(tmp_bool_dir);
+ goto out;
+ }
+
+ tmp_ino = fsi->class_dir->d_inode->i_ino - 1; /* sel_make_dir will increment and set */
+ tmp_class_dir = sel_make_dir(tmp_parent, CLASS_DIR_NAME, &tmp_ino);
+ if (IS_ERR(tmp_class_dir)) {
+ ret = PTR_ERR(tmp_class_dir);
+ goto out;
+ }
+
+ ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num,
+ &tmp_bool_names, &tmp_bool_values);
+ if (ret)
+ goto out;
+
+ ret = sel_make_classes(newpolicy, tmp_class_dir,
+ &fsi->last_class_ino);
+ if (ret)
+ goto out;
+
+ /* booleans */
+ old_dentry = fsi->bool_dir;
+ lock_rename(tmp_bool_dir, old_dentry);
+ d_exchange(tmp_bool_dir, fsi->bool_dir);
+
+ old_bool_num = fsi->bool_num;
+ old_bool_names = fsi->bool_pending_names;
+ old_bool_values = fsi->bool_pending_values;
+
+ fsi->bool_num = tmp_bool_num;
+ fsi->bool_pending_names = tmp_bool_names;
+ fsi->bool_pending_values = tmp_bool_values;
+
+ sel_remove_old_bool_data(old_bool_num, old_bool_names, old_bool_values);
+
+ fsi->bool_dir = tmp_bool_dir;
+ unlock_rename(tmp_bool_dir, old_dentry);
+
+ /* classes */
+ old_dentry = fsi->class_dir;
+ lock_rename(tmp_class_dir, old_dentry);
+ d_exchange(tmp_class_dir, fsi->class_dir);
+ fsi->class_dir = tmp_class_dir;
+ unlock_rename(tmp_class_dir, old_dentry);
+
+out:
+ /* Since the other temporary dirs are children of tmp_parent,
+ * this will handle all of the cleanup in the case of a failure
+ * before the swapover.
+ */
+ sel_remove_entries(tmp_parent);
+ dput(tmp_parent); /* d_genocide() only handles the children */
+
+ return ret;
+}
+
+static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_load_state load_state;
+ ssize_t length;
+ void *data = NULL;
+
+ mutex_lock(&fsi->state->policy_mutex);
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__LOAD_POLICY, NULL);
+ if (length)
+ goto out;
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ length = -ENOMEM;
+ data = vmalloc(count);
+ if (!data)
+ goto out;
+
+ length = -EFAULT;
+ if (copy_from_user(data, buf, count) != 0)
+ goto out;
+
+ length = security_load_policy(fsi->state, data, count, &load_state);
+ if (length) {
+ pr_warn_ratelimited("SELinux: failed to load policy\n");
+ goto out;
+ }
+
+ length = sel_make_policy_nodes(fsi, load_state.policy);
+ if (length) {
+ pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n");
+ selinux_policy_cancel(fsi->state, &load_state);
+ goto out;
+ }
+
+ selinux_policy_commit(fsi->state, &load_state);
+
+ length = count;
+
+ audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
+ "auid=%u ses=%u lsm=selinux res=1",
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+out:
+ mutex_unlock(&fsi->state->policy_mutex);
+ vfree(data);
+ return length;
+}
+
+static const struct file_operations sel_load_ops = {
+ .write = sel_write_load,
+ .llseek = generic_file_llseek,
+};
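
Because partial writes are rejected, userspace must hand the whole binary policy to /sys/fs/selinux/load in a single write(); libselinux's security_load_policy() behaves this way. A sketch (error handling abbreviated):

    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
        struct stat st;
        char *data;
        int in, out;

        if (argc != 2 || (in = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (fstat(in, &st) < 0 || !(data = malloc(st.st_size)))
            return 1;
        if (read(in, data, st.st_size) != st.st_size)
            return 1;
        out = open("/sys/fs/selinux/load", O_WRONLY);
        if (out < 0 || write(out, data, st.st_size) != st.st_size)
            return 1;  /* a short or failed write means the load failed */
        return 0;
    }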
+
+static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *canon = NULL;
+ u32 sid, len;
+ ssize_t length;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, NULL);
+ if (length)
+ goto out;
+
+ length = security_context_to_sid(state, buf, size, &sid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_sid_to_context(state, sid, &canon, &len);
+ if (length)
+ goto out;
+
+ length = -ERANGE;
+ if (len > SIMPLE_TRANSACTION_LIMIT) {
+ pr_err("SELinux: %s: context size (%u) exceeds "
+ "payload max\n", __func__, len);
+ goto out;
+ }
+
+ memcpy(buf, canon, len);
+ length = len;
+out:
+ kfree(canon);
+ return length;
+}
+
+static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+ checkreqprot_get(fsi->state));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ char *page;
+ ssize_t length;
+ unsigned int new_value;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__SETCHECKREQPROT,
+ NULL);
+ if (length)
+ return length;
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ length = -EINVAL;
+ if (sscanf(page, "%u", &new_value) != 1)
+ goto out;
+
+ if (new_value) {
+ char comm[sizeof(current->comm)];
+
+ memcpy(comm, current->comm, sizeof(comm));
+ pr_err("SELinux: %s (%d) set checkreqprot to 1. This is deprecated and will be rejected in a future kernel release.\n",
+ comm, current->pid);
+ }
+
+ checkreqprot_set(fsi->state, (new_value ? 1 : 0));
+ if (new_value)
+ ssleep(5);
+ length = count;
+
+ selinux_ima_measure_state(fsi->state);
+
+out:
+ kfree(page);
+ return length;
+}
+static const struct file_operations sel_checkreqprot_ops = {
+ .read = sel_read_checkreqprot,
+ .write = sel_write_checkreqprot,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t sel_write_validatetrans(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *oldcon = NULL, *newcon = NULL, *taskcon = NULL;
+ char *req = NULL;
+ u32 osid, nsid, tsid;
+ u16 tclass;
+ int rc;
+
+ rc = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__VALIDATE_TRANS, NULL);
+ if (rc)
+ goto out;
+
+ rc = -ENOMEM;
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ /* No partial writes. */
+ rc = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ req = memdup_user_nul(buf, count);
+ if (IS_ERR(req)) {
+ rc = PTR_ERR(req);
+ req = NULL;
+ goto out;
+ }
+
+ rc = -ENOMEM;
+ oldcon = kzalloc(count + 1, GFP_KERNEL);
+ if (!oldcon)
+ goto out;
+
+ newcon = kzalloc(count + 1, GFP_KERNEL);
+ if (!newcon)
+ goto out;
+
+ taskcon = kzalloc(count + 1, GFP_KERNEL);
+ if (!taskcon)
+ goto out;
+
+ rc = -EINVAL;
+ if (sscanf(req, "%s %s %hu %s", oldcon, newcon, &tclass, taskcon) != 4)
+ goto out;
+
+ rc = security_context_str_to_sid(state, oldcon, &osid, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+ rc = security_context_str_to_sid(state, newcon, &nsid, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+ rc = security_context_str_to_sid(state, taskcon, &tsid, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+ rc = security_validate_transition_user(state, osid, nsid, tsid, tclass);
+ if (!rc)
+ rc = count;
+out:
+ kfree(req);
+ kfree(oldcon);
+ kfree(newcon);
+ kfree(taskcon);
+ return rc;
+}
+
+static const struct file_operations sel_transition_ops = {
+ .write = sel_write_validatetrans,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * Remaining nodes use transaction-based I/O methods, like nfsd/nfsctl.c
+ */
+static ssize_t sel_write_access(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_create(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_user(struct file *file, char *buf, size_t size);
+static ssize_t sel_write_member(struct file *file, char *buf, size_t size);
+
+static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
+ [SEL_ACCESS] = sel_write_access,
+ [SEL_CREATE] = sel_write_create,
+ [SEL_RELABEL] = sel_write_relabel,
+ [SEL_USER] = sel_write_user,
+ [SEL_MEMBER] = sel_write_member,
+ [SEL_CONTEXT] = sel_write_context,
+};
+
+static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+{
+ ino_t ino = file_inode(file)->i_ino;
+ char *data;
+ ssize_t rv;
+
+ if (ino >= ARRAY_SIZE(write_op) || !write_op[ino])
+ return -EINVAL;
+
+ data = simple_transaction_get(file, buf, size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ rv = write_op[ino](file, data, size);
+ if (rv > 0) {
+ simple_transaction_set(file, rv);
+ rv = size;
+ }
+ return rv;
+}
+
+static const struct file_operations transaction_ops = {
+ .write = selinux_transaction_write,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * payload - write methods
+ * If the method has a response, the response should be put in buf,
+ * and the length returned. Otherwise return 0 or a negative error.
+ */
+
+static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *scon = NULL, *tcon = NULL;
+ u32 ssid, tsid;
+ u16 tclass;
+ struct av_decision avd;
+ ssize_t length;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__COMPUTE_AV, NULL);
+ if (length)
+ goto out;
+
+ length = -ENOMEM;
+ scon = kzalloc(size + 1, GFP_KERNEL);
+ if (!scon)
+ goto out;
+
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
+ if (!tcon)
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+ goto out;
+
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ security_compute_av_user(state, ssid, tsid, tclass, &avd);
+
+ length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
+ "%x %x %x %x %u %x",
+ avd.allowed, 0xffffffff,
+ avd.auditallow, avd.auditdeny,
+ avd.seqno, avd.flags);
+out:
+ kfree(tcon);
+ kfree(scon);
+ return length;
+}
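
The access node follows the transaction pattern above: write the query, then read the reply on the same descriptor (the write op does not advance the file position, so the read starts at offset 0). A sketch; the contexts and the class number are illustrative only, and libselinux exposes this as security_compute_av():

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        /* "scon tcon tclass"; contexts and class index 2 are examples */
        const char *query = "system_u:system_r:init_t:s0 "
                            "system_u:object_r:bin_t:s0 2";
        char reply[256] = "";
        int fd = open("/sys/fs/selinux/access", O_RDWR);

        if (fd < 0 || write(fd, query, strlen(query)) < 0)
            return 1;
        if (read(fd, reply, sizeof(reply) - 1) > 0)
            /* "allowed decided auditallow auditdeny seqno flags" */
            printf("%s\n", reply);
        close(fd);
        return 0;
    }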
+
+static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *scon = NULL, *tcon = NULL;
+ char *namebuf = NULL, *objname = NULL;
+ u32 ssid, tsid, newsid;
+ u16 tclass;
+ ssize_t length;
+ char *newcon = NULL;
+ u32 len;
+ int nargs;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -ENOMEM;
+ scon = kzalloc(size + 1, GFP_KERNEL);
+ if (!scon)
+ goto out;
+
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
+ if (!tcon)
+ goto out;
+
+ length = -ENOMEM;
+ namebuf = kzalloc(size + 1, GFP_KERNEL);
+ if (!namebuf)
+ goto out;
+
+ length = -EINVAL;
+ nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
+ if (nargs < 3 || nargs > 4)
+ goto out;
+ if (nargs == 4) {
+ /*
+ * If the name of the new object to be queried contains
+ * whitespace or multibyte characters, it must be encoded
+ * using the percent-encoding rule.
+ * If not encoded, the sscanf logic picks up only the left
+ * half of the supplied name, unexpectedly split at the
+ * whitespace.
+ */
+ char *r, *w;
+ int c1, c2;
+
+ r = w = namebuf;
+ do {
+ c1 = *r++;
+ if (c1 == '+')
+ c1 = ' ';
+ else if (c1 == '%') {
+ c1 = hex_to_bin(*r++);
+ if (c1 < 0)
+ goto out;
+ c2 = hex_to_bin(*r++);
+ if (c2 < 0)
+ goto out;
+ c1 = (c1 << 4) | c2;
+ }
+ *w++ = c1;
+ } while (c1 != '\0');
+
+ objname = namebuf;
+ }
+
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_transition_sid_user(state, ssid, tsid, tclass,
+ objname, &newsid);
+ if (length)
+ goto out;
+
+ length = security_sid_to_context(state, newsid, &newcon, &len);
+ if (length)
+ goto out;
+
+ length = -ERANGE;
+ if (len > SIMPLE_TRANSACTION_LIMIT) {
+ pr_err("SELinux: %s: context size (%u) exceeds "
+ "payload max\n", __func__, len);
+ goto out;
+ }
+
+ memcpy(buf, newcon, len);
+ length = len;
+out:
+ kfree(newcon);
+ kfree(namebuf);
+ kfree(tcon);
+ kfree(scon);
+ return length;
+}
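
Per the comment in the parsing loop, userspace should percent-encode the optional object name so that sscanf() sees it as a single token; the kernel decodes both '+' (as space) and %xx sequences. A sketch of a matching encoder:

    #include <stdio.h>
    #include <ctype.h>

    /* Percent-encode everything that is not alphanumeric; the kernel
     * also accepts '+' for a space, but "%20" works for both. */
    static void encode_name(const char *in, char *out)
    {
        for (; *in; in++) {
            if (isalnum((unsigned char)*in))
                *out++ = *in;
            else
                out += sprintf(out, "%%%02x", (unsigned char)*in);
        }
        *out = '\0';
    }

    int main(void)
    {
        char enc[256];

        encode_name("my file.txt", enc);
        printf("%s\n", enc);  /* my%20file%2etxt */
        return 0;
    }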
+
+static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *scon = NULL, *tcon = NULL;
+ u32 ssid, tsid, newsid;
+ u16 tclass;
+ ssize_t length;
+ char *newcon = NULL;
+ u32 len;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -ENOMEM;
+ scon = kzalloc(size + 1, GFP_KERNEL);
+ if (!scon)
+ goto out;
+
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
+ if (!tcon)
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+ goto out;
+
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_change_sid(state, ssid, tsid, tclass, &newsid);
+ if (length)
+ goto out;
+
+ length = security_sid_to_context(state, newsid, &newcon, &len);
+ if (length)
+ goto out;
+
+ length = -ERANGE;
+ if (len > SIMPLE_TRANSACTION_LIMIT)
+ goto out;
+
+ memcpy(buf, newcon, len);
+ length = len;
+out:
+ kfree(newcon);
+ kfree(tcon);
+ kfree(scon);
+ return length;
+}
+
+static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *con = NULL, *user = NULL, *ptr;
+ u32 sid, *sids = NULL;
+ ssize_t length;
+ char *newcon;
+ int i, rc;
+ u32 len, nsids;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__COMPUTE_USER,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -ENOMEM;
+ con = kzalloc(size + 1, GFP_KERNEL);
+ if (!con)
+ goto out;
+
+ length = -ENOMEM;
+ user = kzalloc(size + 1, GFP_KERNEL);
+ if (!user)
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(buf, "%s %s", con, user) != 2)
+ goto out;
+
+ length = security_context_str_to_sid(state, con, &sid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_get_user_sids(state, sid, user, &sids, &nsids);
+ if (length)
+ goto out;
+
+ length = sprintf(buf, "%u", nsids) + 1;
+ ptr = buf + length;
+ for (i = 0; i < nsids; i++) {
+ rc = security_sid_to_context(state, sids[i], &newcon, &len);
+ if (rc) {
+ length = rc;
+ goto out;
+ }
+ if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) {
+ kfree(newcon);
+ length = -ERANGE;
+ goto out;
+ }
+ memcpy(ptr, newcon, len);
+ kfree(newcon);
+ ptr += len;
+ length += len;
+ }
+out:
+ kfree(sids);
+ kfree(user);
+ kfree(con);
+ return length;
+}
+
+static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *scon = NULL, *tcon = NULL;
+ u32 ssid, tsid, newsid;
+ u16 tclass;
+ ssize_t length;
+ char *newcon = NULL;
+ u32 len;
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -ENOMEM;
+ scon = kzalloc(size + 1, GFP_KERNEL);
+ if (!scon)
+ goto out;
+
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
+ if (!tcon)
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+ goto out;
+
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_member_sid(state, ssid, tsid, tclass, &newsid);
+ if (length)
+ goto out;
+
+ length = security_sid_to_context(state, newsid, &newcon, &len);
+ if (length)
+ goto out;
+
+ length = -ERANGE;
+ if (len > SIMPLE_TRANSACTION_LIMIT) {
+ pr_err("SELinux: %s: context size (%u) exceeds "
+ "payload max\n", __func__, len);
+ goto out;
+ }
+
+ memcpy(buf, newcon, len);
+ length = len;
+out:
+ kfree(newcon);
+ kfree(tcon);
+ kfree(scon);
+ return length;
+}
+
+static struct inode *sel_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ }
+ return ret;
+}
+
+static ssize_t sel_read_bool(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
+ char *page = NULL;
+ ssize_t length;
+ ssize_t ret;
+ int cur_enforcing;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
+ const char *name = filep->f_path.dentry->d_name.name;
+
+ mutex_lock(&fsi->state->policy_mutex);
+
+ ret = -EINVAL;
+ if (index >= fsi->bool_num || strcmp(name,
+ fsi->bool_pending_names[index]))
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ page = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ goto out_unlock;
+
+ cur_enforcing = security_get_bool_value(fsi->state, index);
+ if (cur_enforcing < 0) {
+ ret = cur_enforcing;
+ goto out_unlock;
+ }
+ length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
+ fsi->bool_pending_values[index]);
+ mutex_unlock(&fsi->state->policy_mutex);
+ ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
+ free_page((unsigned long)page);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&fsi->state->policy_mutex);
+ goto out_free;
+}
+
+static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
+ char *page = NULL;
+ ssize_t length;
+ int new_value;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
+ const char *name = filep->f_path.dentry->d_name.name;
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ mutex_lock(&fsi->state->policy_mutex);
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__SETBOOL,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -EINVAL;
+ if (index >= fsi->bool_num || strcmp(name,
+ fsi->bool_pending_names[index]))
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
+
+ if (new_value)
+ new_value = 1;
+
+ fsi->bool_pending_values[index] = new_value;
+ length = count;
+
+out:
+ mutex_unlock(&fsi->state->policy_mutex);
+ kfree(page);
+ return length;
+}
+
+static const struct file_operations sel_bool_ops = {
+ .read = sel_read_bool,
+ .write = sel_write_bool,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t sel_commit_bools_write(struct file *filep,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
+ char *page = NULL;
+ ssize_t length;
+ int new_value;
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ mutex_lock(&fsi->state->policy_mutex);
+
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__SETBOOL,
+ NULL);
+ if (length)
+ goto out;
+
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
+
+ length = 0;
+ if (new_value && fsi->bool_pending_values)
+ length = security_set_bools(fsi->state, fsi->bool_num,
+ fsi->bool_pending_values);
+
+ if (!length)
+ length = count;
+
+out:
+ mutex_unlock(&fsi->state->policy_mutex);
+ kfree(page);
+ return length;
+}
+
+static const struct file_operations sel_commit_bools_ops = {
+ .write = sel_commit_bools_write,
+ .llseek = generic_file_llseek,
+};
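+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): flipping
+ * a boolean from userspace through the two files above. The mount point and
+ * boolean name are assumptions for the example only. Writing a boolean just
+ * stages a pending value; a non-zero write to commit_pending_bools then
+ * applies every pending value at once.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int set_bool(void)
+ *	{
+ *		int fd = open("/sys/fs/selinux/booleans/secure_mode", O_WRONLY);
+ *		if (fd < 0)
+ *			return -1;
+ *		write(fd, "1", 1);	// stage the pending value
+ *		close(fd);
+ *		fd = open("/sys/fs/selinux/commit_pending_bools", O_WRONLY);
+ *		if (fd < 0)
+ *			return -1;
+ *		write(fd, "1", 1);	// commit all pending values
+ *		close(fd);
+ *		return 0;
+ *	}
+ */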
+
+static void sel_remove_entries(struct dentry *de)
+{
+ d_genocide(de);
+ shrink_dcache_parent(de);
+}
+
+static int sel_make_bools(struct selinux_policy *newpolicy, struct dentry *bool_dir,
+ unsigned int *bool_num, char ***bool_pending_names,
+ int **bool_pending_values)
+{
+ int ret;
+ ssize_t len;
+ struct dentry *dentry = NULL;
+ struct inode *inode = NULL;
+ struct inode_security_struct *isec;
+ char **names = NULL, *page;
+ u32 i, num;
+ int *values = NULL;
+ u32 sid;
+
+ ret = -ENOMEM;
+ page = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ goto out;
+
+ ret = security_get_bools(newpolicy, &num, &names, &values);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < num; i++) {
+ ret = -ENOMEM;
+ dentry = d_alloc_name(bool_dir, names[i]);
+ if (!dentry)
+ goto out;
+
+ ret = -ENOMEM;
+ inode = sel_make_inode(bool_dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
+ if (!inode) {
+ dput(dentry);
+ goto out;
+ }
+
+ ret = -ENAMETOOLONG;
+ len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
+ if (len >= PAGE_SIZE) {
+ dput(dentry);
+ iput(inode);
+ goto out;
+ }
+
+ isec = selinux_inode(inode);
+ ret = selinux_policy_genfs_sid(newpolicy, "selinuxfs", page,
+ SECCLASS_FILE, &sid);
+ if (ret) {
+ pr_warn_ratelimited("SELinux: no sid found, defaulting to security isid for %s\n",
+ page);
+ sid = SECINITSID_SECURITY;
+ }
+
+ isec->sid = sid;
+ isec->initialized = LABEL_INITIALIZED;
+ inode->i_fop = &sel_bool_ops;
+ inode->i_ino = i|SEL_BOOL_INO_OFFSET;
+ d_add(dentry, inode);
+ }
+ *bool_num = num;
+ *bool_pending_names = names;
+ *bool_pending_values = values;
+
+ free_page((unsigned long)page);
+ return 0;
+out:
+ free_page((unsigned long)page);
+
+ if (names) {
+ for (i = 0; i < num; i++)
+ kfree(names[i]);
+ kfree(names);
+ }
+ kfree(values);
+ sel_remove_entries(bool_dir);
+
+ return ret;
+}
+
+static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+ avc_get_cache_threshold(state->avc));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static ssize_t sel_write_avc_cache_threshold(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page;
+ ssize_t ret;
+ unsigned int new_value;
+
+ ret = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__SETSECPARAM,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ ret = -EINVAL;
+ if (sscanf(page, "%u", &new_value) != 1)
+ goto out;
+
+ avc_set_cache_threshold(state->avc, new_value);
+
+ ret = count;
+out:
+ kfree(page);
+ return ret;
+}
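+
+/*
+ * Illustrative sketch (editor's addition): tuning the AVC cache threshold
+ * from userspace. The path assumes the conventional selinuxfs mount point.
+ *
+ *	int fd = open("/sys/fs/selinux/avc/cache_threshold", O_WRONLY);
+ *	write(fd, "1024", 4);	// parsed by the sscanf() above as %u
+ *	close(fd);
+ */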
+
+static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page;
+ ssize_t length;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ length = avc_get_hash_stats(state->avc, page);
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos, page, length);
+ free_page((unsigned long)page);
+
+ return length;
+}
+
+static ssize_t sel_read_sidtab_hash_stats(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page;
+ ssize_t length;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ length = security_sidtab_hash_stats(state, page);
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos, page,
+ length);
+ free_page((unsigned long)page);
+
+ return length;
+}
+
+static const struct file_operations sel_sidtab_hash_stats_ops = {
+ .read = sel_read_sidtab_hash_stats,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations sel_avc_cache_threshold_ops = {
+ .read = sel_read_avc_cache_threshold,
+ .write = sel_write_avc_cache_threshold,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations sel_avc_hash_stats_ops = {
+ .read = sel_read_avc_hash_stats,
+ .llseek = generic_file_llseek,
+};
+
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
+{
+ int cpu;
+
+ for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *idx = cpu + 1;
+ return &per_cpu(avc_cache_stats, cpu);
+ }
+ (*idx)++;
+ return NULL;
+}
+
+static void *sel_avc_stats_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ loff_t n = *pos - 1;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ return sel_avc_get_stat_idx(&n);
+}
+
+static void *sel_avc_stats_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return sel_avc_get_stat_idx(pos);
+}
+
+static int sel_avc_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct avc_cache_stats *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq,
+ "lookups hits misses allocations reclaims frees\n");
+ } else {
+ unsigned int lookups = st->lookups;
+ unsigned int misses = st->misses;
+ unsigned int hits = lookups - misses;
+ seq_printf(seq, "%u %u %u %u %u %u\n", lookups,
+ hits, misses, st->allocations,
+ st->reclaims, st->frees);
+ }
+ return 0;
+}
+
+static void sel_avc_stats_seq_stop(struct seq_file *seq, void *v)
+{ }
+
+static const struct seq_operations sel_avc_cache_stats_seq_ops = {
+ .start = sel_avc_stats_seq_start,
+ .next = sel_avc_stats_seq_next,
+ .show = sel_avc_stats_seq_show,
+ .stop = sel_avc_stats_seq_stop,
+};
+
+static int sel_open_avc_cache_stats(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &sel_avc_cache_stats_seq_ops);
+}
+
+static const struct file_operations sel_avc_cache_stats_ops = {
+ .open = sel_open_avc_cache_stats,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
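+
+/*
+ * Editor's note: cache_stats is a seq_file that emits a header row followed
+ * by one row per possible CPU, in the format produced by
+ * sel_avc_stats_seq_show() above. Example output (values illustrative only):
+ *
+ *	lookups hits misses allocations reclaims frees
+ *	4523 4481 42 42 0 0
+ *	3980 3955 25 25 0 0
+ */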
+
+static int sel_make_avc_files(struct dentry *dir)
+{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+ static const struct tree_descr files[] = {
+ { "cache_threshold",
+ &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR },
+ { "hash_stats", &sel_avc_hash_stats_ops, S_IRUGO },
+#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
+ { "cache_stats", &sel_avc_cache_stats_ops, S_IRUGO },
+#endif
+ };
+
+ for (i = 0; i < ARRAY_SIZE(files); i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+
+ dentry = d_alloc_name(dir, files[i].name);
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = files[i].ops;
+ inode->i_ino = ++fsi->last_ino;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
+static int sel_make_ss_files(struct dentry *dir)
+{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+ static struct tree_descr files[] = {
+ { "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(files); i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+
+ dentry = d_alloc_name(dir, files[i].name);
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = files[i].ops;
+ inode->i_ino = ++fsi->last_ino;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
+static ssize_t sel_read_initcon(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ char *con;
+ u32 sid, len;
+ ssize_t ret;
+
+ sid = file_inode(file)->i_ino&SEL_INO_MASK;
+ ret = security_sid_to_context(fsi->state, sid, &con, &len);
+ if (ret)
+ return ret;
+
+ ret = simple_read_from_buffer(buf, count, ppos, con, len);
+ kfree(con);
+ return ret;
+}
+
+static const struct file_operations sel_initcon_ops = {
+ .read = sel_read_initcon,
+ .llseek = generic_file_llseek,
+};
+
+static int sel_make_initcon_files(struct dentry *dir)
+{
+ int i;
+
+ for (i = 1; i <= SECINITSID_NUM; i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+ const char *s = security_get_initial_sid_context(i);
+
+ if (!s)
+ continue;
+ dentry = d_alloc_name(dir, s);
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = &sel_initcon_ops;
+ inode->i_ino = i|SEL_INITCON_INO_OFFSET;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
+static inline unsigned long sel_class_to_ino(u16 class)
+{
+ return (class * (SEL_VEC_MAX + 1)) | SEL_CLASS_INO_OFFSET;
+}
+
+static inline u16 sel_ino_to_class(unsigned long ino)
+{
+ return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1);
+}
+
+static inline unsigned long sel_perm_to_ino(u16 class, u32 perm)
+{
+ return (class * (SEL_VEC_MAX + 1) + perm) | SEL_CLASS_INO_OFFSET;
+}
+
+static inline u32 sel_ino_to_perm(unsigned long ino)
+{
+ return (ino & SEL_INO_MASK) % (SEL_VEC_MAX + 1);
+}
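+
+/*
+ * Worked example (editor's addition), assuming SEL_VEC_MAX is 32 as defined
+ * earlier in this file: for class 5, permission 3,
+ *
+ *	ino = sel_perm_to_ino(5, 3) = (5 * 33 + 3) | SEL_CLASS_INO_OFFSET
+ *	sel_ino_to_class(ino) = (ino & SEL_INO_MASK) / 33 = 5
+ *	sel_ino_to_perm(ino)  = (ino & SEL_INO_MASK) % 33 = 3
+ *
+ * so a single inode number encodes both a class and a permission within it.
+ */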
+
+static ssize_t sel_read_class(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
+}
+
+static const struct file_operations sel_class_ops = {
+ .read = sel_read_class,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t sel_read_perm(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
+}
+
+static const struct file_operations sel_perm_ops = {
+ .read = sel_read_perm,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t sel_read_policycap(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ int value;
+ char tmpbuf[TMPBUFLEN];
+ ssize_t length;
+ unsigned long i_ino = file_inode(file)->i_ino;
+
+ value = security_policycap_supported(fsi->state, i_ino & SEL_INO_MASK);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
+
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_policycap_ops = {
+ .read = sel_read_policycap,
+ .llseek = generic_file_llseek,
+};
+
+static int sel_make_perm_files(struct selinux_policy *newpolicy,
+ char *objclass, int classvalue,
+ struct dentry *dir)
+{
+ int i, rc, nperms;
+ char **perms;
+
+ rc = security_get_permissions(newpolicy, objclass, &perms, &nperms);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < nperms; i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+
+ rc = -ENOMEM;
+ dentry = d_alloc_name(dir, perms[i]);
+ if (!dentry)
+ goto out;
+
+ rc = -ENOMEM;
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+ if (!inode) {
+ dput(dentry);
+ goto out;
+ }
+
+ inode->i_fop = &sel_perm_ops;
+ /* i+1 since perm values are 1-indexed */
+ inode->i_ino = sel_perm_to_ino(classvalue, i + 1);
+ d_add(dentry, inode);
+ }
+ rc = 0;
+out:
+ for (i = 0; i < nperms; i++)
+ kfree(perms[i]);
+ kfree(perms);
+ return rc;
+}
+
+static int sel_make_class_dir_entries(struct selinux_policy *newpolicy,
+ char *classname, int index,
+ struct dentry *dir)
+{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ struct dentry *dentry = NULL;
+ struct inode *inode = NULL;
+
+ dentry = d_alloc_name(dir, "index");
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = &sel_class_ops;
+ inode->i_ino = sel_class_to_ino(index);
+ d_add(dentry, inode);
+
+ dentry = sel_make_dir(dir, "perms", &fsi->last_class_ino);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return sel_make_perm_files(newpolicy, classname, index, dentry);
+}
+
+static int sel_make_classes(struct selinux_policy *newpolicy,
+ struct dentry *class_dir,
+ unsigned long *last_class_ino)
+{
+ int rc, nclasses, i;
+ char **classes;
+
+ rc = security_get_classes(newpolicy, &classes, &nclasses);
+ if (rc)
+ return rc;
+
+ /* +2 since classes are 1-indexed */
+ *last_class_ino = sel_class_to_ino(nclasses + 2);
+
+ for (i = 0; i < nclasses; i++) {
+ struct dentry *class_name_dir;
+
+ class_name_dir = sel_make_dir(class_dir, classes[i],
+ last_class_ino);
+ if (IS_ERR(class_name_dir)) {
+ rc = PTR_ERR(class_name_dir);
+ goto out;
+ }
+
+ /* i+1 since class values are 1-indexed */
+ rc = sel_make_class_dir_entries(newpolicy, classes[i], i + 1,
+ class_name_dir);
+ if (rc)
+ goto out;
+ }
+ rc = 0;
+out:
+ for (i = 0; i < nclasses; i++)
+ kfree(classes[i]);
+ kfree(classes);
+ return rc;
+}
+
+static int sel_make_policycap(struct selinux_fs_info *fsi)
+{
+ unsigned int iter;
+ struct dentry *dentry = NULL;
+ struct inode *inode = NULL;
+
+ for (iter = 0; iter <= POLICYDB_CAP_MAX; iter++) {
+ if (iter < ARRAY_SIZE(selinux_policycap_names))
+ dentry = d_alloc_name(fsi->policycap_dir,
+ selinux_policycap_names[iter]);
+ else
+ dentry = d_alloc_name(fsi->policycap_dir, "unknown");
+
+ if (dentry == NULL)
+ return -ENOMEM;
+
+ inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
+ if (inode == NULL) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = &sel_policycap_ops;
+ inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
+ unsigned long *ino)
+{
+ struct dentry *dentry = d_alloc_name(dir, name);
+ struct inode *inode;
+
+ if (!dentry)
+ return ERR_PTR(-ENOMEM);
+
+ inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_ino = ++(*ino);
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_add(dentry, inode);
+ /* bump link count on parent directory, too */
+ inc_nlink(d_inode(dir));
+
+ return dentry;
+}
+
+static struct dentry *sel_make_disconnected_dir(struct super_block *sb,
+ unsigned long *ino)
+{
+ struct inode *inode = sel_make_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_ino = ++(*ino);
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ return d_obtain_alias(inode);
+}
+
+#define NULL_FILE_NAME "null"
+
+static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct selinux_fs_info *fsi;
+ int ret;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct inode_security_struct *isec;
+
+ static const struct tree_descr selinux_files[] = {
+ [SEL_LOAD] = {"load", &sel_load_ops, S_IRUSR|S_IWUSR},
+ [SEL_ENFORCE] = {"enforce", &sel_enforce_ops, S_IRUGO|S_IWUSR},
+ [SEL_CONTEXT] = {"context", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_ACCESS] = {"access", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_CREATE] = {"create", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_RELABEL] = {"relabel", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_USER] = {"user", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_POLICYVERS] = {"policyvers", &sel_policyvers_ops, S_IRUGO},
+ [SEL_COMMIT_BOOLS] = {"commit_pending_bools", &sel_commit_bools_ops, S_IWUSR},
+ [SEL_MLS] = {"mls", &sel_mls_ops, S_IRUGO},
+ [SEL_DISABLE] = {"disable", &sel_disable_ops, S_IWUSR},
+ [SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO},
+ [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
+ [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
+ [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
+ [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
+ [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
+ [SEL_VALIDATE_TRANS] = {"validatetrans", &sel_transition_ops,
+ S_IWUGO},
+ /* last one */ {""}
+ };
+
+ ret = selinux_fs_info_create(sb);
+ if (ret)
+ goto err;
+
+ ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
+ if (ret)
+ goto err;
+
+ fsi = sb->s_fs_info;
+ fsi->bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &fsi->last_ino);
+ if (IS_ERR(fsi->bool_dir)) {
+ ret = PTR_ERR(fsi->bool_dir);
+ fsi->bool_dir = NULL;
+ goto err;
+ }
+
+ ret = -ENOMEM;
+ dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
+ if (!dentry)
+ goto err;
+
+ ret = -ENOMEM;
+ inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
+ if (!inode) {
+ dput(dentry);
+ goto err;
+ }
+
+ inode->i_ino = ++fsi->last_ino;
+ isec = selinux_inode(inode);
+ isec->sid = SECINITSID_DEVNULL;
+ isec->sclass = SECCLASS_CHR_FILE;
+ isec->initialized = LABEL_INITIALIZED;
+
+ init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
+ d_add(dentry, inode);
+
+ dentry = sel_make_dir(sb->s_root, "avc", &fsi->last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ ret = sel_make_avc_files(dentry);
+ if (ret)
+ goto err;
+
+ dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ ret = sel_make_ss_files(dentry);
+ if (ret)
+ goto err;
+
+ dentry = sel_make_dir(sb->s_root, "initial_contexts", &fsi->last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ ret = sel_make_initcon_files(dentry);
+ if (ret)
+ goto err;
+
+ fsi->class_dir = sel_make_dir(sb->s_root, CLASS_DIR_NAME, &fsi->last_ino);
+ if (IS_ERR(fsi->class_dir)) {
+ ret = PTR_ERR(fsi->class_dir);
+ fsi->class_dir = NULL;
+ goto err;
+ }
+
+ fsi->policycap_dir = sel_make_dir(sb->s_root, POLICYCAP_DIR_NAME,
+ &fsi->last_ino);
+ if (IS_ERR(fsi->policycap_dir)) {
+ ret = PTR_ERR(fsi->policycap_dir);
+ fsi->policycap_dir = NULL;
+ goto err;
+ }
+
+ ret = sel_make_policycap(fsi);
+ if (ret) {
+ pr_err("SELinux: failed to load policy capabilities\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ pr_err("SELinux: %s: failed while creating inodes\n",
+ __func__);
+
+ selinux_fs_info_free(sb);
+
+ return ret;
+}
+
+static int sel_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, sel_fill_super);
+}
+
+static const struct fs_context_operations sel_context_ops = {
+ .get_tree = sel_get_tree,
+};
+
+static int sel_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &sel_context_ops;
+ return 0;
+}
+
+static void sel_kill_sb(struct super_block *sb)
+{
+ selinux_fs_info_free(sb);
+ kill_litter_super(sb);
+}
+
+static struct file_system_type sel_fs_type = {
+ .name = "selinuxfs",
+ .init_fs_context = sel_init_fs_context,
+ .kill_sb = sel_kill_sb,
+};
+
+static struct vfsmount *selinuxfs_mount __ro_after_init;
+struct path selinux_null __ro_after_init;
+
+static int __init init_sel_fs(void)
+{
+ struct qstr null_name = QSTR_INIT(NULL_FILE_NAME,
+ sizeof(NULL_FILE_NAME)-1);
+ int err;
+
+ if (!selinux_enabled_boot)
+ return 0;
+
+ err = sysfs_create_mount_point(fs_kobj, "selinux");
+ if (err)
+ return err;
+
+ err = register_filesystem(&sel_fs_type);
+ if (err) {
+ sysfs_remove_mount_point(fs_kobj, "selinux");
+ return err;
+ }
+
+ selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
+ if (IS_ERR(selinuxfs_mount)) {
+ pr_err("selinuxfs: could not mount!\n");
+ err = PTR_ERR(selinuxfs_mount);
+ selinuxfs_mount = NULL;
+ selinux_null.mnt = NULL;
+ /* bail out before d_hash_and_lookup() dereferences the error pointer */
+ return err;
+ }
+ selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root,
+ &null_name);
+ if (IS_ERR(selinux_null.dentry)) {
+ pr_err("selinuxfs: could not lookup null!\n");
+ err = PTR_ERR(selinux_null.dentry);
+ selinux_null.dentry = NULL;
+ }
+
+ return err;
+}
+
+__initcall(init_sel_fs);
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+void exit_sel_fs(void)
+{
+ sysfs_remove_mount_point(fs_kobj, "selinux");
+ dput(selinux_null.dentry);
+ kern_unmount(selinuxfs_mount);
+ unregister_filesystem(&sel_fs_type);
+}
+#endif
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
new file mode 100644
index 000000000..8480ec6c6
--- /dev/null
+++ b/security/selinux/ss/avtab.c
@@ -0,0 +1,679 @@
+/*
+ * Implementation of the access vector table type.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+
+/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Copyright (C) 2003 Tresys Technology, LLC
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * Tuned number of hash slots for avtab to reduce memory usage
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include "avtab.h"
+#include "policydb.h"
+
+static struct kmem_cache *avtab_node_cachep __ro_after_init;
+static struct kmem_cache *avtab_xperms_cachep __ro_after_init;
+
+/* Based on MurmurHash3, written by Austin Appleby and placed in the
+ * public domain.
+ */
+static inline int avtab_hash(const struct avtab_key *keyp, u32 mask)
+{
+ static const u32 c1 = 0xcc9e2d51;
+ static const u32 c2 = 0x1b873593;
+ static const u32 r1 = 15;
+ static const u32 r2 = 13;
+ static const u32 m = 5;
+ static const u32 n = 0xe6546b64;
+
+ u32 hash = 0;
+
+#define mix(input) do { \
+ u32 v = input; \
+ v *= c1; \
+ v = (v << r1) | (v >> (32 - r1)); \
+ v *= c2; \
+ hash ^= v; \
+ hash = (hash << r2) | (hash >> (32 - r2)); \
+ hash = hash * m + n; \
+ } while (0)
+
+ mix(keyp->target_class);
+ mix(keyp->target_type);
+ mix(keyp->source_type);
+
+#undef mix
+
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+
+ return hash & mask;
+}
+
+static struct avtab_node*
+avtab_insert_node(struct avtab *h, int hvalue,
+ struct avtab_node *prev,
+ const struct avtab_key *key, const struct avtab_datum *datum)
+{
+ struct avtab_node *newnode;
+ struct avtab_extended_perms *xperms;
+ newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
+ if (newnode == NULL)
+ return NULL;
+ newnode->key = *key;
+
+ if (key->specified & AVTAB_XPERMS) {
+ xperms = kmem_cache_zalloc(avtab_xperms_cachep, GFP_KERNEL);
+ if (xperms == NULL) {
+ kmem_cache_free(avtab_node_cachep, newnode);
+ return NULL;
+ }
+ *xperms = *(datum->u.xperms);
+ newnode->datum.u.xperms = xperms;
+ } else {
+ newnode->datum.u.data = datum->u.data;
+ }
+
+ if (prev) {
+ newnode->next = prev->next;
+ prev->next = newnode;
+ } else {
+ struct avtab_node **n = &h->htable[hvalue];
+
+ newnode->next = *n;
+ *n = newnode;
+ }
+
+ h->nel++;
+ return newnode;
+}
+
+static int avtab_insert(struct avtab *h, const struct avtab_key *key,
+ const struct avtab_datum *datum)
+{
+ int hvalue;
+ struct avtab_node *prev, *cur, *newnode;
+ u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+ if (!h || !h->nslot)
+ return -EINVAL;
+
+ hvalue = avtab_hash(key, h->mask);
+ for (prev = NULL, cur = h->htable[hvalue];
+ cur;
+ prev = cur, cur = cur->next) {
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class == cur->key.target_class &&
+ (specified & cur->key.specified)) {
+ /* extended perms may not be unique */
+ if (specified & AVTAB_XPERMS)
+ break;
+ return -EEXIST;
+ }
+ if (key->source_type < cur->key.source_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type < cur->key.target_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class < cur->key.target_class)
+ break;
+ }
+
+ newnode = avtab_insert_node(h, hvalue, prev, key, datum);
+ if (!newnode)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Unlike avtab_insert(), this function allows multiple insertions of the same
+ * key/specified mask into the table, as needed by the conditional avtab.
+ * It also returns a pointer to the node inserted.
+ */
+struct avtab_node *avtab_insert_nonunique(struct avtab *h,
+ const struct avtab_key *key,
+ const struct avtab_datum *datum)
+{
+ int hvalue;
+ struct avtab_node *prev, *cur;
+ u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+ if (!h || !h->nslot)
+ return NULL;
+ hvalue = avtab_hash(key, h->mask);
+ for (prev = NULL, cur = h->htable[hvalue];
+ cur;
+ prev = cur, cur = cur->next) {
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class == cur->key.target_class &&
+ (specified & cur->key.specified))
+ break;
+ if (key->source_type < cur->key.source_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type < cur->key.target_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class < cur->key.target_class)
+ break;
+ }
+ return avtab_insert_node(h, hvalue, prev, key, datum);
+}
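+
+/*
+ * Illustrative sketch (editor's addition): two extended-permission entries
+ * may share one key, which avtab_insert() above rejects with -EEXIST but
+ * this function accepts. The type/class values and the xperms variables are
+ * arbitrary placeholders.
+ *
+ *	struct avtab_key k = {
+ *		.source_type = 10,
+ *		.target_type = 20,
+ *		.target_class = 2,
+ *		.specified = AVTAB_XPERMS_ALLOWED,
+ *	};
+ *	struct avtab_datum d1 = { .u.xperms = &xp_driver0 };
+ *	struct avtab_datum d2 = { .u.xperms = &xp_driver1 };
+ *
+ *	avtab_insert_nonunique(h, &k, &d1);	// covers one driver window
+ *	avtab_insert_nonunique(h, &k, &d2);	// same key, another window
+ */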
+
+struct avtab_datum *avtab_search(struct avtab *h, const struct avtab_key *key)
+{
+ int hvalue;
+ struct avtab_node *cur;
+ u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+ if (!h || !h->nslot)
+ return NULL;
+
+ hvalue = avtab_hash(key, h->mask);
+ for (cur = h->htable[hvalue]; cur;
+ cur = cur->next) {
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class == cur->key.target_class &&
+ (specified & cur->key.specified))
+ return &cur->datum;
+
+ if (key->source_type < cur->key.source_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type < cur->key.target_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class < cur->key.target_class)
+ break;
+ }
+
+ return NULL;
+}
+
+/* This search function returns a node pointer, and can be used in
+ * conjunction with avtab_search_next_node()
+ */
+struct avtab_node *avtab_search_node(struct avtab *h,
+ const struct avtab_key *key)
+{
+ int hvalue;
+ struct avtab_node *cur;
+ u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+
+ if (!h || !h->nslot)
+ return NULL;
+
+ hvalue = avtab_hash(key, h->mask);
+ for (cur = h->htable[hvalue]; cur;
+ cur = cur->next) {
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class == cur->key.target_class &&
+ (specified & cur->key.specified))
+ return cur;
+
+ if (key->source_type < cur->key.source_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type < cur->key.target_type)
+ break;
+ if (key->source_type == cur->key.source_type &&
+ key->target_type == cur->key.target_type &&
+ key->target_class < cur->key.target_class)
+ break;
+ }
+ return NULL;
+}
+
+struct avtab_node*
+avtab_search_node_next(struct avtab_node *node, int specified)
+{
+ struct avtab_node *cur;
+
+ if (!node)
+ return NULL;
+
+ specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ for (cur = node->next; cur; cur = cur->next) {
+ if (node->key.source_type == cur->key.source_type &&
+ node->key.target_type == cur->key.target_type &&
+ node->key.target_class == cur->key.target_class &&
+ (specified & cur->key.specified))
+ return cur;
+
+ if (node->key.source_type < cur->key.source_type)
+ break;
+ if (node->key.source_type == cur->key.source_type &&
+ node->key.target_type < cur->key.target_type)
+ break;
+ if (node->key.source_type == cur->key.source_type &&
+ node->key.target_type == cur->key.target_type &&
+ node->key.target_class < cur->key.target_class)
+ break;
+ }
+ return NULL;
+}
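+
+/*
+ * Illustrative sketch (editor's addition): avtab_search_node() and
+ * avtab_search_node_next() are meant to be paired to walk every entry that
+ * matches a key; cond_compute_av() in conditional.c uses this pattern.
+ * handle() is a placeholder.
+ *
+ *	struct avtab_node *node;
+ *
+ *	for (node = avtab_search_node(h, &key); node;
+ *	     node = avtab_search_node_next(node, key.specified))
+ *		handle(node);
+ */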
+
+void avtab_destroy(struct avtab *h)
+{
+ int i;
+ struct avtab_node *cur, *temp;
+
+ if (!h)
+ return;
+
+ for (i = 0; i < h->nslot; i++) {
+ cur = h->htable[i];
+ while (cur) {
+ temp = cur;
+ cur = cur->next;
+ if (temp->key.specified & AVTAB_XPERMS)
+ kmem_cache_free(avtab_xperms_cachep,
+ temp->datum.u.xperms);
+ kmem_cache_free(avtab_node_cachep, temp);
+ }
+ }
+ kvfree(h->htable);
+ h->htable = NULL;
+ h->nel = 0;
+ h->nslot = 0;
+ h->mask = 0;
+}
+
+void avtab_init(struct avtab *h)
+{
+ h->htable = NULL;
+ h->nel = 0;
+ h->nslot = 0;
+ h->mask = 0;
+}
+
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
+{
+ if (!nslot)
+ return 0;
+
+ h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
+ if (!h->htable)
+ return -ENOMEM;
+
+ h->nslot = nslot;
+ h->mask = nslot - 1;
+ return 0;
+}
+
+int avtab_alloc(struct avtab *h, u32 nrules)
+{
+ int rc;
+ u32 nslot = 0;
+
+ if (nrules != 0) {
+ u32 shift = 1;
+ u32 work = nrules >> 3;
+ while (work) {
+ work >>= 1;
+ shift++;
+ }
+ nslot = 1 << shift;
+ if (nslot > MAX_AVTAB_HASH_BUCKETS)
+ nslot = MAX_AVTAB_HASH_BUCKETS;
+
+ rc = avtab_alloc_common(h, nslot);
+ if (rc)
+ return rc;
+ }
+
+ pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
+ return 0;
+}
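+
+/*
+ * Worked example (editor's addition): for nrules = 1000, work starts at
+ * 1000 >> 3 = 125 and is shifted right seven times before reaching zero, so
+ * shift ends at 8 and nslot = 1 << 8 = 256. The table is thus sized to a
+ * power of two somewhat above nrules / 8, capped at MAX_AVTAB_HASH_BUCKETS;
+ * keeping nslot a power of two is what makes mask = nslot - 1 valid in
+ * avtab_hash().
+ */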
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+ return avtab_alloc_common(new, orig->nslot);
+}
+
+void avtab_hash_eval(struct avtab *h, char *tag)
+{
+ int i, chain_len, slots_used, max_chain_len;
+ unsigned long long chain2_len_sum;
+ struct avtab_node *cur;
+
+ slots_used = 0;
+ max_chain_len = 0;
+ chain2_len_sum = 0;
+ for (i = 0; i < h->nslot; i++) {
+ cur = h->htable[i];
+ if (cur) {
+ slots_used++;
+ chain_len = 0;
+ while (cur) {
+ chain_len++;
+ cur = cur->next;
+ }
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ chain2_len_sum += chain_len * chain_len;
+ }
+ }
+
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, "
+ "longest chain length %d sum of chain length^2 %llu\n",
+ tag, h->nel, slots_used, h->nslot, max_chain_len,
+ chain2_len_sum);
+}
+
+static const uint16_t spec_order[] = {
+ AVTAB_ALLOWED,
+ AVTAB_AUDITDENY,
+ AVTAB_AUDITALLOW,
+ AVTAB_TRANSITION,
+ AVTAB_CHANGE,
+ AVTAB_MEMBER,
+ AVTAB_XPERMS_ALLOWED,
+ AVTAB_XPERMS_AUDITALLOW,
+ AVTAB_XPERMS_DONTAUDIT
+};
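+
+/*
+ * Editor's note: in the pre-POLICYDB_VERSION_AVTAB format handled below, one
+ * on-disk entry may carry several specifier bits and is expanded into one
+ * avtab entry per bit, inserted in spec_order[] order. For example, an old
+ * entry with both AVTAB_ALLOWED and AVTAB_AUDITALLOW set produces two
+ * insertf() calls with the same key but different key.specified values and
+ * data words.
+ */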
+
+int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+ int (*insertf)(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p),
+ void *p)
+{
+ __le16 buf16[4];
+ u16 enabled;
+ u32 items, items2, val, vers = pol->policyvers;
+ struct avtab_key key;
+ struct avtab_datum datum;
+ struct avtab_extended_perms xperms;
+ __le32 buf32[ARRAY_SIZE(xperms.perms.p)];
+ int i, rc;
+ unsigned set;
+
+ memset(&key, 0, sizeof(struct avtab_key));
+ memset(&datum, 0, sizeof(struct avtab_datum));
+
+ if (vers < POLICYDB_VERSION_AVTAB) {
+ rc = next_entry(buf32, fp, sizeof(u32));
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ items2 = le32_to_cpu(buf32[0]);
+ if (items2 > ARRAY_SIZE(buf32)) {
+ pr_err("SELinux: avtab: entry overflow\n");
+ return -EINVAL;
+ }
+ rc = next_entry(buf32, fp, sizeof(u32)*items2);
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ items = 0;
+
+ val = le32_to_cpu(buf32[items++]);
+ key.source_type = (u16)val;
+ if (key.source_type != val) {
+ pr_err("SELinux: avtab: truncated source type\n");
+ return -EINVAL;
+ }
+ val = le32_to_cpu(buf32[items++]);
+ key.target_type = (u16)val;
+ if (key.target_type != val) {
+ pr_err("SELinux: avtab: truncated target type\n");
+ return -EINVAL;
+ }
+ val = le32_to_cpu(buf32[items++]);
+ key.target_class = (u16)val;
+ if (key.target_class != val) {
+ pr_err("SELinux: avtab: truncated target class\n");
+ return -EINVAL;
+ }
+
+ val = le32_to_cpu(buf32[items++]);
+ enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
+
+ if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
+ pr_err("SELinux: avtab: null entry\n");
+ return -EINVAL;
+ }
+ if ((val & AVTAB_AV) &&
+ (val & AVTAB_TYPE)) {
+ pr_err("SELinux: avtab: entry has both access vectors and types\n");
+ return -EINVAL;
+ }
+ if (val & AVTAB_XPERMS) {
+ pr_err("SELinux: avtab: entry has extended permissions\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
+ if (val & spec_order[i]) {
+ key.specified = spec_order[i] | enabled;
+ datum.u.data = le32_to_cpu(buf32[items++]);
+ rc = insertf(a, &key, &datum, p);
+ if (rc)
+ return rc;
+ }
+ }
+
+ if (items != items2) {
+ pr_err("SELinux: avtab: entry only had %d items, expected %d\n",
+ items2, items);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ rc = next_entry(buf16, fp, sizeof(u16)*4);
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+
+ items = 0;
+ key.source_type = le16_to_cpu(buf16[items++]);
+ key.target_type = le16_to_cpu(buf16[items++]);
+ key.target_class = le16_to_cpu(buf16[items++]);
+ key.specified = le16_to_cpu(buf16[items++]);
+
+ if (!policydb_type_isvalid(pol, key.source_type) ||
+ !policydb_type_isvalid(pol, key.target_type) ||
+ !policydb_class_isvalid(pol, key.target_class)) {
+ pr_err("SELinux: avtab: invalid type or class\n");
+ return -EINVAL;
+ }
+
+ set = 0;
+ for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
+ if (key.specified & spec_order[i])
+ set++;
+ }
+ if (!set || set > 1) {
+ pr_err("SELinux: avtab: more than one specifier\n");
+ return -EINVAL;
+ }
+
+ if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) &&
+ (key.specified & AVTAB_XPERMS)) {
+ pr_err("SELinux: avtab: policy version %u does not "
+ "support extended permissions rules and one "
+ "was specified\n", vers);
+ return -EINVAL;
+ } else if (key.specified & AVTAB_XPERMS) {
+ memset(&xperms, 0, sizeof(struct avtab_extended_perms));
+ rc = next_entry(&xperms.specified, fp, sizeof(u8));
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ rc = next_entry(&xperms.driver, fp, sizeof(u8));
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
+ xperms.perms.p[i] = le32_to_cpu(buf32[i]);
+ datum.u.xperms = &xperms;
+ } else {
+ rc = next_entry(buf32, fp, sizeof(u32));
+ if (rc) {
+ pr_err("SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ datum.u.data = le32_to_cpu(*buf32);
+ }
+ if ((key.specified & AVTAB_TYPE) &&
+ !policydb_type_isvalid(pol, datum.u.data)) {
+ pr_err("SELinux: avtab: invalid type\n");
+ return -EINVAL;
+ }
+ return insertf(a, &key, &datum, p);
+}
+
+static int avtab_insertf(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p)
+{
+ return avtab_insert(a, k, d);
+}
+
+int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
+{
+ int rc;
+ __le32 buf[1];
+ u32 nel, i;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc < 0) {
+ pr_err("SELinux: avtab: truncated table\n");
+ goto bad;
+ }
+ nel = le32_to_cpu(buf[0]);
+ if (!nel) {
+ pr_err("SELinux: avtab: table is empty\n");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ rc = avtab_alloc(a, nel);
+ if (rc)
+ goto bad;
+
+ for (i = 0; i < nel; i++) {
+ rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
+ if (rc) {
+ if (rc == -ENOMEM)
+ pr_err("SELinux: avtab: out of memory\n");
+ else if (rc == -EEXIST)
+ pr_err("SELinux: avtab: duplicate entry\n");
+
+ goto bad;
+ }
+ }
+
+ rc = 0;
+out:
+ return rc;
+
+bad:
+ avtab_destroy(a);
+ goto out;
+}
+
+int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp)
+{
+ __le16 buf16[4];
+ __le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
+ int rc;
+ unsigned int i;
+
+ buf16[0] = cpu_to_le16(cur->key.source_type);
+ buf16[1] = cpu_to_le16(cur->key.target_type);
+ buf16[2] = cpu_to_le16(cur->key.target_class);
+ buf16[3] = cpu_to_le16(cur->key.specified);
+ rc = put_entry(buf16, sizeof(u16), 4, fp);
+ if (rc)
+ return rc;
+
+ if (cur->key.specified & AVTAB_XPERMS) {
+ rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
+ if (rc)
+ return rc;
+ for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
+ buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
+ rc = put_entry(buf32, sizeof(u32),
+ ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
+ } else {
+ buf32[0] = cpu_to_le32(cur->datum.u.data);
+ rc = put_entry(buf32, sizeof(u32), 1, fp);
+ }
+ if (rc)
+ return rc;
+ return 0;
+}
+
+int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+{
+ unsigned int i;
+ int rc = 0;
+ struct avtab_node *cur;
+ __le32 buf[1];
+
+ buf[0] = cpu_to_le32(a->nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < a->nslot; i++) {
+ for (cur = a->htable[i]; cur;
+ cur = cur->next) {
+ rc = avtab_write_item(p, cur, fp);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+void __init avtab_cache_init(void)
+{
+ avtab_node_cachep = kmem_cache_create("avtab_node",
+ sizeof(struct avtab_node),
+ 0, SLAB_PANIC, NULL);
+ avtab_xperms_cachep = kmem_cache_create("avtab_extended_perms",
+ sizeof(struct avtab_extended_perms),
+ 0, SLAB_PANIC, NULL);
+}
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
new file mode 100644
index 000000000..d3ebea8d1
--- /dev/null
+++ b/security/selinux/ss/avtab.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * An access vector table (avtab) is a hash table
+ * of access vectors and transition types indexed
+ * by a type pair and a class. An access vector
+ * table is used to represent the type enforcement
+ * tables.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+
+/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Copyright (C) 2003 Tresys Technology, LLC
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * Tuned number of hash slots for avtab to reduce memory usage
+ */
+#ifndef _SS_AVTAB_H_
+#define _SS_AVTAB_H_
+
+#include "security.h"
+
+struct avtab_key {
+ u16 source_type; /* source type */
+ u16 target_type; /* target type */
+ u16 target_class; /* target object class */
+#define AVTAB_ALLOWED 0x0001
+#define AVTAB_AUDITALLOW 0x0002
+#define AVTAB_AUDITDENY 0x0004
+#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
+#define AVTAB_TRANSITION 0x0010
+#define AVTAB_MEMBER 0x0020
+#define AVTAB_CHANGE 0x0040
+#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+/* extended permissions */
+#define AVTAB_XPERMS_ALLOWED 0x0100
+#define AVTAB_XPERMS_AUDITALLOW 0x0200
+#define AVTAB_XPERMS_DONTAUDIT 0x0400
+#define AVTAB_XPERMS (AVTAB_XPERMS_ALLOWED | \
+ AVTAB_XPERMS_AUDITALLOW | \
+ AVTAB_XPERMS_DONTAUDIT)
+#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for use in cond_avtab */
+#define AVTAB_ENABLED 0x8000 /* reserved for use in cond_avtab */
+ u16 specified; /* what field is specified */
+};
+
+/*
+ * For operations that require more than the 32 permissions provided by
+ * the avc, extended permissions may be used to provide 256 bits of
+ * permissions.
+ */
+struct avtab_extended_perms {
+/* These are not flags. All 256 values may be used */
+#define AVTAB_XPERMS_IOCTLFUNCTION 0x01
+#define AVTAB_XPERMS_IOCTLDRIVER 0x02
+ /* extension of the avtab_key specified */
+ u8 specified; /* ioctl, netfilter, ... */
+ /*
+ * if 256 bits is not adequate, as is often the case with ioctls, then
+ * multiple extended perms may be used and the driver field
+ * specifies which permissions are included.
+ */
+ u8 driver;
+ /* 256 bits of permissions */
+ struct extended_perms_data perms;
+};
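+
+/*
+ * Illustrative sketch (editor's addition): for ioctl filtering, the low 16
+ * bits of the command select the window and the bit. Taking TCGETS (0x5401)
+ * as an example:
+ *
+ *	u16 cmd = 0x5401;
+ *	u8 driver = (cmd >> 8) & 0xff;	// 0x54: selects the xperms entry
+ *	u8 fn = cmd & 0xff;		// 0x01: bit index into perms
+ *
+ * One avtab_extended_perms entry therefore covers the 256 functions of a
+ * single driver byte, and multiple entries cover a wider command space.
+ */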
+
+struct avtab_datum {
+ union {
+ u32 data; /* access vector or type value */
+ struct avtab_extended_perms *xperms;
+ } u;
+};
+
+struct avtab_node {
+ struct avtab_key key;
+ struct avtab_datum datum;
+ struct avtab_node *next;
+};
+
+struct avtab {
+ struct avtab_node **htable;
+ u32 nel; /* number of elements */
+ u32 nslot; /* number of hash slots */
+ u32 mask; /* mask to compute hash func */
+};
+
+void avtab_init(struct avtab *h);
+int avtab_alloc(struct avtab *, u32);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
+struct avtab_datum *avtab_search(struct avtab *h, const struct avtab_key *k);
+void avtab_destroy(struct avtab *h);
+void avtab_hash_eval(struct avtab *h, char *tag);
+
+struct policydb;
+int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+ int (*insert)(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p),
+ void *p);
+
+int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp);
+int avtab_write(struct policydb *p, struct avtab *a, void *fp);
+
+struct avtab_node *avtab_insert_nonunique(struct avtab *h,
+ const struct avtab_key *key,
+ const struct avtab_datum *datum);
+
+struct avtab_node *avtab_search_node(struct avtab *h,
+ const struct avtab_key *key);
+
+struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified);
+
+#define MAX_AVTAB_HASH_BITS 16
+#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
+
+#endif /* _SS_AVTAB_H_ */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
new file mode 100644
index 000000000..e11219fdf
--- /dev/null
+++ b/security/selinux/ss/conditional.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Authors: Karl MacMillan <kmacmillan@tresys.com>
+ * Frank Mayer <mayerf@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include "security.h"
+#include "conditional.h"
+#include "services.h"
+
+/*
+ * cond_evaluate_expr evaluates a conditional expr
+ * in reverse polish notation. It returns true (1), false (0),
+ * or undefined (-1). Undefined occurs when the expression
+ * exceeds the stack depth of COND_EXPR_MAXDEPTH.
+ */
+static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
+{
+ u32 i;
+ int s[COND_EXPR_MAXDEPTH];
+ int sp = -1;
+
+ if (expr->len == 0)
+ return -1;
+
+ for (i = 0; i < expr->len; i++) {
+ struct cond_expr_node *node = &expr->nodes[i];
+
+ switch (node->expr_type) {
+ case COND_BOOL:
+ if (sp == (COND_EXPR_MAXDEPTH - 1))
+ return -1;
+ sp++;
+ s[sp] = p->bool_val_to_struct[node->bool - 1]->state;
+ break;
+ case COND_NOT:
+ if (sp < 0)
+ return -1;
+ s[sp] = !s[sp];
+ break;
+ case COND_OR:
+ if (sp < 1)
+ return -1;
+ sp--;
+ s[sp] |= s[sp + 1];
+ break;
+ case COND_AND:
+ if (sp < 1)
+ return -1;
+ sp--;
+ s[sp] &= s[sp + 1];
+ break;
+ case COND_XOR:
+ if (sp < 1)
+ return -1;
+ sp--;
+ s[sp] ^= s[sp + 1];
+ break;
+ case COND_EQ:
+ if (sp < 1)
+ return -1;
+ sp--;
+ s[sp] = (s[sp] == s[sp + 1]);
+ break;
+ case COND_NEQ:
+ if (sp < 1)
+ return -1;
+ sp--;
+ s[sp] = (s[sp] != s[sp + 1]);
+ break;
+ default:
+ return -1;
+ }
+ }
+ return s[0];
+}
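+
+/*
+ * Worked example (editor's addition): the policy expression "b1 && !b2" is
+ * stored in reverse polish notation as the node sequence
+ *
+ *	{ COND_BOOL, b1 } { COND_BOOL, b2 } { COND_NOT } { COND_AND }
+ *
+ * With b1 = 1 and b2 = 0 the stack evolves as
+ *
+ *	push 1		-> [1]
+ *	push 0		-> [1, 0]
+ *	COND_NOT	-> [1, 1]
+ *	COND_AND	-> [1]
+ *
+ * and the function returns 1.
+ */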
+
+/*
+ * evaluate_cond_node evaluates the conditional stored in
+ * a struct cond_node and if the result is different than the
+ * current state of the node it sets the rules in the true/false
+ * list appropriately. If the result of the expression is undefined,
+ * all of the rules are disabled for safety.
+ */
+static void evaluate_cond_node(struct policydb *p, struct cond_node *node)
+{
+ struct avtab_node *avnode;
+ int new_state;
+ u32 i;
+
+ new_state = cond_evaluate_expr(p, &node->expr);
+ if (new_state != node->cur_state) {
+ node->cur_state = new_state;
+ if (new_state == -1)
+ pr_err("SELinux: expression result was undefined - disabling all rules.\n");
+ /* turn the rules on or off */
+ for (i = 0; i < node->true_list.len; i++) {
+ avnode = node->true_list.nodes[i];
+ if (new_state <= 0)
+ avnode->key.specified &= ~AVTAB_ENABLED;
+ else
+ avnode->key.specified |= AVTAB_ENABLED;
+ }
+
+ for (i = 0; i < node->false_list.len; i++) {
+ avnode = node->false_list.nodes[i];
+ /* -1 or 1 */
+ if (new_state)
+ avnode->key.specified &= ~AVTAB_ENABLED;
+ else
+ avnode->key.specified |= AVTAB_ENABLED;
+ }
+ }
+}
+
+void evaluate_cond_nodes(struct policydb *p)
+{
+ u32 i;
+
+ for (i = 0; i < p->cond_list_len; i++)
+ evaluate_cond_node(p, &p->cond_list[i]);
+}
+
+void cond_policydb_init(struct policydb *p)
+{
+ p->bool_val_to_struct = NULL;
+ p->cond_list = NULL;
+ p->cond_list_len = 0;
+
+ avtab_init(&p->te_cond_avtab);
+}
+
+static void cond_node_destroy(struct cond_node *node)
+{
+ kfree(node->expr.nodes);
+ /* the avtab_ptr_t nodes are destroyed by the avtab */
+ kfree(node->true_list.nodes);
+ kfree(node->false_list.nodes);
+}
+
+static void cond_list_destroy(struct policydb *p)
+{
+ u32 i;
+
+ for (i = 0; i < p->cond_list_len; i++)
+ cond_node_destroy(&p->cond_list[i]);
+ kfree(p->cond_list);
+ p->cond_list = NULL;
+ p->cond_list_len = 0;
+}
+
+void cond_policydb_destroy(struct policydb *p)
+{
+ kfree(p->bool_val_to_struct);
+ avtab_destroy(&p->te_cond_avtab);
+ cond_list_destroy(p);
+}
+
+int cond_init_bool_indexes(struct policydb *p)
+{
+ kfree(p->bool_val_to_struct);
+ p->bool_val_to_struct = kmalloc_array(p->p_bools.nprim,
+ sizeof(*p->bool_val_to_struct),
+ GFP_KERNEL);
+ if (!p->bool_val_to_struct)
+ return -ENOMEM;
+ return 0;
+}
+
+int cond_destroy_bool(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+int cond_index_bool(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct cond_bool_datum *booldatum;
+
+ booldatum = datum;
+ p = datap;
+
+ if (!booldatum->value || booldatum->value > p->p_bools.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_BOOLS][booldatum->value - 1] = key;
+ p->bool_val_to_struct[booldatum->value - 1] = booldatum;
+
+ return 0;
+}
+
+static int bool_isvalid(struct cond_bool_datum *b)
+{
+ if (!(b->state == 0 || b->state == 1))
+ return 0;
+ return 1;
+}
+
+int cond_read_bool(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct cond_bool_datum *booldatum;
+ __le32 buf[3];
+ u32 len;
+ int rc;
+
+ booldatum = kzalloc(sizeof(*booldatum), GFP_KERNEL);
+ if (!booldatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ goto err;
+
+ booldatum->value = le32_to_cpu(buf[0]);
+ booldatum->state = le32_to_cpu(buf[1]);
+
+ rc = -EINVAL;
+ if (!bool_isvalid(booldatum))
+ goto err;
+
+ len = le32_to_cpu(buf[2]);
+ if (len == 0 || len == (u32)-1)
+ goto err;
+
+ rc = -ENOMEM;
+ key = kmalloc(len + 1, GFP_KERNEL);
+ if (!key)
+ goto err;
+ rc = next_entry(key, fp, len);
+ if (rc)
+ goto err;
+ key[len] = '\0';
+ rc = symtab_insert(s, key, booldatum);
+ if (rc)
+ goto err;
+
+ return 0;
+err:
+ cond_destroy_bool(key, booldatum, NULL);
+ return rc;
+}
+
+struct cond_insertf_data {
+ struct policydb *p;
+ struct avtab_node **dst;
+ struct cond_av_list *other;
+};
+
+static int cond_insertf(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *ptr)
+{
+ struct cond_insertf_data *data = ptr;
+ struct policydb *p = data->p;
+ struct cond_av_list *other = data->other;
+ struct avtab_node *node_ptr;
+ u32 i;
+ bool found;
+
+ /*
+ * For type rules we have to make certain there aren't any
+ * conflicting rules by searching the te_avtab and the
+ * cond_te_avtab.
+ */
+ if (k->specified & AVTAB_TYPE) {
+ if (avtab_search(&p->te_avtab, k)) {
+ pr_err("SELinux: type rule already exists outside of a conditional.\n");
+ return -EINVAL;
+ }
+ /*
+ * If we are reading the false list, other will be a pointer to
+ * the true list. We can have duplicate entries if there is only
+ * 1 other entry and it is in our true list.
+ *
+ * If we are reading the true list (other == NULL) there shouldn't
+ * be any other entries.
+ */
+ if (other) {
+ node_ptr = avtab_search_node(&p->te_cond_avtab, k);
+ if (node_ptr) {
+ if (avtab_search_node_next(node_ptr, k->specified)) {
+ pr_err("SELinux: too many conflicting type rules.\n");
+ return -EINVAL;
+ }
+ found = false;
+ for (i = 0; i < other->len; i++) {
+ if (other->nodes[i] == node_ptr) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("SELinux: conflicting type rules.\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (avtab_search(&p->te_cond_avtab, k)) {
+ pr_err("SELinux: conflicting type rules when adding type rule for true.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
+ if (!node_ptr) {
+ pr_err("SELinux: could not insert rule.\n");
+ return -ENOMEM;
+ }
+
+ *data->dst = node_ptr;
+ return 0;
+}
+
+static int cond_read_av_list(struct policydb *p, void *fp,
+ struct cond_av_list *list,
+ struct cond_av_list *other)
+{
+ int rc;
+ __le32 buf[1];
+ u32 i, len;
+ struct cond_insertf_data data;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(buf[0]);
+ if (len == 0)
+ return 0;
+
+ list->nodes = kcalloc(len, sizeof(*list->nodes), GFP_KERNEL);
+ if (!list->nodes)
+ return -ENOMEM;
+
+ data.p = p;
+ data.other = other;
+ for (i = 0; i < len; i++) {
+ data.dst = &list->nodes[i];
+ rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
+ &data);
+ if (rc) {
+ kfree(list->nodes);
+ list->nodes = NULL;
+ return rc;
+ }
+ }
+
+ list->len = len;
+ return 0;
+}
+
+static int expr_node_isvalid(struct policydb *p, struct cond_expr_node *expr)
+{
+ if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
+ pr_err("SELinux: conditional expressions uses unknown operator.\n");
+ return 0;
+ }
+
+ if (expr->bool > p->p_bools.nprim) {
+ pr_err("SELinux: conditional expressions uses unknown bool.\n");
+ return 0;
+ }
+ return 1;
+}
+
+static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
+{
+ __le32 buf[2];
+ u32 i, len;
+ int rc;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ return rc;
+
+ node->cur_state = le32_to_cpu(buf[0]);
+
+ /* expr */
+ len = le32_to_cpu(buf[1]);
+ node->expr.nodes = kcalloc(len, sizeof(*node->expr.nodes), GFP_KERNEL);
+ if (!node->expr.nodes)
+ return -ENOMEM;
+
+ node->expr.len = len;
+
+ for (i = 0; i < len; i++) {
+ struct cond_expr_node *expr = &node->expr.nodes[i];
+
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ return rc;
+
+ expr->expr_type = le32_to_cpu(buf[0]);
+ expr->bool = le32_to_cpu(buf[1]);
+
+ if (!expr_node_isvalid(p, expr))
+ return -EINVAL;
+ }
+
+ rc = cond_read_av_list(p, fp, &node->true_list, NULL);
+ if (rc)
+ return rc;
+ return cond_read_av_list(p, fp, &node->false_list, &node->true_list);
+}
+
+int cond_read_list(struct policydb *p, void *fp)
+{
+ __le32 buf[1];
+ u32 i, len;
+ int rc;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(buf[0]);
+
+ p->cond_list = kcalloc(len, sizeof(*p->cond_list), GFP_KERNEL);
+ if (!p->cond_list)
+ return -ENOMEM;
+
+ rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
+ if (rc)
+ goto err;
+
+ p->cond_list_len = len;
+
+ for (i = 0; i < len; i++) {
+ rc = cond_read_node(p, &p->cond_list[i], fp);
+ if (rc)
+ goto err;
+ }
+ return 0;
+err:
+ cond_list_destroy(p);
+ return rc;
+}
+
+int cond_write_bool(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cond_bool_datum *booldatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ u32 len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(booldatum->value);
+ buf[1] = cpu_to_le32(booldatum->state);
+ buf[2] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+/*
+ * cond_write_av_list() doesn't write out the av_list nodes.
+ * Instead it writes out the key/value pairs from the avtab. This
+ * is necessary because there is no way to uniquely identify rules
+ * in the avtab, so it is not possible to associate individual rules
+ * in the avtab with a conditional without saving them as part of
+ * the conditional. This means that the avtab with the conditional
+ * rules will not be saved but will be rebuilt on policy load.
+ */
+static int cond_write_av_list(struct policydb *p,
+ struct cond_av_list *list, struct policy_file *fp)
+{
+ __le32 buf[1];
+ u32 i;
+ int rc;
+
+ buf[0] = cpu_to_le32(list->len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < list->len; i++) {
+ rc = avtab_write_item(p, list->nodes[i], fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cond_write_node(struct policydb *p, struct cond_node *node,
+ struct policy_file *fp)
+{
+ __le32 buf[2];
+ int rc;
+ u32 i;
+
+ buf[0] = cpu_to_le32(node->cur_state);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(node->expr.len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < node->expr.len; i++) {
+ buf[0] = cpu_to_le32(node->expr.nodes[i].expr_type);
+ buf[1] = cpu_to_le32(node->expr.nodes[i].bool);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = cond_write_av_list(p, &node->true_list, fp);
+ if (rc)
+ return rc;
+ rc = cond_write_av_list(p, &node->false_list, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int cond_write_list(struct policydb *p, void *fp)
+{
+ u32 i;
+ __le32 buf[1];
+ int rc;
+
+ buf[0] = cpu_to_le32(p->cond_list_len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p->cond_list_len; i++) {
+ rc = cond_write_node(p, &p->cond_list[i], fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+ struct extended_perms_decision *xpermd)
+{
+ struct avtab_node *node;
+
+ if (!ctab || !key || !xpermd)
+ return;
+
+ for (node = avtab_search_node(ctab, key); node;
+ node = avtab_search_node_next(node, key->specified)) {
+ if (node->key.specified & AVTAB_ENABLED)
+ services_compute_xperms_decision(xpermd, node);
+ }
+}
+
+/* Determine whether additional permissions are granted by the conditional
+ * av table, and if so, add them to the result
+ */
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+ struct av_decision *avd, struct extended_perms *xperms)
+{
+ struct avtab_node *node;
+
+ if (!ctab || !key || !avd)
+ return;
+
+ for (node = avtab_search_node(ctab, key); node;
+ node = avtab_search_node_next(node, key->specified)) {
+ if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
+ (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
+ avd->allowed |= node->datum.u.data;
+ if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
+ (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
+ /* Since a '0' in an auditdeny mask represents a
+ * permission we do NOT want to audit (dontaudit), we use
+ * the '&' operator to ensure that all '0's in the mask
+ * are retained (unlike the allow and auditallow cases).
+ */
+ avd->auditdeny &= node->datum.u.data;
+ if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
+ (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
+ avd->auditallow |= node->datum.u.data;
+ if (xperms && (node->key.specified & AVTAB_ENABLED) &&
+ (node->key.specified & AVTAB_XPERMS))
+ services_compute_xperms_drivers(xperms, node);
+ }
+}
+
+static int cond_dup_av_list(struct cond_av_list *new,
+ struct cond_av_list *orig,
+ struct avtab *avtab)
+{
+ u32 i;
+
+ memset(new, 0, sizeof(*new));
+
+ new->nodes = kcalloc(orig->len, sizeof(*new->nodes), GFP_KERNEL);
+ if (!new->nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < orig->len; i++) {
+ new->nodes[i] = avtab_insert_nonunique(avtab,
+ &orig->nodes[i]->key,
+ &orig->nodes[i]->datum);
+ if (!new->nodes[i])
+ return -ENOMEM;
+ new->len++;
+ }
+
+ return 0;
+}
+
+static int duplicate_policydb_cond_list(struct policydb *newp,
+ struct policydb *origp)
+{
+ int rc;
+ u32 i;
+
+ rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
+ if (rc)
+ return rc;
+
+ newp->cond_list_len = 0;
+ newp->cond_list = kcalloc(origp->cond_list_len,
+ sizeof(*newp->cond_list),
+ GFP_KERNEL);
+ if (!newp->cond_list)
+ goto error;
+
+ for (i = 0; i < origp->cond_list_len; i++) {
+ struct cond_node *newn = &newp->cond_list[i];
+ struct cond_node *orign = &origp->cond_list[i];
+
+ newp->cond_list_len++;
+
+ newn->cur_state = orign->cur_state;
+ newn->expr.nodes = kmemdup(orign->expr.nodes,
+ orign->expr.len * sizeof(*orign->expr.nodes),
+ GFP_KERNEL);
+ if (!newn->expr.nodes)
+ goto error;
+
+ newn->expr.len = orign->expr.len;
+
+ rc = cond_dup_av_list(&newn->true_list, &orign->true_list,
+ &newp->te_cond_avtab);
+ if (rc)
+ goto error;
+
+ rc = cond_dup_av_list(&newn->false_list, &orign->false_list,
+ &newp->te_cond_avtab);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ avtab_destroy(&newp->te_cond_avtab);
+ cond_list_destroy(newp);
+ return -ENOMEM;
+}
+
+static int cond_bools_destroy(void *key, void *datum, void *args)
+{
+ /* key was not copied so no need to free here */
+ kfree(datum);
+ return 0;
+}
+
+static int cond_bools_copy(struct hashtab_node *new, struct hashtab_node *orig, void *args)
+{
+ struct cond_bool_datum *datum;
+
+ datum = kmemdup(orig->datum, sizeof(struct cond_bool_datum),
+ GFP_KERNEL);
+ if (!datum)
+ return -ENOMEM;
+
+ new->key = orig->key; /* No need to copy, never modified */
+ new->datum = datum;
+ return 0;
+}
+
+static int cond_bools_index(void *key, void *datum, void *args)
+{
+ struct cond_bool_datum *booldatum, **cond_bool_array;
+
+ booldatum = datum;
+ cond_bool_array = args;
+ cond_bool_array[booldatum->value - 1] = booldatum;
+
+ return 0;
+}
+
+static int duplicate_policydb_bools(struct policydb *newdb,
+ struct policydb *orig)
+{
+ struct cond_bool_datum **cond_bool_array;
+ int rc;
+
+ cond_bool_array = kmalloc_array(orig->p_bools.nprim,
+ sizeof(*orig->bool_val_to_struct),
+ GFP_KERNEL);
+ if (!cond_bool_array)
+ return -ENOMEM;
+
+ rc = hashtab_duplicate(&newdb->p_bools.table, &orig->p_bools.table,
+ cond_bools_copy, cond_bools_destroy, NULL);
+ if (rc) {
+ kfree(cond_bool_array);
+ return -ENOMEM;
+ }
+
+ hashtab_map(&newdb->p_bools.table, cond_bools_index, cond_bool_array);
+ newdb->bool_val_to_struct = cond_bool_array;
+
+ newdb->p_bools.nprim = orig->p_bools.nprim;
+
+ return 0;
+}
+
+void cond_policydb_destroy_dup(struct policydb *p)
+{
+ hashtab_map(&p->p_bools.table, cond_bools_destroy, NULL);
+ hashtab_destroy(&p->p_bools.table);
+ cond_policydb_destroy(p);
+}
+
+int cond_policydb_dup(struct policydb *new, struct policydb *orig)
+{
+ cond_policydb_init(new);
+
+ if (duplicate_policydb_bools(new, orig))
+ return -ENOMEM;
+
+ if (duplicate_policydb_cond_list(new, orig)) {
+ cond_policydb_destroy_dup(new);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
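
The merge semantics in cond_compute_av() above are easy to miss: allowed and
auditallow bits accumulate with '|', while auditdeny shrinks with '&' so that a
dontaudit (a zero bit) contributed by any enabled rule survives. A minimal
userspace sketch with made-up permission masks (plain uint32_t stand-ins, not
the kernel structures):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t allowed = 0x1;            /* from the unconditional avtab */
	uint32_t auditdeny = 0xffffffff;   /* audit every denial by default */

	/* two enabled conditional rules (fabricated masks) */
	uint32_t rule_allow = 0x6;         /* allow two more permissions */
	uint32_t rule_dontaudit = ~0x2u;   /* dontaudit permission bit 1 */

	allowed |= rule_allow;             /* 0x7: grants accumulate */
	auditdeny &= rule_dontaudit;       /* bit 1 stays cleared for good */

	printf("allowed=%#x, bit 1 audited on denial: %u\n",
	       allowed, (auditdeny >> 1) & 1u);
	return 0;
}
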
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
new file mode 100644
index 000000000..e47ec6dde
--- /dev/null
+++ b/security/selinux/ss/conditional.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Authors: Karl MacMillan <kmacmillan@tresys.com>
+ * Frank Mayer <mayerf@tresys.com>
+ *
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ */
+
+#ifndef _CONDITIONAL_H_
+#define _CONDITIONAL_H_
+
+#include "avtab.h"
+#include "symtab.h"
+#include "policydb.h"
+#include "../include/conditional.h"
+
+#define COND_EXPR_MAXDEPTH 10
+
+/*
+ * A conditional expression is a list of operators and operands
+ * in reverse Polish notation.
+ */
+struct cond_expr_node {
+#define COND_BOOL 1 /* plain bool */
+#define COND_NOT 2 /* !bool */
+#define COND_OR 3 /* bool || bool */
+#define COND_AND 4 /* bool && bool */
+#define COND_XOR 5 /* bool ^ bool */
+#define COND_EQ 6 /* bool == bool */
+#define COND_NEQ 7 /* bool != bool */
+#define COND_LAST COND_NEQ
+ u32 expr_type;
+ u32 bool;
+};
+
+struct cond_expr {
+ struct cond_expr_node *nodes;
+ u32 len;
+};
+
+/*
+ * Each cond_node contains a list of rules to be enabled/disabled
+ * depending on the current value of the conditional expression. This
+ * struct is for that list.
+ */
+struct cond_av_list {
+ struct avtab_node **nodes;
+ u32 len;
+};
+
+/*
+ * A cond node represents a conditional block in a policy. It
+ * contains a conditional expression, the current state of the expression,
+ * two lists of rules to enable/disable depending on the value of the
+ * expression (the true list corresponds to the 'if' branch and the
+ * false list to the 'else' branch).
+ */
+struct cond_node {
+ int cur_state;
+ struct cond_expr expr;
+ struct cond_av_list true_list;
+ struct cond_av_list false_list;
+};
+
+void cond_policydb_init(struct policydb *p);
+void cond_policydb_destroy(struct policydb *p);
+
+int cond_init_bool_indexes(struct policydb *p);
+int cond_destroy_bool(void *key, void *datum, void *p);
+
+int cond_index_bool(void *key, void *datum, void *datap);
+
+int cond_read_bool(struct policydb *p, struct symtab *s, void *fp);
+int cond_read_list(struct policydb *p, void *fp);
+int cond_write_bool(void *key, void *datum, void *ptr);
+int cond_write_list(struct policydb *p, void *fp);
+
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+ struct av_decision *avd, struct extended_perms *xperms);
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+ struct extended_perms_decision *xpermd);
+void evaluate_cond_nodes(struct policydb *p);
+void cond_policydb_destroy_dup(struct policydb *p);
+int cond_policydb_dup(struct policydb *new, struct policydb *orig);
+
+#endif /* _CONDITIONAL_H_ */
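
Since cond_expr_node stores the expression in reverse Polish notation,
evaluation is a small stack machine bounded by COND_EXPR_MAXDEPTH (the kernel's
evaluator lives in conditional.c and is not part of this hunk). A hypothetical
standalone sketch of that evaluation, with boolean values supplied through an
array; for example, "a && !b" would be encoded as BOOL(a), BOOL(b), NOT, AND:

#include <stdint.h>

enum { C_BOOL = 1, C_NOT, C_OR, C_AND, C_XOR, C_EQ, C_NEQ };
#define MAXDEPTH 10	/* mirrors COND_EXPR_MAXDEPTH */

struct node { uint32_t expr_type; uint32_t boolid; };

/* Returns 0/1, or -1 on a malformed expression. */
static int eval_rpn(const struct node *n, uint32_t len, const int *boolvals)
{
	int s[MAXDEPTH];
	int sp = -1;
	uint32_t i;

	for (i = 0; i < len; i++) {
		switch (n[i].expr_type) {
		case C_BOOL:
			if (sp >= MAXDEPTH - 1)
				return -1;	/* expression too deep */
			s[++sp] = boolvals[n[i].boolid];
			break;
		case C_NOT:
			if (sp < 0)
				return -1;
			s[sp] = !s[sp];
			break;
		default:
			if (sp < 1)
				return -1;	/* binary op needs two operands */
			sp--;
			switch (n[i].expr_type) {
			case C_OR:  s[sp] = s[sp] || s[sp + 1]; break;
			case C_AND: s[sp] = s[sp] && s[sp + 1]; break;
			case C_XOR: s[sp] = s[sp] ^  s[sp + 1]; break;
			case C_EQ:  s[sp] = s[sp] == s[sp + 1]; break;
			case C_NEQ: s[sp] = s[sp] != s[sp + 1]; break;
			default:    return -1;
			}
		}
	}
	return sp == 0 ? s[0] : -1;
}
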
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
new file mode 100644
index 000000000..4e563be9e
--- /dev/null
+++ b/security/selinux/ss/constraint.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A constraint is a condition that must be satisfied in
+ * order for one or more permissions to be granted.
+ * Constraints are used to impose additional restrictions
+ * beyond the type-based rules in `te' or the role-based
+ * transition rules in `rbac'. Constraints are typically
+ * used to prevent a process from transitioning to a new user
+ * identity or role unless it is in a privileged type.
+ * Constraints are likewise typically used to prevent a
+ * process from labeling an object with a different user
+ * identity.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_CONSTRAINT_H_
+#define _SS_CONSTRAINT_H_
+
+#include "ebitmap.h"
+
+#define CEXPR_MAXDEPTH 5
+
+struct constraint_expr {
+#define CEXPR_NOT 1 /* not expr */
+#define CEXPR_AND 2 /* expr and expr */
+#define CEXPR_OR 3 /* expr or expr */
+#define CEXPR_ATTR 4 /* attr op attr */
+#define CEXPR_NAMES 5 /* attr op names */
+ u32 expr_type; /* expression type */
+
+#define CEXPR_USER 1 /* user */
+#define CEXPR_ROLE 2 /* role */
+#define CEXPR_TYPE 4 /* type */
+#define CEXPR_TARGET 8 /* target if set, source otherwise */
+#define CEXPR_XTARGET 16 /* special 3rd target for validatetrans rule */
+#define CEXPR_L1L2 32 /* low level 1 vs. low level 2 */
+#define CEXPR_L1H2 64 /* low level 1 vs. high level 2 */
+#define CEXPR_H1L2 128 /* high level 1 vs. low level 2 */
+#define CEXPR_H1H2 256 /* high level 1 vs. high level 2 */
+#define CEXPR_L1H1 512 /* low level 1 vs. high level 1 */
+#define CEXPR_L2H2 1024 /* low level 2 vs. high level 2 */
+ u32 attr; /* attribute */
+
+#define CEXPR_EQ 1 /* == or eq */
+#define CEXPR_NEQ 2 /* != */
+#define CEXPR_DOM 3 /* dom */
+#define CEXPR_DOMBY 4 /* domby */
+#define CEXPR_INCOMP 5 /* incomp */
+ u32 op; /* operator */
+
+ struct ebitmap names; /* names */
+ struct type_set *type_names;
+
+ struct constraint_expr *next; /* next expression */
+};
+
+struct constraint_node {
+ u32 permissions; /* constrained permissions */
+ struct constraint_expr *expr; /* constraint on permissions */
+ struct constraint_node *next; /* next constraint */
+};
+
+#endif /* _SS_CONSTRAINT_H_ */
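
Constraint expressions are likewise kept in postfix order (chained through the
next pointers) and evaluated by a bounded stack machine, constraint_expr_eval()
in services.c, which is outside this hunk. As a hedged illustration only: a
single CEXPR_ATTR node naming CEXPR_USER compares the user fields of the source
and target contexts under the chosen operator, and for users only CEXPR_EQ and
CEXPR_NEQ are meaningful:

#include <stdint.h>

/* simplified stand-ins for struct context and the operator codes */
struct ctx { uint32_t user, role, type; };
enum { OP_EQ = 1, OP_NEQ = 2 };

static int eval_user_attr(const struct ctx *src, const struct ctx *tgt,
			  uint32_t op)
{
	switch (op) {
	case OP_EQ:
		return src->user == tgt->user;
	case OP_NEQ:
		return src->user != tgt->user;
	default:
		/* dom/domby/incomp apply to roles and MLS levels, not users */
		return 0;
	}
}
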
diff --git a/security/selinux/ss/context.c b/security/selinux/ss/context.c
new file mode 100644
index 000000000..38bc0aa52
--- /dev/null
+++ b/security/selinux/ss/context.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementations of the security context functions.
+ *
+ * Author: Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2020 Red Hat, Inc.
+ */
+
+#include <linux/jhash.h>
+
+#include "context.h"
+#include "mls.h"
+
+u32 context_compute_hash(const struct context *c)
+{
+ u32 hash = 0;
+
+ /*
+ * If a context is invalid, it will always be represented by a
+ * context struct with only the len & str set (and vice versa)
+ * under a given policy. Since context structs from different
+ * policies should never meet, it is safe to hash valid and
+ * invalid contexts differently. The context_cmp() function
+ * already operates under the same assumption.
+ */
+ if (c->len)
+ return full_name_hash(NULL, c->str, c->len);
+
+ hash = jhash_3words(c->user, c->role, c->type, hash);
+ hash = mls_range_hash(&c->range, hash);
+ return hash;
+}
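
The branch structure above matters for correctness: a context is stored either
as raw fields (valid) or as a string (unmapped), never both, so the two hash
paths cannot collide for the same entry. A toy userspace version of the same
shape, using an FNV-1a stand-in for jhash and omitting the MLS range hashing:

#include <stdint.h>
#include <stddef.h>

struct toy_ctx { uint32_t user, role, type, len; const char *str; };

static uint32_t fnv1a(const void *p, size_t n)	/* toy stand-in for jhash */
{
	const unsigned char *b = p;
	uint32_t h = 2166136261u;

	while (n--)
		h = (h ^ *b++) * 16777619u;
	return h;
}

static uint32_t toy_ctx_hash(const struct toy_ctx *c)
{
	uint32_t f[3];

	if (c->len)				/* unmapped: hash the string */
		return fnv1a(c->str, c->len);

	f[0] = c->user;				/* valid: hash the fields */
	f[1] = c->role;
	f[2] = c->type;
	return fnv1a(f, sizeof(f));
}
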
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
new file mode 100644
index 000000000..eda32c3d4
--- /dev/null
+++ b/security/selinux/ss/context.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A security context is a set of security attributes
+ * associated with each subject and object controlled
+ * by the security policy. Security contexts are
+ * externally represented as variable-length strings
+ * that can be interpreted by a user or application
+ * with an understanding of the security policy.
+ * Internally, the security server uses a simple
+ * structure. This structure is private to the
+ * security server and can be changed without affecting
+ * clients of the security server.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_CONTEXT_H_
+#define _SS_CONTEXT_H_
+
+#include "ebitmap.h"
+#include "mls_types.h"
+#include "security.h"
+
+/*
+ * A security context consists of an authenticated user
+ * identity, a role, a type and an MLS range.
+ */
+struct context {
+ u32 user;
+ u32 role;
+ u32 type;
+ u32 len; /* length of string in bytes */
+ struct mls_range range;
+ char *str; /* string representation if context cannot be mapped. */
+};
+
+static inline void mls_context_init(struct context *c)
+{
+ memset(&c->range, 0, sizeof(c->range));
+}
+
+static inline int mls_context_cpy(struct context *dst, const struct context *src)
+{
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[0].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
+ if (rc)
+ goto out;
+
+ dst->range.level[1].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
+/*
+ * Sets both levels in the MLS range of 'dst' to the low level of 'src'.
+ */
+static inline int mls_context_cpy_low(struct context *dst, const struct context *src)
+{
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[0].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
+ if (rc)
+ goto out;
+
+ dst->range.level[1].sens = src->range.level[0].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
+/*
+ * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
+ */
+static inline int mls_context_cpy_high(struct context *dst, const struct context *src)
+{
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
+ if (rc)
+ goto out;
+
+ dst->range.level[1].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
+
+static inline int mls_context_glblub(struct context *dst,
+ const struct context *c1, const struct context *c2)
+{
+ struct mls_range *dr = &dst->range;
+ const struct mls_range *r1 = &c1->range, *r2 = &c2->range;
+ int rc = 0;
+
+ if (r1->level[1].sens < r2->level[0].sens ||
+ r2->level[1].sens < r1->level[0].sens)
+ /* These ranges have no common sensitivities */
+ return -EINVAL;
+
+ /* Take the greatest of the low */
+ dr->level[0].sens = max(r1->level[0].sens, r2->level[0].sens);
+
+ /* Take the least of the high */
+ dr->level[1].sens = min(r1->level[1].sens, r2->level[1].sens);
+
+ rc = ebitmap_and(&dr->level[0].cat,
+ &r1->level[0].cat, &r2->level[0].cat);
+ if (rc)
+ goto out;
+
+ rc = ebitmap_and(&dr->level[1].cat,
+ &r1->level[1].cat, &r2->level[1].cat);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
+static inline int mls_context_cmp(const struct context *c1, const struct context *c2)
+{
+ return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
+ ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
+ (c1->range.level[1].sens == c2->range.level[1].sens) &&
+ ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat));
+}
+
+static inline void mls_context_destroy(struct context *c)
+{
+ ebitmap_destroy(&c->range.level[0].cat);
+ ebitmap_destroy(&c->range.level[1].cat);
+ mls_context_init(c);
+}
+
+static inline void context_init(struct context *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline int context_cpy(struct context *dst, const struct context *src)
+{
+ int rc;
+
+ dst->user = src->user;
+ dst->role = src->role;
+ dst->type = src->type;
+ if (src->str) {
+ dst->str = kstrdup(src->str, GFP_ATOMIC);
+ if (!dst->str)
+ return -ENOMEM;
+ dst->len = src->len;
+ } else {
+ dst->str = NULL;
+ dst->len = 0;
+ }
+ rc = mls_context_cpy(dst, src);
+ if (rc) {
+ kfree(dst->str);
+ return rc;
+ }
+ return 0;
+}
+
+static inline void context_destroy(struct context *c)
+{
+ c->user = c->role = c->type = 0;
+ kfree(c->str);
+ c->str = NULL;
+ c->len = 0;
+ mls_context_destroy(c);
+}
+
+static inline int context_cmp(const struct context *c1, const struct context *c2)
+{
+ if (c1->len && c2->len)
+ return (c1->len == c2->len && !strcmp(c1->str, c2->str));
+ if (c1->len || c2->len)
+ return 0;
+ return ((c1->user == c2->user) &&
+ (c1->role == c2->role) &&
+ (c1->type == c2->type) &&
+ mls_context_cmp(c1, c2));
+}
+
+u32 context_compute_hash(const struct context *c);
+
+#endif /* _SS_CONTEXT_H_ */
+
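A hedged sketch of the copy/compare/destroy lifecycle defined above, assuming
context.h is included and using fabricated field values. Note that
context_cpy() cleans up after itself on failure (both the kstrdup() and the
mls_context_cpy() paths), so on error 'b' holds no allocations:

	struct context a, b;
	int rc;

	context_init(&a);
	a.user = 1;			/* fabricated values for illustration */
	a.role = 2;
	a.type = 3;

	rc = context_cpy(&b, &a);	/* deep-copies str and category maps */
	if (rc)
		return rc;		/* -ENOMEM; 'b' needs no cleanup */

	if (!context_cmp(&a, &b))	/* a faithful copy compares equal */
		pr_warn("copy mismatch\n");

	context_destroy(&a);
	context_destroy(&b);
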
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
new file mode 100644
index 000000000..d31b87be9
--- /dev/null
+++ b/security/selinux/ss/ebitmap.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the extensible bitmap type.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support to import/export the NetLabel category bitmap
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+/*
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ * Applied standard bit operations to improve bitmap scanning.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
+#include <net/netlabel.h>
+#include "ebitmap.h"
+#include "policydb.h"
+
+#define BITS_PER_U64 (sizeof(u64) * 8)
+
+static struct kmem_cache *ebitmap_node_cachep __ro_after_init;
+
+int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2)
+{
+ const struct ebitmap_node *n1, *n2;
+
+ if (e1->highbit != e2->highbit)
+ return 0;
+
+ n1 = e1->node;
+ n2 = e2->node;
+ while (n1 && n2 &&
+ (n1->startbit == n2->startbit) &&
+ !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
+ n1 = n1->next;
+ n2 = n2->next;
+ }
+
+ if (n1 || n2)
+ return 0;
+
+ return 1;
+}
+
+int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src)
+{
+ struct ebitmap_node *new, *prev;
+ const struct ebitmap_node *n;
+
+ ebitmap_init(dst);
+ n = src->node;
+ prev = NULL;
+ while (n) {
+ new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
+ if (!new) {
+ ebitmap_destroy(dst);
+ return -ENOMEM;
+ }
+ new->startbit = n->startbit;
+ memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
+ new->next = NULL;
+ if (prev)
+ prev->next = new;
+ else
+ dst->node = new;
+ prev = new;
+ n = n->next;
+ }
+
+ dst->highbit = src->highbit;
+ return 0;
+}
+
+int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1, const struct ebitmap *e2)
+{
+ struct ebitmap_node *n;
+ int bit, rc;
+
+ ebitmap_init(dst);
+
+ ebitmap_for_each_positive_bit(e1, n, bit) {
+ if (ebitmap_get_bit(e2, bit)) {
+ rc = ebitmap_set_bit(dst, bit, 1);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+
+#ifdef CONFIG_NETLABEL
+/**
+ * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
+ * @ebmap: the ebitmap to export
+ * @catmap: the NetLabel category bitmap
+ *
+ * Description:
+ * Export an SELinux extensible bitmap into a NetLabel category bitmap.
+ * Returns zero on success, negative values on error.
+ *
+ */
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap **catmap)
+{
+ struct ebitmap_node *e_iter = ebmap->node;
+ unsigned long e_map;
+ u32 offset;
+ unsigned int iter;
+ int rc;
+
+ if (e_iter == NULL) {
+ *catmap = NULL;
+ return 0;
+ }
+
+ if (*catmap != NULL)
+ netlbl_catmap_free(*catmap);
+ *catmap = NULL;
+
+ while (e_iter) {
+ offset = e_iter->startbit;
+ for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
+ e_map = e_iter->maps[iter];
+ if (e_map != 0) {
+ rc = netlbl_catmap_setlong(catmap,
+ offset,
+ e_map,
+ GFP_ATOMIC);
+ if (rc != 0)
+ goto netlbl_export_failure;
+ }
+ offset += EBITMAP_UNIT_SIZE;
+ }
+ e_iter = e_iter->next;
+ }
+
+ return 0;
+
+netlbl_export_failure:
+ netlbl_catmap_free(*catmap);
+ return -ENOMEM;
+}
+
+/**
+ * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
+ * @ebmap: the ebitmap to import
+ * @catmap: the NetLabel category bitmap
+ *
+ * Description:
+ * Import a NetLabel category bitmap into an SELinux extensible bitmap.
+ * Returns zero on success, negative values on error.
+ *
+ */
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap *catmap)
+{
+ int rc;
+ struct ebitmap_node *e_iter = NULL;
+ struct ebitmap_node *e_prev = NULL;
+ u32 offset = 0, idx;
+ unsigned long bitmap;
+
+ for (;;) {
+ rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
+ if (rc < 0)
+ goto netlbl_import_failure;
+ if (offset == (u32)-1)
+ return 0;
+
+ /* don't waste ebitmap space if the netlabel bitmap is empty */
+ if (bitmap == 0) {
+ offset += EBITMAP_UNIT_SIZE;
+ continue;
+ }
+
+ if (e_iter == NULL ||
+ offset >= e_iter->startbit + EBITMAP_SIZE) {
+ e_prev = e_iter;
+ e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
+ if (e_iter == NULL)
+ goto netlbl_import_failure;
+ e_iter->startbit = offset - (offset % EBITMAP_SIZE);
+ if (e_prev == NULL)
+ ebmap->node = e_iter;
+ else
+ e_prev->next = e_iter;
+ ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
+ }
+
+ /* offset will always be aligned to an unsigned long */
+ idx = EBITMAP_NODE_INDEX(e_iter, offset);
+ e_iter->maps[idx] = bitmap;
+
+ /* next */
+ offset += EBITMAP_UNIT_SIZE;
+ }
+
+ /* NOTE: we should never reach this return */
+ return 0;
+
+netlbl_import_failure:
+ ebitmap_destroy(ebmap);
+ return -ENOMEM;
+}
+#endif /* CONFIG_NETLABEL */
+
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2, u32 last_e2bit)
+{
+ const struct ebitmap_node *n1, *n2;
+ int i;
+
+ if (e1->highbit < e2->highbit)
+ return 0;
+
+ n1 = e1->node;
+ n2 = e2->node;
+
+ while (n1 && n2 && (n1->startbit <= n2->startbit)) {
+ if (n1->startbit < n2->startbit) {
+ n1 = n1->next;
+ continue;
+ }
+ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+ i--; /* Skip trailing NULL map entries */
+ if (last_e2bit && (i >= 0)) {
+ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+ __fls(n2->maps[i]);
+ if (lastsetbit > last_e2bit)
+ return 0;
+ }
+
+ while (i >= 0) {
+ if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
+ return 0;
+ i--;
+ }
+
+ n1 = n1->next;
+ n2 = n2->next;
+ }
+
+ if (n2)
+ return 0;
+
+ return 1;
+}
+
+int ebitmap_get_bit(const struct ebitmap *e, unsigned long bit)
+{
+ const struct ebitmap_node *n;
+
+ if (e->highbit < bit)
+ return 0;
+
+ n = e->node;
+ while (n && (n->startbit <= bit)) {
+ if ((n->startbit + EBITMAP_SIZE) > bit)
+ return ebitmap_node_get_bit(n, bit);
+ n = n->next;
+ }
+
+ return 0;
+}
+
+int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
+{
+ struct ebitmap_node *n, *prev, *new;
+
+ prev = NULL;
+ n = e->node;
+ while (n && n->startbit <= bit) {
+ if ((n->startbit + EBITMAP_SIZE) > bit) {
+ if (value) {
+ ebitmap_node_set_bit(n, bit);
+ } else {
+ unsigned int s;
+
+ ebitmap_node_clr_bit(n, bit);
+
+ s = find_first_bit(n->maps, EBITMAP_SIZE);
+ if (s < EBITMAP_SIZE)
+ return 0;
+
+ /* drop this node from the bitmap */
+ if (!n->next) {
+ /*
+ * this was the highest map
+ * within the bitmap
+ */
+ if (prev)
+ e->highbit = prev->startbit
+ + EBITMAP_SIZE;
+ else
+ e->highbit = 0;
+ }
+ if (prev)
+ prev->next = n->next;
+ else
+ e->node = n->next;
+ kmem_cache_free(ebitmap_node_cachep, n);
+ }
+ return 0;
+ }
+ prev = n;
+ n = n->next;
+ }
+
+ if (!value)
+ return 0;
+
+ new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
+ new->startbit = bit - (bit % EBITMAP_SIZE);
+ ebitmap_node_set_bit(new, bit);
+
+ if (!n)
+ /* this node will be the highest map within the bitmap */
+ e->highbit = new->startbit + EBITMAP_SIZE;
+
+ if (prev) {
+ new->next = prev->next;
+ prev->next = new;
+ } else {
+ new->next = e->node;
+ e->node = new;
+ }
+
+ return 0;
+}
+
+void ebitmap_destroy(struct ebitmap *e)
+{
+ struct ebitmap_node *n, *temp;
+
+ if (!e)
+ return;
+
+ n = e->node;
+ while (n) {
+ temp = n;
+ n = n->next;
+ kmem_cache_free(ebitmap_node_cachep, temp);
+ }
+
+ e->highbit = 0;
+ e->node = NULL;
+}
+
+int ebitmap_read(struct ebitmap *e, void *fp)
+{
+ struct ebitmap_node *n = NULL;
+ u32 mapunit, count, startbit, index;
+ __le32 ebitmap_start;
+ u64 map;
+ __le64 mapbits;
+ __le32 buf[3];
+ int rc, i;
+
+ ebitmap_init(e);
+
+ rc = next_entry(buf, fp, sizeof buf);
+ if (rc < 0)
+ goto out;
+
+ mapunit = le32_to_cpu(buf[0]);
+ e->highbit = le32_to_cpu(buf[1]);
+ count = le32_to_cpu(buf[2]);
+
+ if (mapunit != BITS_PER_U64) {
+ pr_err("SELinux: ebitmap: map size %u does not "
+ "match my size %zd (high bit was %d)\n",
+ mapunit, BITS_PER_U64, e->highbit);
+ goto bad;
+ }
+
+ /* round up e->highbit */
+ e->highbit += EBITMAP_SIZE - 1;
+ e->highbit -= (e->highbit % EBITMAP_SIZE);
+
+ if (!e->highbit) {
+ e->node = NULL;
+ goto ok;
+ }
+
+ if (e->highbit && !count)
+ goto bad;
+
+ for (i = 0; i < count; i++) {
+ rc = next_entry(&ebitmap_start, fp, sizeof(u32));
+ if (rc < 0) {
+ pr_err("SELinux: ebitmap: truncated map\n");
+ goto bad;
+ }
+ startbit = le32_to_cpu(ebitmap_start);
+
+ if (startbit & (mapunit - 1)) {
+ pr_err("SELinux: ebitmap start bit (%d) is "
+ "not a multiple of the map unit size (%u)\n",
+ startbit, mapunit);
+ goto bad;
+ }
+ if (startbit > e->highbit - mapunit) {
+ pr_err("SELinux: ebitmap start bit (%d) is "
+ "beyond the end of the bitmap (%u)\n",
+ startbit, (e->highbit - mapunit));
+ goto bad;
+ }
+
+ if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
+ struct ebitmap_node *tmp;
+ tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("SELinux: ebitmap: out of memory\n");
+ rc = -ENOMEM;
+ goto bad;
+ }
+ /* round down */
+ tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
+ if (n)
+ n->next = tmp;
+ else
+ e->node = tmp;
+ n = tmp;
+ } else if (startbit <= n->startbit) {
+ pr_err("SELinux: ebitmap: start bit %d"
+ " comes after start bit %d\n",
+ startbit, n->startbit);
+ goto bad;
+ }
+
+ rc = next_entry(&mapbits, fp, sizeof(u64));
+ if (rc < 0) {
+ pr_err("SELinux: ebitmap: truncated map\n");
+ goto bad;
+ }
+ map = le64_to_cpu(mapbits);
+
+ index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
+ while (map) {
+ n->maps[index++] = map & (-1UL);
+ map = EBITMAP_SHIFT_UNIT_SIZE(map);
+ }
+ }
+ok:
+ rc = 0;
+out:
+ return rc;
+bad:
+ if (!rc)
+ rc = -EINVAL;
+ ebitmap_destroy(e);
+ goto out;
+}
+
+int ebitmap_write(const struct ebitmap *e, void *fp)
+{
+ struct ebitmap_node *n;
+ u32 count;
+ __le32 buf[3];
+ u64 map;
+ int bit, last_bit, last_startbit, rc;
+
+ buf[0] = cpu_to_le32(BITS_PER_U64);
+
+ count = 0;
+ last_bit = 0;
+ last_startbit = -1;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ count++;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ last_bit = roundup(bit + 1, BITS_PER_U64);
+ }
+ buf[1] = cpu_to_le32(last_bit);
+ buf[2] = cpu_to_le32(count);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ map = 0;
+ last_startbit = INT_MIN;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ __le64 buf64[1];
+
+ /* this is the very first bit */
+ if (!map) {
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ map = (u64)1 << (bit - last_startbit);
+ continue;
+ }
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+
+ /* set up for the next node */
+ map = 0;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ map |= (u64)1 << (bit - last_startbit);
+ }
+ /* write the last node */
+ if (map) {
+ __le64 buf64[1];
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+u32 ebitmap_hash(const struct ebitmap *e, u32 hash)
+{
+ struct ebitmap_node *node;
+
+ /* need to change hash even if ebitmap is empty */
+ hash = jhash_1word(e->highbit, hash);
+ for (node = e->node; node; node = node->next) {
+ hash = jhash_1word(node->startbit, hash);
+ hash = jhash(node->maps, sizeof(node->maps), hash);
+ }
+ return hash;
+}
+
+void __init ebitmap_cache_init(void)
+{
+ ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
+ sizeof(struct ebitmap_node),
+ 0, SLAB_PANIC, NULL);
+}
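
A short usage sketch for the API above (kernel context assumed, after
ebitmap_cache_init() has created the slab). Setting two far-apart bits
allocates two nodes, and the iterator walks them in ascending order:

	struct ebitmap e;
	struct ebitmap_node *n;
	int bit, rc;

	ebitmap_init(&e);

	rc = ebitmap_set_bit(&e, 5, 1);
	if (!rc)
		rc = ebitmap_set_bit(&e, 1000, 1);  /* lands in a 2nd node */
	if (rc)
		goto out;

	ebitmap_for_each_positive_bit(&e, n, bit)
		pr_info("bit %d set\n", bit);	/* prints 5, then 1000 */

	ebitmap_set_bit(&e, 5, 0);	/* clearing a node's last set bit
					 * frees that node */
out:
	ebitmap_destroy(&e);
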
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
new file mode 100644
index 000000000..e5b57dc3f
--- /dev/null
+++ b/security/selinux/ss/ebitmap.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * An extensible bitmap is a bitmap that supports an
+ * arbitrary number of bits. Extensible bitmaps are
+ * used to represent sets of values, such as types,
+ * roles, categories, and classes.
+ *
+ * Each extensible bitmap is implemented as a linked
+ * list of bitmap nodes, where each bitmap node has
+ * an explicitly specified starting bit position within
+ * the total bitmap.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_EBITMAP_H_
+#define _SS_EBITMAP_H_
+
+#include <net/netlabel.h>
+
+#ifdef CONFIG_64BIT
+#define EBITMAP_NODE_SIZE 64
+#else
+#define EBITMAP_NODE_SIZE 32
+#endif
+
+#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
+ / sizeof(unsigned long))
+#define EBITMAP_UNIT_SIZE BITS_PER_LONG
+#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
+#define EBITMAP_BIT 1ULL
+#define EBITMAP_SHIFT_UNIT_SIZE(x) \
+ (((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
+
+struct ebitmap_node {
+ struct ebitmap_node *next;
+ unsigned long maps[EBITMAP_UNIT_NUMS];
+ u32 startbit;
+};
+
+struct ebitmap {
+ struct ebitmap_node *node; /* first node in the bitmap */
+ u32 highbit; /* highest position in the total bitmap */
+};
+
+#define ebitmap_length(e) ((e)->highbit)
+
+static inline unsigned int ebitmap_start_positive(const struct ebitmap *e,
+ struct ebitmap_node **n)
+{
+ unsigned int ofs;
+
+ for (*n = e->node; *n; *n = (*n)->next) {
+ ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+ if (ofs < EBITMAP_SIZE)
+ return (*n)->startbit + ofs;
+ }
+ return ebitmap_length(e);
+}
+
+static inline void ebitmap_init(struct ebitmap *e)
+{
+ memset(e, 0, sizeof(*e));
+}
+
+static inline unsigned int ebitmap_next_positive(const struct ebitmap *e,
+ struct ebitmap_node **n,
+ unsigned int bit)
+{
+ unsigned int ofs;
+
+ ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
+ if (ofs < EBITMAP_SIZE)
+ return ofs + (*n)->startbit;
+
+ for (*n = (*n)->next; *n; *n = (*n)->next) {
+ ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+ if (ofs < EBITMAP_SIZE)
+ return ofs + (*n)->startbit;
+ }
+ return ebitmap_length(e);
+}
+
+#define EBITMAP_NODE_INDEX(node, bit) \
+ (((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
+#define EBITMAP_NODE_OFFSET(node, bit) \
+ (((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
+
+static inline int ebitmap_node_get_bit(const struct ebitmap_node *n,
+ unsigned int bit)
+{
+ unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+ unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+ BUG_ON(index >= EBITMAP_UNIT_NUMS);
+ if ((n->maps[index] & (EBITMAP_BIT << ofs)))
+ return 1;
+ return 0;
+}
+
+static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
+ unsigned int bit)
+{
+ unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+ unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+ BUG_ON(index >= EBITMAP_UNIT_NUMS);
+ n->maps[index] |= (EBITMAP_BIT << ofs);
+}
+
+static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
+ unsigned int bit)
+{
+ unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+ unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+ BUG_ON(index >= EBITMAP_UNIT_NUMS);
+ n->maps[index] &= ~(EBITMAP_BIT << ofs);
+}
+
+#define ebitmap_for_each_positive_bit(e, n, bit) \
+ for ((bit) = ebitmap_start_positive(e, &(n)); \
+ (bit) < ebitmap_length(e); \
+ (bit) = ebitmap_next_positive(e, &(n), bit))
+
+int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2);
+int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src);
+int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1, const struct ebitmap *e2);
+int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2, u32 last_e2bit);
+int ebitmap_get_bit(const struct ebitmap *e, unsigned long bit);
+int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
+void ebitmap_destroy(struct ebitmap *e);
+int ebitmap_read(struct ebitmap *e, void *fp);
+int ebitmap_write(const struct ebitmap *e, void *fp);
+u32 ebitmap_hash(const struct ebitmap *e, u32 hash);
+
+#ifdef CONFIG_NETLABEL
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap **catmap);
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap *catmap);
+#else
+static inline int ebitmap_netlbl_export(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap **catmap)
+{
+ return -ENOMEM;
+}
+static inline int ebitmap_netlbl_import(struct ebitmap *ebmap,
+ struct netlbl_lsm_catmap *catmap)
+{
+ return -ENOMEM;
+}
+#endif
+
+#endif /* _SS_EBITMAP_H_ */
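
On a 64-bit kernel the arithmetic above works out to EBITMAP_UNIT_NUMS =
(64 - 8 - 4) / 8 = 6 longs per node, so each node covers EBITMAP_SIZE = 384
bits. A quick standalone check of where bit 1000 would land under that layout
(the constants are hard-coded for the 64-bit case):

#include <stdio.h>

#define UNIT 64			/* BITS_PER_LONG on 64-bit */
#define SIZE (6 * UNIT)		/* bits covered by one node: 384 */

int main(void)
{
	unsigned int bit = 1000;
	unsigned int startbit = bit - (bit % SIZE);	/* 768 */
	unsigned int index = (bit - startbit) / UNIT;	/* maps[3] */
	unsigned int ofs = (bit - startbit) % UNIT;	/* bit 40 of maps[3] */

	printf("startbit=%u index=%u ofs=%u\n", startbit, index, ofs);
	return 0;
}
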
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
new file mode 100644
index 000000000..3fb8f9026
--- /dev/null
+++ b/security/selinux/ss/hashtab.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the hash table type.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include "hashtab.h"
+#include "security.h"
+
+static struct kmem_cache *hashtab_node_cachep __ro_after_init;
+
+/*
+ * Here we simply round the number of elements up to the nearest power of two.
+ * I also tried other options, like rounding down or rounding to the closest
+ * power of two (up or down, whichever is closer), but I was unable to
+ * find any significant difference in lookup/insert performance that would
+ * justify switching to a different (less intuitive) formula. A different
+ * formula may well be better, but any future changes here
+ * should be supported with performance/memory usage data.
+ *
+ * The total memory used by the htable arrays (only) with Fedora policy loaded
+ * is approximately 163 KB at the time of writing.
+ */
+static u32 hashtab_compute_size(u32 nel)
+{
+ return nel == 0 ? 0 : roundup_pow_of_two(nel);
+}
+
+int hashtab_init(struct hashtab *h, u32 nel_hint)
+{
+ u32 size = hashtab_compute_size(nel_hint);
+
+ /* should already be zeroed, but better be safe */
+ h->nel = 0;
+ h->size = 0;
+ h->htable = NULL;
+
+ if (size) {
+ h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
+ if (!h->htable)
+ return -ENOMEM;
+ h->size = size;
+ }
+ return 0;
+}
+
+int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
+ void *key, void *datum)
+{
+ struct hashtab_node *newnode;
+
+ newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
+ if (!newnode)
+ return -ENOMEM;
+ newnode->key = key;
+ newnode->datum = datum;
+ newnode->next = *dst;
+ *dst = newnode;
+
+ h->nel++;
+ return 0;
+}
+
+void hashtab_destroy(struct hashtab *h)
+{
+ u32 i;
+ struct hashtab_node *cur, *temp;
+
+ for (i = 0; i < h->size; i++) {
+ cur = h->htable[i];
+ while (cur) {
+ temp = cur;
+ cur = cur->next;
+ kmem_cache_free(hashtab_node_cachep, temp);
+ }
+ h->htable[i] = NULL;
+ }
+
+ kfree(h->htable);
+ h->htable = NULL;
+}
+
+int hashtab_map(struct hashtab *h,
+ int (*apply)(void *k, void *d, void *args),
+ void *args)
+{
+ u32 i;
+ int ret;
+ struct hashtab_node *cur;
+
+ for (i = 0; i < h->size; i++) {
+ cur = h->htable[i];
+ while (cur) {
+ ret = apply(cur->key, cur->datum, args);
+ if (ret)
+ return ret;
+ cur = cur->next;
+ }
+ }
+ return 0;
+}
+
+
+void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
+{
+ u32 i, chain_len, slots_used, max_chain_len;
+ struct hashtab_node *cur;
+
+ slots_used = 0;
+ max_chain_len = 0;
+ for (i = 0; i < h->size; i++) {
+ cur = h->htable[i];
+ if (cur) {
+ slots_used++;
+ chain_len = 0;
+ while (cur) {
+ chain_len++;
+ cur = cur->next;
+ }
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ }
+ }
+
+ info->slots_used = slots_used;
+ info->max_chain_len = max_chain_len;
+}
+
+int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
+ int (*copy)(struct hashtab_node *new,
+ struct hashtab_node *orig, void *args),
+ int (*destroy)(void *k, void *d, void *args),
+ void *args)
+{
+ struct hashtab_node *cur, *tmp, *tail;
+ int i, rc;
+
+ memset(new, 0, sizeof(*new));
+
+ new->htable = kcalloc(orig->size, sizeof(*new->htable), GFP_KERNEL);
+ if (!new->htable)
+ return -ENOMEM;
+
+ new->size = orig->size;
+
+ for (i = 0; i < orig->size; i++) {
+ tail = NULL;
+ for (cur = orig->htable[i]; cur; cur = cur->next) {
+ tmp = kmem_cache_zalloc(hashtab_node_cachep,
+ GFP_KERNEL);
+ if (!tmp)
+ goto error;
+ rc = copy(tmp, cur, args);
+ if (rc) {
+ kmem_cache_free(hashtab_node_cachep, tmp);
+ goto error;
+ }
+ tmp->next = NULL;
+ if (!tail)
+ new->htable[i] = tmp;
+ else
+ tail->next = tmp;
+ tail = tmp;
+ new->nel++;
+ }
+ }
+
+ return 0;
+
+ error:
+ for (i = 0; i < new->size; i++) {
+ for (cur = new->htable[i]; cur; cur = tmp) {
+ tmp = cur->next;
+ destroy(cur->key, cur->datum, args);
+ kmem_cache_free(hashtab_node_cachep, cur);
+ }
+ }
+ kfree(new->htable);
+ memset(new, 0, sizeof(*new));
+ return -ENOMEM;
+}
+
+void __init hashtab_cache_init(void)
+{
+ hashtab_node_cachep = kmem_cache_create("hashtab_node",
+ sizeof(struct hashtab_node),
+ 0, SLAB_PANIC, NULL);
+}
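
The sizing comment near the top boils down to a power-of-two round up, which
is also what keeps "hash & (size - 1)" valid as a bucket mask in hashtab.h. A
toy userspace check of the mapping, with roundup_pow_of_two() re-implemented
by hand:

#include <stdint.h>
#include <stdio.h>

static uint32_t rup2(uint32_t x)	/* userspace roundup_pow_of_two */
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t hints[] = { 0, 1, 5, 8, 9, 1000 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%u -> %u\n", hints[i], hints[i] ? rup2(hints[i]) : 0);
	/* 0->0, 1->1, 5->8, 8->8, 9->16, 1000->1024 */
	return 0;
}
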
diff --git a/security/selinux/ss/hashtab.h b/security/selinux/ss/hashtab.h
new file mode 100644
index 000000000..043a773bf
--- /dev/null
+++ b/security/selinux/ss/hashtab.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A hash table (hashtab) maintains associations between
+ * key values and datum values. The type of the key values
+ * and the type of the datum values is arbitrary. The
+ * functions for hash computation and key comparison are
+ * provided by the creator of the table.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_HASHTAB_H_
+#define _SS_HASHTAB_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#define HASHTAB_MAX_NODES U32_MAX
+
+struct hashtab_key_params {
+ u32 (*hash)(const void *key); /* hash function */
+ int (*cmp)(const void *key1, const void *key2);
+ /* key comparison function */
+};
+
+struct hashtab_node {
+ void *key;
+ void *datum;
+ struct hashtab_node *next;
+};
+
+struct hashtab {
+ struct hashtab_node **htable; /* hash table */
+ u32 size; /* number of slots in hash table */
+ u32 nel; /* number of elements in hash table */
+};
+
+struct hashtab_info {
+ u32 slots_used;
+ u32 max_chain_len;
+};
+
+/*
+ * Initializes a new hash table with the specified characteristics.
+ *
+ * Returns -ENOMEM if insufficient space is available or 0 otherwise.
+ */
+int hashtab_init(struct hashtab *h, u32 nel_hint);
+
+int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
+ void *key, void *datum);
+
+/*
+ * Inserts the specified (key, datum) pair into the specified hash table.
+ *
+ * Returns -ENOMEM on memory allocation error,
+ * -EEXIST if there is already an entry with the same key,
+ * -EINVAL for general errors or
+ * 0 otherwise.
+ */
+static inline int hashtab_insert(struct hashtab *h, void *key, void *datum,
+ struct hashtab_key_params key_params)
+{
+ u32 hvalue;
+ struct hashtab_node *prev, *cur;
+
+ cond_resched();
+
+ if (!h->size || h->nel == HASHTAB_MAX_NODES)
+ return -EINVAL;
+
+ hvalue = key_params.hash(key) & (h->size - 1);
+ prev = NULL;
+ cur = h->htable[hvalue];
+ while (cur) {
+ int cmp = key_params.cmp(key, cur->key);
+
+ if (cmp == 0)
+ return -EEXIST;
+ if (cmp < 0)
+ break;
+ prev = cur;
+ cur = cur->next;
+ }
+
+ return __hashtab_insert(h, prev ? &prev->next : &h->htable[hvalue],
+ key, datum);
+}
+
+/*
+ * Searches for the entry with the specified key in the hash table.
+ *
+ * Returns NULL if no entry has the specified key or
+ * the datum of the entry otherwise.
+ */
+static inline void *hashtab_search(struct hashtab *h, const void *key,
+ struct hashtab_key_params key_params)
+{
+ u32 hvalue;
+ struct hashtab_node *cur;
+
+ if (!h->size)
+ return NULL;
+
+ hvalue = key_params.hash(key) & (h->size - 1);
+ cur = h->htable[hvalue];
+ while (cur) {
+ int cmp = key_params.cmp(key, cur->key);
+
+ if (cmp == 0)
+ return cur->datum;
+ if (cmp < 0)
+ break;
+ cur = cur->next;
+ }
+ return NULL;
+}
+
+/*
+ * Destroys the specified hash table.
+ */
+void hashtab_destroy(struct hashtab *h);
+
+/*
+ * Applies the specified apply function to (key,datum,args)
+ * for each entry in the specified hash table.
+ *
+ * The order in which the function is applied to the entries
+ * is dependent upon the internal structure of the hash table.
+ *
+ * If apply returns a non-zero status, then hashtab_map will cease
+ * iterating through the hash table and will propagate the error
+ * return to its caller.
+ */
+int hashtab_map(struct hashtab *h,
+ int (*apply)(void *k, void *d, void *args),
+ void *args);
+
+int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
+ int (*copy)(struct hashtab_node *new,
+ struct hashtab_node *orig, void *args),
+ int (*destroy)(void *k, void *d, void *args),
+ void *args);
+
+/* Fill info with some hash table statistics */
+void hashtab_stat(struct hashtab *h, struct hashtab_info *info);
+
+#endif /* _SS_HASHTAB_H_ */
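
Because the hash and compare callbacks travel in hashtab_key_params (passed by
value), a caller keyed by C strings might look like the hedged sketch below;
the djb2-style hash is a made-up choice for illustration, not what SELinux
uses for any of its tables, and 'name'/'datum' are placeholders:

static u32 str_key_hash(const void *key)
{
	const unsigned char *p = key;
	u32 h = 5381;

	while (*p)
		h = h * 33 + *p++;
	return h;
}

static int str_key_cmp(const void *k1, const void *k2)
{
	return strcmp(k1, k2);
}

static const struct hashtab_key_params str_params = {
	.hash = str_key_hash,
	.cmp = str_key_cmp,
};

	/* usage */
	struct hashtab h;

	if (hashtab_init(&h, 16))
		return -ENOMEM;
	if (hashtab_insert(&h, name, datum, str_params))  /* -EEXIST on dup */
		goto err;
	datum = hashtab_search(&h, name, str_params);
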
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
new file mode 100644
index 000000000..99571b19d
--- /dev/null
+++ b/security/selinux/ss/mls.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the multi-level security (MLS) policy.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support to import/export the MLS label from NetLabel
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <net/netlabel.h>
+#include "sidtab.h"
+#include "mls.h"
+#include "policydb.h"
+#include "services.h"
+
+/*
+ * Return the length in bytes for the MLS fields of the
+ * security context string representation of `context'.
+ */
+int mls_compute_context_len(struct policydb *p, struct context *context)
+{
+ int i, l, len, head, prev;
+ char *nm;
+ struct ebitmap *e;
+ struct ebitmap_node *node;
+
+ if (!p->mls_enabled)
+ return 0;
+
+ len = 1; /* for the beginning ":" */
+ for (l = 0; l < 2; l++) {
+ int index_sens = context->range.level[l].sens;
+ len += strlen(sym_name(p, SYM_LEVELS, index_sens - 1));
+
+ /* categories */
+ head = -2;
+ prev = -2;
+ e = &context->range.level[l].cat;
+ ebitmap_for_each_positive_bit(e, node, i) {
+ if (i - prev > 1) {
+ /* one or more negative bits are skipped */
+ if (head != prev) {
+ nm = sym_name(p, SYM_CATS, prev);
+ len += strlen(nm) + 1;
+ }
+ nm = sym_name(p, SYM_CATS, i);
+ len += strlen(nm) + 1;
+ head = i;
+ }
+ prev = i;
+ }
+ if (prev != head) {
+ nm = sym_name(p, SYM_CATS, prev);
+ len += strlen(nm) + 1;
+ }
+ if (l == 0) {
+ if (mls_level_eq(&context->range.level[0],
+ &context->range.level[1]))
+ break;
+ else
+ len++;
+ }
+ }
+
+ return len;
+}
+
+/*
+ * Write the security context string representation of
+ * the MLS fields of `context' into the string `*scontext'.
+ * Update `*scontext' to point to the end of the MLS fields.
+ */
+void mls_sid_to_context(struct policydb *p,
+ struct context *context,
+ char **scontext)
+{
+ char *scontextp, *nm;
+ int i, l, head, prev;
+ struct ebitmap *e;
+ struct ebitmap_node *node;
+
+ if (!p->mls_enabled)
+ return;
+
+ scontextp = *scontext;
+
+ *scontextp = ':';
+ scontextp++;
+
+ for (l = 0; l < 2; l++) {
+ strcpy(scontextp, sym_name(p, SYM_LEVELS,
+ context->range.level[l].sens - 1));
+ scontextp += strlen(scontextp);
+
+ /* categories */
+ head = -2;
+ prev = -2;
+ e = &context->range.level[l].cat;
+ ebitmap_for_each_positive_bit(e, node, i) {
+ if (i - prev > 1) {
+ /* one or more negative bits are skipped */
+ if (prev != head) {
+ if (prev - head > 1)
+ *scontextp++ = '.';
+ else
+ *scontextp++ = ',';
+ nm = sym_name(p, SYM_CATS, prev);
+ strcpy(scontextp, nm);
+ scontextp += strlen(nm);
+ }
+ if (prev < 0)
+ *scontextp++ = ':';
+ else
+ *scontextp++ = ',';
+ nm = sym_name(p, SYM_CATS, i);
+ strcpy(scontextp, nm);
+ scontextp += strlen(nm);
+ head = i;
+ }
+ prev = i;
+ }
+
+ if (prev != head) {
+ if (prev - head > 1)
+ *scontextp++ = '.';
+ else
+ *scontextp++ = ',';
+ nm = sym_name(p, SYM_CATS, prev);
+ strcpy(scontextp, nm);
+ scontextp += strlen(nm);
+ }
+
+ if (l == 0) {
+ if (mls_level_eq(&context->range.level[0],
+ &context->range.level[1]))
+ break;
+ else
+ *scontextp++ = '-';
+ }
+ }
+
+ *scontext = scontextp;
+}
+
+int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+{
+ struct level_datum *levdatum;
+
+ if (!l->sens || l->sens > p->p_levels.nprim)
+ return 0;
+ levdatum = symtab_search(&p->p_levels,
+ sym_name(p, SYM_LEVELS, l->sens - 1));
+ if (!levdatum)
+ return 0;
+
+ /*
+ * Return 1 iff all the bits set in l->cat are also set in
+ * levdatum->level->cat and no bit in l->cat is larger than
+ * p->p_cats.nprim.
+ */
+ return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ p->p_cats.nprim);
+}
+
+int mls_range_isvalid(struct policydb *p, struct mls_range *r)
+{
+ return (mls_level_isvalid(p, &r->level[0]) &&
+ mls_level_isvalid(p, &r->level[1]) &&
+ mls_level_dom(&r->level[1], &r->level[0]));
+}
+
+/*
+ * Return 1 if the MLS fields in the security context
+ * structure `c' are valid. Return 0 otherwise.
+ */
+int mls_context_isvalid(struct policydb *p, struct context *c)
+{
+ struct user_datum *usrdatum;
+
+ if (!p->mls_enabled)
+ return 1;
+
+ if (!mls_range_isvalid(p, &c->range))
+ return 0;
+
+ if (c->role == OBJECT_R_VAL)
+ return 1;
+
+ /*
+ * User must be authorized for the MLS range.
+ */
+ if (!c->user || c->user > p->p_users.nprim)
+ return 0;
+ usrdatum = p->user_val_to_struct[c->user - 1];
+ if (!mls_range_contains(usrdatum->range, c->range))
+ return 0; /* user may not be associated with range */
+
+ return 1;
+}
+
+/*
+ * Set the MLS fields in the security context structure
+ * `context' based on the string representation in
+ * the string `scontext'.
+ *
+ * This function modifies the string in place, inserting
+ * NUL characters to terminate the MLS fields.
+ *
+ * If a def_sid is provided and no MLS field is present,
+ * copy the MLS field of the associated default context.
+ * Used for systems upgraded to MLS, where objects may lack
+ * MLS fields.
+ *
+ * Policy read-lock must be held for sidtab lookup.
+ *
+ */
+int mls_context_to_sid(struct policydb *pol,
+ char oldc,
+ char *scontext,
+ struct context *context,
+ struct sidtab *s,
+ u32 def_sid)
+{
+ char *sensitivity, *cur_cat, *next_cat, *rngptr;
+ struct level_datum *levdatum;
+ struct cat_datum *catdatum, *rngdatum;
+ int l, rc, i;
+ char *rangep[2];
+
+ if (!pol->mls_enabled) {
+ /*
+ * With no MLS, only return -EINVAL if there is an MLS field
+ * and it did not come from an xattr.
+ */
+ if (oldc && def_sid == SECSID_NULL)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * No MLS component to the security context; try to map to
+ * the default if one is provided.
+ */
+ if (!oldc) {
+ struct context *defcon;
+
+ if (def_sid == SECSID_NULL)
+ return -EINVAL;
+
+ defcon = sidtab_search(s, def_sid);
+ if (!defcon)
+ return -EINVAL;
+
+ return mls_context_cpy(context, defcon);
+ }
+
+ /*
+ * If we're dealing with a range, figure out where the two parts
+ * of the range begin.
+ */
+ rangep[0] = scontext;
+ rangep[1] = strchr(scontext, '-');
+ if (rangep[1]) {
+ rangep[1][0] = '\0';
+ rangep[1]++;
+ }
+
+ /* For each part of the range: */
+ for (l = 0; l < 2; l++) {
+ /* Split sensitivity and category set. */
+ sensitivity = rangep[l];
+ if (sensitivity == NULL)
+ break;
+ next_cat = strchr(sensitivity, ':');
+ if (next_cat)
+ *(next_cat++) = '\0';
+
+ /* Parse sensitivity. */
+ levdatum = symtab_search(&pol->p_levels, sensitivity);
+ if (!levdatum)
+ return -EINVAL;
+ context->range.level[l].sens = levdatum->level->sens;
+
+ /* Extract category set. */
+ while (next_cat != NULL) {
+ cur_cat = next_cat;
+ next_cat = strchr(next_cat, ',');
+ if (next_cat != NULL)
+ *(next_cat++) = '\0';
+
+ /* Separate into range if exists */
+ rngptr = strchr(cur_cat, '.');
+ if (rngptr != NULL) {
+ /* Remove '.' */
+ *rngptr++ = '\0';
+ }
+
+ catdatum = symtab_search(&pol->p_cats, cur_cat);
+ if (!catdatum)
+ return -EINVAL;
+
+ rc = ebitmap_set_bit(&context->range.level[l].cat,
+ catdatum->value - 1, 1);
+ if (rc)
+ return rc;
+
+ /* If range, set all categories in range */
+ if (rngptr == NULL)
+ continue;
+
+ rngdatum = symtab_search(&pol->p_cats, rngptr);
+ if (!rngdatum)
+ return -EINVAL;
+
+ if (catdatum->value >= rngdatum->value)
+ return -EINVAL;
+
+ for (i = catdatum->value; i < rngdatum->value; i++) {
+ rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
+ if (rc)
+ return rc;
+ }
+ }
+ }
+
+ /* If we didn't see a '-', the range start is also the range end. */
+ if (rangep[1] == NULL) {
+ context->range.level[1].sens = context->range.level[0].sens;
+ rc = ebitmap_cpy(&context->range.level[1].cat,
+ &context->range.level[0].cat);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Set the MLS fields in the security context structure
+ * `context' based on the string representation in
+ * the string `str'. This function allocates temporary memory under the
+ * constraints given by gfp_mask.
+ */
+int mls_from_string(struct policydb *p, char *str, struct context *context,
+ gfp_t gfp_mask)
+{
+ char *tmpstr;
+ int rc;
+
+ if (!p->mls_enabled)
+ return -EINVAL;
+
+ tmpstr = kstrdup(str, gfp_mask);
+ if (!tmpstr) {
+ rc = -ENOMEM;
+ } else {
+ rc = mls_context_to_sid(p, ':', tmpstr, context,
+ NULL, SECSID_NULL);
+ kfree(tmpstr);
+ }
+
+ return rc;
+}
+
+/*
+ * Copies the MLS range `range' into `context'.
+ */
+int mls_range_set(struct context *context,
+ struct mls_range *range)
+{
+ int l, rc = 0;
+
+ /* Copy the MLS range into the context */
+ for (l = 0; l < 2; l++) {
+ context->range.level[l].sens = range->level[l].sens;
+ rc = ebitmap_cpy(&context->range.level[l].cat,
+ &range->level[l].cat);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int mls_setup_user_range(struct policydb *p,
+ struct context *fromcon, struct user_datum *user,
+ struct context *usercon)
+{
+ if (p->mls_enabled) {
+ struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
+ struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
+ struct mls_level *user_low = &(user->range.level[0]);
+ struct mls_level *user_clr = &(user->range.level[1]);
+ struct mls_level *user_def = &(user->dfltlevel);
+ struct mls_level *usercon_sen = &(usercon->range.level[0]);
+ struct mls_level *usercon_clr = &(usercon->range.level[1]);
+
+ /* Honor the user's default level if we can */
+ if (mls_level_between(user_def, fromcon_sen, fromcon_clr))
+ *usercon_sen = *user_def;
+ else if (mls_level_between(fromcon_sen, user_def, user_clr))
+ *usercon_sen = *fromcon_sen;
+ else if (mls_level_between(fromcon_clr, user_low, user_def))
+ *usercon_sen = *user_low;
+ else
+ return -EINVAL;
+
+ /* Lower the clearance of available contexts
+ if the clearance of "fromcon" is lower than
+ that of the user's default clearance (but
+ only if the "fromcon" clearance dominates
+ the user's computed sensitivity level) */
+ if (mls_level_dom(user_clr, fromcon_clr))
+ *usercon_clr = *fromcon_clr;
+ else if (mls_level_dom(fromcon_clr, user_clr))
+ *usercon_clr = *user_clr;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Convert the MLS fields in the security context
+ * structure `oldc' from the values specified in the
+ * policy `oldp' to the values specified in the policy `newp',
+ * storing the resulting context in `newc'.
+ */
+int mls_convert_context(struct policydb *oldp,
+ struct policydb *newp,
+ struct context *oldc,
+ struct context *newc)
+{
+ struct level_datum *levdatum;
+ struct cat_datum *catdatum;
+ struct ebitmap_node *node;
+ int l, i;
+
+ if (!oldp->mls_enabled || !newp->mls_enabled)
+ return 0;
+
+ for (l = 0; l < 2; l++) {
+ char *name = sym_name(oldp, SYM_LEVELS,
+ oldc->range.level[l].sens - 1);
+
+ levdatum = symtab_search(&newp->p_levels, name);
+
+ if (!levdatum)
+ return -EINVAL;
+ newc->range.level[l].sens = levdatum->level->sens;
+
+ ebitmap_for_each_positive_bit(&oldc->range.level[l].cat,
+ node, i) {
+ int rc;
+
+ catdatum = symtab_search(&newp->p_cats,
+ sym_name(oldp, SYM_CATS, i));
+ if (!catdatum)
+ return -EINVAL;
+ rc = ebitmap_set_bit(&newc->range.level[l].cat,
+ catdatum->value - 1, 1);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int mls_compute_sid(struct policydb *p,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ u32 specified,
+ struct context *newcontext,
+ bool sock)
+{
+ struct range_trans rtr;
+ struct mls_range *r;
+ struct class_datum *cladatum;
+ int default_range = 0;
+
+ if (!p->mls_enabled)
+ return 0;
+
+ switch (specified) {
+ case AVTAB_TRANSITION:
+ /* Look for a range transition rule. */
+ rtr.source_type = scontext->type;
+ rtr.target_type = tcontext->type;
+ rtr.target_class = tclass;
+ r = policydb_rangetr_search(p, &rtr);
+ if (r)
+ return mls_range_set(newcontext, r);
+
+ if (tclass && tclass <= p->p_classes.nprim) {
+ cladatum = p->class_val_to_struct[tclass - 1];
+ if (cladatum)
+ default_range = cladatum->default_range;
+ }
+
+ switch (default_range) {
+ case DEFAULT_SOURCE_LOW:
+ return mls_context_cpy_low(newcontext, scontext);
+ case DEFAULT_SOURCE_HIGH:
+ return mls_context_cpy_high(newcontext, scontext);
+ case DEFAULT_SOURCE_LOW_HIGH:
+ return mls_context_cpy(newcontext, scontext);
+ case DEFAULT_TARGET_LOW:
+ return mls_context_cpy_low(newcontext, tcontext);
+ case DEFAULT_TARGET_HIGH:
+ return mls_context_cpy_high(newcontext, tcontext);
+ case DEFAULT_TARGET_LOW_HIGH:
+ return mls_context_cpy(newcontext, tcontext);
+ case DEFAULT_GLBLUB:
+ return mls_context_glblub(newcontext,
+ scontext, tcontext);
+ }
+
+ fallthrough;
+ case AVTAB_CHANGE:
+ if ((tclass == p->process_class) || sock)
+ /* Use the process MLS attributes. */
+ return mls_context_cpy(newcontext, scontext);
+ else
+ /* Use the process effective MLS attributes. */
+ return mls_context_cpy_low(newcontext, scontext);
+ case AVTAB_MEMBER:
+ /* Use the process effective MLS attributes. */
+ return mls_context_cpy_low(newcontext, scontext);
+ }
+ return -EINVAL;
+}
+
+#ifdef CONFIG_NETLABEL
+/**
+ * mls_export_netlbl_lvl - Export the MLS sensitivity levels to NetLabel
+ * @p: the policy
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context copy the low MLS sensitivity level into the
+ * NetLabel MLS sensitivity level field.
+ *
+ */
+void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ if (!p->mls_enabled)
+ return;
+
+ secattr->attr.mls.lvl = context->range.level[0].sens - 1;
+ secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+}
+
+/**
+ * mls_import_netlbl_lvl - Import the NetLabel MLS sensitivity levels
+ * @p: the policy
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context and the NetLabel security attributes, copy the
+ * NetLabel MLS sensitivity level into the context.
+ *
+ */
+void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ if (!p->mls_enabled)
+ return;
+
+ context->range.level[0].sens = secattr->attr.mls.lvl + 1;
+ context->range.level[1].sens = context->range.level[0].sens;
+}
+
+/**
+ * mls_export_netlbl_cat - Export the MLS categories to NetLabel
+ * @p: the policy
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Given the security context copy the low MLS categories into the NetLabel
+ * MLS category field. Returns zero on success, negative values on failure.
+ *
+ */
+int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ int rc;
+
+ if (!p->mls_enabled)
+ return 0;
+
+ rc = ebitmap_netlbl_export(&context->range.level[0].cat,
+ &secattr->attr.mls.cat);
+ if (rc == 0 && secattr->attr.mls.cat != NULL)
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+
+ return rc;
+}
+
+/**
+ * mls_import_netlbl_cat - Import the MLS categories from NetLabel
+ * @p: the policy
+ * @context: the security context
+ * @secattr: the NetLabel security attributes
+ *
+ * Description:
+ * Copy the NetLabel security attributes into the SELinux context; since the
+ * NetLabel security attribute only contains a single MLS category use it for
+ * both the low and high categories of the context. Returns zero on success,
+ * negative values on failure.
+ *
+ */
+int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ int rc;
+
+ if (!p->mls_enabled)
+ return 0;
+
+ rc = ebitmap_netlbl_import(&context->range.level[0].cat,
+ secattr->attr.mls.cat);
+ if (rc)
+ goto import_netlbl_cat_failure;
+ memcpy(&context->range.level[1].cat, &context->range.level[0].cat,
+ sizeof(context->range.level[0].cat));
+
+ return 0;
+
+import_netlbl_cat_failure:
+ ebitmap_destroy(&context->range.level[0].cat);
+ return rc;
+}
+#endif /* CONFIG_NETLABEL */
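
For orientation, mls_context_to_sid() above accepts labels such as
"s0:c0.c3,c7-s1:c0.c9" (a fabricated example): '-' splits the low and high
levels, ':' separates a sensitivity from its categories, ',' separates
category items, and '.' expands to a contiguous category range. The first
splitting steps, which insert NUL characters in place exactly as the function
comment describes, look roughly like this:

	char buf[] = "s0:c0.c3,c7-s1:c0.c9";	/* fabricated MLS field */
	char *high = strchr(buf, '-');

	if (high)
		*high++ = '\0';	/* buf = "s0:c0.c3,c7", high = "s1:c0.c9" */

	char *cats = strchr(buf, ':');

	if (cats)
		*cats++ = '\0';	/* buf = "s0", cats = "c0.c3,c7" */
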
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
new file mode 100644
index 000000000..15cacde0f
--- /dev/null
+++ b/security/selinux/ss/mls.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Multi-level security (MLS) policy operations.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ */
+/*
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support to import/export the MLS label from NetLabel
+ *
+ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
+ */
+
+#ifndef _SS_MLS_H_
+#define _SS_MLS_H_
+
+#include <linux/jhash.h>
+
+#include "context.h"
+#include "ebitmap.h"
+#include "policydb.h"
+
+int mls_compute_context_len(struct policydb *p, struct context *context);
+void mls_sid_to_context(struct policydb *p, struct context *context,
+ char **scontext);
+int mls_context_isvalid(struct policydb *p, struct context *c);
+int mls_range_isvalid(struct policydb *p, struct mls_range *r);
+int mls_level_isvalid(struct policydb *p, struct mls_level *l);
+
+int mls_context_to_sid(struct policydb *p,
+ char oldc,
+ char *scontext,
+ struct context *context,
+ struct sidtab *s,
+ u32 def_sid);
+
+int mls_from_string(struct policydb *p, char *str, struct context *context,
+ gfp_t gfp_mask);
+
+int mls_range_set(struct context *context, struct mls_range *range);
+
+int mls_convert_context(struct policydb *oldp,
+ struct policydb *newp,
+ struct context *oldc,
+ struct context *newc);
+
+int mls_compute_sid(struct policydb *p,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ u32 specified,
+ struct context *newcontext,
+ bool sock);
+
+int mls_setup_user_range(struct policydb *p,
+ struct context *fromcon, struct user_datum *user,
+ struct context *usercon);
+
+#ifdef CONFIG_NETLABEL
+void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr);
+void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr);
+int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr);
+int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr);
+#else
+static inline void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return;
+}
+static inline void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return;
+}
+static inline int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOMEM;
+}
+static inline int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOMEM;
+}
+#endif
+
+static inline u32 mls_range_hash(const struct mls_range *r, u32 hash)
+{
+ hash = jhash_2words(r->level[0].sens, r->level[1].sens, hash);
+ hash = ebitmap_hash(&r->level[0].cat, hash);
+ hash = ebitmap_hash(&r->level[1].cat, hash);
+ return hash;
+}
+
+#endif /* _SS_MLS_H_ */
+
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
new file mode 100644
index 000000000..7d48d5e52
--- /dev/null
+++ b/security/selinux/ss/mls_types.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the multi-level security (MLS) policy.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ *
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ */
+
+#ifndef _SS_MLS_TYPES_H_
+#define _SS_MLS_TYPES_H_
+
+#include "security.h"
+#include "ebitmap.h"
+
+struct mls_level {
+ u32 sens; /* sensitivity */
+ struct ebitmap cat; /* category set */
+};
+
+struct mls_range {
+ struct mls_level level[2]; /* low == level[0], high == level[1] */
+};
+
+static inline int mls_level_eq(const struct mls_level *l1, const struct mls_level *l2)
+{
+ return ((l1->sens == l2->sens) &&
+ ebitmap_cmp(&l1->cat, &l2->cat));
+}
+
+static inline int mls_level_dom(const struct mls_level *l1, const struct mls_level *l2)
+{
+ return ((l1->sens >= l2->sens) &&
+ ebitmap_contains(&l1->cat, &l2->cat, 0));
+}
+
+#define mls_level_incomp(l1, l2) \
+(!mls_level_dom((l1), (l2)) && !mls_level_dom((l2), (l1)))
+
+#define mls_level_between(l1, l2, l3) \
+(mls_level_dom((l1), (l2)) && mls_level_dom((l3), (l1)))
+
+#define mls_range_contains(r1, r2) \
+(mls_level_dom(&(r2).level[0], &(r1).level[0]) && \
+ mls_level_dom(&(r1).level[1], &(r2).level[1]))
+
+#endif /* _SS_MLS_TYPES_H_ */
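The dominance relation defined above is the heart of MLS: one level dominates another when its sensitivity is at least as high and its category set is a superset, and two levels neither of which dominates the other are incomparable. A small userspace model using a 64-bit mask as a toy category set (the kernel uses extensible bitmaps, so this is illustrative only):

#include <assert.h>
#include <stdint.h>

struct lvl { uint32_t sens; uint64_t cat; }; /* cat: toy 64-bit category set */

static int lvl_eq(const struct lvl *a, const struct lvl *b)
{
    return a->sens == b->sens && a->cat == b->cat;
}

static int lvl_dom(const struct lvl *a, const struct lvl *b)
{
    return a->sens >= b->sens && (a->cat & b->cat) == b->cat;
}

#define lvl_incomp(a, b) (!lvl_dom((a), (b)) && !lvl_dom((b), (a)))

int main(void)
{
    struct lvl s2_c01 = { 2, 0x3 }; /* s2:c0,c1 */
    struct lvl s1_c0  = { 1, 0x1 }; /* s1:c0    */
    struct lvl s1_c2  = { 1, 0x4 }; /* s1:c2    */

    assert(lvl_dom(&s2_c01, &s1_c0));    /* higher sens, superset of cats */
    assert(!lvl_dom(&s1_c0, &s2_c01));
    assert(lvl_incomp(&s2_c01, &s1_c2)); /* disjoint category sets */
    assert(lvl_eq(&s1_c0, &s1_c0));
    return 0;
}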
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
new file mode 100644
index 000000000..6f9ff4643
--- /dev/null
+++ b/security/selinux/ss/policydb.c
@@ -0,0 +1,3731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the policy database.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support for the policy capability bitmap
+ *
+ * Updated: Mellanox Technologies
+ *
+ * Added Infiniband support
+ *
+ * Copyright (C) 2016 Mellanox Technologies
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/audit.h>
+#include "security.h"
+
+#include "policydb.h"
+#include "conditional.h"
+#include "mls.h"
+#include "services.h"
+
+#ifdef DEBUG_HASHES
+static const char *symtab_name[SYM_NUM] = {
+ "common prefixes",
+ "classes",
+ "roles",
+ "types",
+ "users",
+ "bools",
+ "levels",
+ "categories",
+};
+#endif
+
+struct policydb_compat_info {
+ int version;
+ int sym_num;
+ int ocon_num;
+};
+
+/* These need to be updated if SYM_NUM or OCON_NUM changes */
+static const struct policydb_compat_info policydb_compat[] = {
+ {
+ .version = POLICYDB_VERSION_BASE,
+ .sym_num = SYM_NUM - 3,
+ .ocon_num = OCON_NUM - 3,
+ },
+ {
+ .version = POLICYDB_VERSION_BOOL,
+ .sym_num = SYM_NUM - 2,
+ .ocon_num = OCON_NUM - 3,
+ },
+ {
+ .version = POLICYDB_VERSION_IPV6,
+ .sym_num = SYM_NUM - 2,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_NLCLASS,
+ .sym_num = SYM_NUM - 2,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_MLS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_AVTAB,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_RANGETRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_POLCAP,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_PERMISSIVE,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_BOUNDARY,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_FILENAME_TRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_ROLETRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_DEFAULT_TYPE,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_CONSTRAINT_NAMES,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_XPERMS_IOCTL,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM - 2,
+ },
+ {
+ .version = POLICYDB_VERSION_INFINIBAND,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_GLBLUB,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_COMP_FTRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+};
+
+static const struct policydb_compat_info *policydb_lookup_compat(int version)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(policydb_compat); i++) {
+ if (policydb_compat[i].version == version)
+ return &policydb_compat[i];
+ }
+
+ return NULL;
+}
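policydb_lookup_compat() is a plain linear scan over a version-keyed table; each entry records how many symbol tables and object-context lists that on-disk format carries, which drives the read loops later in policydb_read(). The pattern in isolation, with made-up version numbers and counts:

#include <assert.h>
#include <stddef.h>

struct compat { int version, sym_num, ocon_num; };

/* Hypothetical layout table; the real values live in policydb_compat[]. */
static const struct compat tbl[] = {
    { 15, 5, 4 },
    { 16, 6, 4 },
    { 17, 6, 5 },
};

static const struct compat *lookup(int version)
{
    size_t i;

    for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (tbl[i].version == version)
            return &tbl[i];
    return NULL; /* unknown version: caller rejects the policy image */
}

int main(void)
{
    assert(lookup(16)->sym_num == 6);
    assert(lookup(99) == NULL);
    return 0;
}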
+
+/*
+ * The following *_destroy functions are used to
+ * free any memory allocated for each kind of
+ * symbol data in the policy database.
+ */
+
+static int perm_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int common_destroy(void *key, void *datum, void *p)
+{
+ struct common_datum *comdatum;
+
+ kfree(key);
+ if (datum) {
+ comdatum = datum;
+ hashtab_map(&comdatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(&comdatum->permissions.table);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
+static int cls_destroy(void *key, void *datum, void *p)
+{
+ struct class_datum *cladatum;
+ struct constraint_node *constraint, *ctemp;
+ struct constraint_expr *e, *etmp;
+
+ kfree(key);
+ if (datum) {
+ cladatum = datum;
+ hashtab_map(&cladatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(&cladatum->permissions.table);
+ constraint = cladatum->constraints;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+
+ constraint = cladatum->validatetrans;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+ kfree(cladatum->comkey);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int role_destroy(void *key, void *datum, void *p)
+{
+ struct role_datum *role;
+
+ kfree(key);
+ if (datum) {
+ role = datum;
+ ebitmap_destroy(&role->dominates);
+ ebitmap_destroy(&role->types);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int type_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int user_destroy(void *key, void *datum, void *p)
+{
+ struct user_datum *usrdatum;
+
+ kfree(key);
+ if (datum) {
+ usrdatum = datum;
+ ebitmap_destroy(&usrdatum->roles);
+ ebitmap_destroy(&usrdatum->range.level[0].cat);
+ ebitmap_destroy(&usrdatum->range.level[1].cat);
+ ebitmap_destroy(&usrdatum->dfltlevel.cat);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int sens_destroy(void *key, void *datum, void *p)
+{
+ struct level_datum *levdatum;
+
+ kfree(key);
+ if (datum) {
+ levdatum = datum;
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int cat_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int (*const destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) = {
+ common_destroy,
+ cls_destroy,
+ role_destroy,
+ type_destroy,
+ user_destroy,
+ cond_destroy_bool,
+ sens_destroy,
+ cat_destroy,
+};
+
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+ struct filename_trans_key *ft = key;
+ struct filename_trans_datum *next, *d = datum;
+
+ kfree(ft->name);
+ kfree(key);
+ do {
+ ebitmap_destroy(&d->stypes);
+ next = d->next;
+ kfree(d);
+ d = next;
+ } while (unlikely(d));
+ cond_resched();
+ return 0;
+}
+
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static int role_tr_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static void ocontext_destroy(struct ocontext *c, int i)
+{
+ if (!c)
+ return;
+
+ context_destroy(&c->context[0]);
+ context_destroy(&c->context[1]);
+ if (i == OCON_ISID || i == OCON_FS ||
+ i == OCON_NETIF || i == OCON_FSUSE)
+ kfree(c->u.name);
+ kfree(c);
+}
+
+/*
+ * Initialize the role table.
+ */
+static int roles_init(struct policydb *p)
+{
+ char *key = NULL;
+ int rc;
+ struct role_datum *role;
+
+ role = kzalloc(sizeof(*role), GFP_KERNEL);
+ if (!role)
+ return -ENOMEM;
+
+ rc = -EINVAL;
+ role->value = ++p->p_roles.nprim;
+ if (role->value != OBJECT_R_VAL)
+ goto out;
+
+ rc = -ENOMEM;
+ key = kstrdup(OBJECT_R, GFP_KERNEL);
+ if (!key)
+ goto out;
+
+ rc = symtab_insert(&p->p_roles, key, role);
+ if (rc)
+ goto out;
+
+ return 0;
+out:
+ kfree(key);
+ kfree(role);
+ return rc;
+}
+
+static u32 filenametr_hash(const void *k)
+{
+ const struct filename_trans_key *ft = k;
+ unsigned long hash;
+ unsigned int byte_num;
+ unsigned char focus;
+
+ hash = ft->ttype ^ ft->tclass;
+
+ byte_num = 0;
+ while ((focus = ft->name[byte_num++]))
+ hash = partial_name_hash(focus, hash);
+ return hash;
+}
+
+static int filenametr_cmp(const void *k1, const void *k2)
+{
+ const struct filename_trans_key *ft1 = k1;
+ const struct filename_trans_key *ft2 = k2;
+ int v;
+
+ v = ft1->ttype - ft2->ttype;
+ if (v)
+ return v;
+
+ v = ft1->tclass - ft2->tclass;
+ if (v)
+ return v;
+
+ return strcmp(ft1->name, ft2->name);
+}
+
+static const struct hashtab_key_params filenametr_key_params = {
+ .hash = filenametr_hash,
+ .cmp = filenametr_cmp,
+};
+
+struct filename_trans_datum *policydb_filenametr_search(
+ struct policydb *p, struct filename_trans_key *key)
+{
+ return hashtab_search(&p->filename_trans, key, filenametr_key_params);
+}
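filenametr_hash() seeds the hash with the fixed key fields (ttype ^ tclass) and then folds in the path component one byte at a time. A userspace stand-in, assuming partial_name_hash() follows the mainline recurrence (prev + (c << 4) + (c >> 4)) * 11:

#include <stdio.h>

/* Assumed to match the kernel's partial_name_hash() recurrence. */
static unsigned long partial_name_hash(unsigned long c, unsigned long prev)
{
    return (prev + (c << 4) + (c >> 4)) * 11;
}

static unsigned long ftrans_hash(unsigned ttype, unsigned tclass,
                                 const char *name)
{
    unsigned long hash = ttype ^ tclass; /* seed with the fixed key fields */
    unsigned char c;

    while ((c = (unsigned char)*name++))
        hash = partial_name_hash(c, hash);
    return hash;
}

int main(void)
{
    printf("%lu\n", ftrans_hash(42, 7, "passwd"));
    return 0;
}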
+
+static u32 rangetr_hash(const void *k)
+{
+ const struct range_trans *key = k;
+
+ return key->source_type + (key->target_type << 3) +
+ (key->target_class << 5);
+}
+
+static int rangetr_cmp(const void *k1, const void *k2)
+{
+ const struct range_trans *key1 = k1, *key2 = k2;
+ int v;
+
+ v = key1->source_type - key2->source_type;
+ if (v)
+ return v;
+
+ v = key1->target_type - key2->target_type;
+ if (v)
+ return v;
+
+ v = key1->target_class - key2->target_class;
+
+ return v;
+}
+
+static const struct hashtab_key_params rangetr_key_params = {
+ .hash = rangetr_hash,
+ .cmp = rangetr_cmp,
+};
+
+struct mls_range *policydb_rangetr_search(struct policydb *p,
+ struct range_trans *key)
+{
+ return hashtab_search(&p->range_tr, key, rangetr_key_params);
+}
+
+static u32 role_trans_hash(const void *k)
+{
+ const struct role_trans_key *key = k;
+
+ return key->role + (key->type << 3) + (key->tclass << 5);
+}
+
+static int role_trans_cmp(const void *k1, const void *k2)
+{
+ const struct role_trans_key *key1 = k1, *key2 = k2;
+ int v;
+
+ v = key1->role - key2->role;
+ if (v)
+ return v;
+
+ v = key1->type - key2->type;
+ if (v)
+ return v;
+
+ return key1->tclass - key2->tclass;
+}
+
+static const struct hashtab_key_params roletr_key_params = {
+ .hash = role_trans_hash,
+ .cmp = role_trans_cmp,
+};
+
+struct role_trans_datum *policydb_roletr_search(struct policydb *p,
+ struct role_trans_key *key)
+{
+ return hashtab_search(&p->role_tr, key, roletr_key_params);
+}
+
+/*
+ * Initialize a policy database structure.
+ */
+static void policydb_init(struct policydb *p)
+{
+ memset(p, 0, sizeof(*p));
+
+ avtab_init(&p->te_avtab);
+ cond_policydb_init(p);
+
+ ebitmap_init(&p->filename_trans_ttypes);
+ ebitmap_init(&p->policycaps);
+ ebitmap_init(&p->permissive_map);
+}
+
+/*
+ * The following *_index functions are used to
+ * define the val_to_name and val_to_struct arrays
+ * in a policy database structure. The val_to_name
+ * arrays are used when converting security context
+ * structures into string representations. The
+ * val_to_struct arrays are used when the attributes
+ * of a class, role, or user are needed.
+ */
+
+static int common_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct common_datum *comdatum;
+
+ comdatum = datum;
+ p = datap;
+ if (!comdatum->value || comdatum->value > p->p_commons.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_COMMONS][comdatum->value - 1] = key;
+
+ return 0;
+}
+
+static int class_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct class_datum *cladatum;
+
+ cladatum = datum;
+ p = datap;
+ if (!cladatum->value || cladatum->value > p->p_classes.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_CLASSES][cladatum->value - 1] = key;
+ p->class_val_to_struct[cladatum->value - 1] = cladatum;
+ return 0;
+}
+
+static int role_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct role_datum *role;
+
+ role = datum;
+ p = datap;
+ if (!role->value
+ || role->value > p->p_roles.nprim
+ || role->bounds > p->p_roles.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_ROLES][role->value - 1] = key;
+ p->role_val_to_struct[role->value - 1] = role;
+ return 0;
+}
+
+static int type_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct type_datum *typdatum;
+
+ typdatum = datum;
+ p = datap;
+
+ if (typdatum->primary) {
+ if (!typdatum->value
+ || typdatum->value > p->p_types.nprim
+ || typdatum->bounds > p->p_types.nprim)
+ return -EINVAL;
+ p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
+ p->type_val_to_struct[typdatum->value - 1] = typdatum;
+ }
+
+ return 0;
+}
+
+static int user_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct user_datum *usrdatum;
+
+ usrdatum = datum;
+ p = datap;
+ if (!usrdatum->value
+ || usrdatum->value > p->p_users.nprim
+ || usrdatum->bounds > p->p_users.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_USERS][usrdatum->value - 1] = key;
+ p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
+ return 0;
+}
+
+static int sens_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct level_datum *levdatum;
+
+ levdatum = datum;
+ p = datap;
+
+ if (!levdatum->isalias) {
+ if (!levdatum->level->sens ||
+ levdatum->level->sens > p->p_levels.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_LEVELS][levdatum->level->sens - 1] = key;
+ }
+
+ return 0;
+}
+
+static int cat_index(void *key, void *datum, void *datap)
+{
+ struct policydb *p;
+ struct cat_datum *catdatum;
+
+ catdatum = datum;
+ p = datap;
+
+ if (!catdatum->isalias) {
+ if (!catdatum->value || catdatum->value > p->p_cats.nprim)
+ return -EINVAL;
+
+ p->sym_val_to_name[SYM_CATS][catdatum->value - 1] = key;
+ }
+
+ return 0;
+}
+
+static int (*const index_f[SYM_NUM]) (void *key, void *datum, void *datap) = {
+ common_index,
+ class_index,
+ role_index,
+ type_index,
+ user_index,
+ cond_index_bool,
+ sens_index,
+ cat_index,
+};
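Every *_index callback follows the same shape: validate the datum's 1-based value against the symbol table's nprim, then record the key in the 0-based val_to_name slot (and, where present, the val_to_struct slot). That shape, modeled on its own with hypothetical names:

#include <assert.h>

struct datum { unsigned value; }; /* values are 1-based; 0 means unset */

static int index_one(const char *key, const struct datum *d,
                     const char **val_to_name, unsigned nprim)
{
    if (!d->value || d->value > nprim)
        return -1;                   /* out-of-range value: reject policy */
    val_to_name[d->value - 1] = key; /* 1-based value -> 0-based slot */
    return 0;
}

int main(void)
{
    const char *names[2] = { 0 };
    struct datum a = { 1 }, b = { 2 }, bad = { 3 };

    assert(index_one("user_u", &a, names, 2) == 0);
    assert(index_one("system_u", &b, names, 2) == 0);
    assert(index_one("oops", &bad, names, 2) < 0);
    assert(names[0] && names[1]);
    return 0;
}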
+
+#ifdef DEBUG_HASHES
+static void hash_eval(struct hashtab *h, const char *hash_name)
+{
+ struct hashtab_info info;
+
+ hashtab_stat(h, &info);
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, longest chain length %d\n",
+ hash_name, h->nel, info.slots_used, h->size,
+ info.max_chain_len);
+}
+
+static void symtab_hash_eval(struct symtab *s)
+{
+ int i;
+
+ for (i = 0; i < SYM_NUM; i++)
+ hash_eval(&s[i].table, symtab_name[i]);
+}
+
+#else
+static inline void hash_eval(struct hashtab *h, const char *hash_name)
+{
+}
+#endif
+
+/*
+ * Define the other val_to_name and val_to_struct arrays
+ * in a policy database structure.
+ *
+ * Caller must clean up on failure.
+ */
+static int policydb_index(struct policydb *p)
+{
+ int i, rc;
+
+ if (p->mls_enabled)
+ pr_debug("SELinux: %d users, %d roles, %d types, %d bools, %d sens, %d cats\n",
+ p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
+ p->p_bools.nprim, p->p_levels.nprim, p->p_cats.nprim);
+ else
+ pr_debug("SELinux: %d users, %d roles, %d types, %d bools\n",
+ p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
+ p->p_bools.nprim);
+
+ pr_debug("SELinux: %d classes, %d rules\n",
+ p->p_classes.nprim, p->te_avtab.nel);
+
+#ifdef DEBUG_HASHES
+ avtab_hash_eval(&p->te_avtab, "rules");
+ symtab_hash_eval(p->symtab);
+#endif
+
+ p->class_val_to_struct = kcalloc(p->p_classes.nprim,
+ sizeof(*p->class_val_to_struct),
+ GFP_KERNEL);
+ if (!p->class_val_to_struct)
+ return -ENOMEM;
+
+ p->role_val_to_struct = kcalloc(p->p_roles.nprim,
+ sizeof(*p->role_val_to_struct),
+ GFP_KERNEL);
+ if (!p->role_val_to_struct)
+ return -ENOMEM;
+
+ p->user_val_to_struct = kcalloc(p->p_users.nprim,
+ sizeof(*p->user_val_to_struct),
+ GFP_KERNEL);
+ if (!p->user_val_to_struct)
+ return -ENOMEM;
+
+ p->type_val_to_struct = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_val_to_struct),
+ GFP_KERNEL);
+ if (!p->type_val_to_struct)
+ return -ENOMEM;
+
+ rc = cond_init_bool_indexes(p);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < SYM_NUM; i++) {
+ p->sym_val_to_name[i] = kvcalloc(p->symtab[i].nprim,
+ sizeof(char *),
+ GFP_KERNEL);
+ if (!p->sym_val_to_name[i])
+ return -ENOMEM;
+
+ rc = hashtab_map(&p->symtab[i].table, index_f[i], p);
+ if (rc)
+ goto out;
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
+/*
+ * Free any memory allocated by a policy database structure.
+ */
+void policydb_destroy(struct policydb *p)
+{
+ struct ocontext *c, *ctmp;
+ struct genfs *g, *gtmp;
+ int i;
+ struct role_allow *ra, *lra = NULL;
+
+ for (i = 0; i < SYM_NUM; i++) {
+ cond_resched();
+ hashtab_map(&p->symtab[i].table, destroy_f[i], NULL);
+ hashtab_destroy(&p->symtab[i].table);
+ }
+
+ for (i = 0; i < SYM_NUM; i++)
+ kvfree(p->sym_val_to_name[i]);
+
+ kfree(p->class_val_to_struct);
+ kfree(p->role_val_to_struct);
+ kfree(p->user_val_to_struct);
+ kvfree(p->type_val_to_struct);
+
+ avtab_destroy(&p->te_avtab);
+
+ for (i = 0; i < OCON_NUM; i++) {
+ cond_resched();
+ c = p->ocontexts[i];
+ while (c) {
+ ctmp = c;
+ c = c->next;
+ ocontext_destroy(ctmp, i);
+ }
+ p->ocontexts[i] = NULL;
+ }
+
+ g = p->genfs;
+ while (g) {
+ cond_resched();
+ kfree(g->fstype);
+ c = g->head;
+ while (c) {
+ ctmp = c;
+ c = c->next;
+ ocontext_destroy(ctmp, OCON_FSUSE);
+ }
+ gtmp = g;
+ g = g->next;
+ kfree(gtmp);
+ }
+ p->genfs = NULL;
+
+ cond_policydb_destroy(p);
+
+ hashtab_map(&p->role_tr, role_tr_destroy, NULL);
+ hashtab_destroy(&p->role_tr);
+
+ for (ra = p->role_allow; ra; ra = ra->next) {
+ cond_resched();
+ kfree(lra);
+ lra = ra;
+ }
+ kfree(lra);
+
+ hashtab_map(&p->filename_trans, filenametr_destroy, NULL);
+ hashtab_destroy(&p->filename_trans);
+
+ hashtab_map(&p->range_tr, range_tr_destroy, NULL);
+ hashtab_destroy(&p->range_tr);
+
+ if (p->type_attr_map_array) {
+ for (i = 0; i < p->p_types.nprim; i++)
+ ebitmap_destroy(&p->type_attr_map_array[i]);
+ kvfree(p->type_attr_map_array);
+ }
+
+ ebitmap_destroy(&p->filename_trans_ttypes);
+ ebitmap_destroy(&p->policycaps);
+ ebitmap_destroy(&p->permissive_map);
+}
+
+/*
+ * Load the initial SIDs specified in a policy database
+ * structure into a SID table.
+ */
+int policydb_load_isids(struct policydb *p, struct sidtab *s)
+{
+ struct ocontext *head, *c;
+ int rc;
+
+ rc = sidtab_init(s);
+ if (rc) {
+ pr_err("SELinux: out of memory on SID table init\n");
+ return rc;
+ }
+
+ head = p->ocontexts[OCON_ISID];
+ for (c = head; c; c = c->next) {
+ u32 sid = c->sid[0];
+ const char *name = security_get_initial_sid_context(sid);
+
+ if (sid == SECSID_NULL) {
+ pr_err("SELinux: SID 0 was assigned a context.\n");
+ sidtab_destroy(s);
+ return -EINVAL;
+ }
+
+ /* Ignore initial SIDs unused by this kernel. */
+ if (!name)
+ continue;
+
+ rc = sidtab_set_initial(s, sid, &c->context[0]);
+ if (rc) {
+ pr_err("SELinux: unable to load initial SID %s.\n",
+ name);
+ sidtab_destroy(s);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+int policydb_class_isvalid(struct policydb *p, unsigned int class)
+{
+ if (!class || class > p->p_classes.nprim)
+ return 0;
+ return 1;
+}
+
+int policydb_role_isvalid(struct policydb *p, unsigned int role)
+{
+ if (!role || role > p->p_roles.nprim)
+ return 0;
+ return 1;
+}
+
+int policydb_type_isvalid(struct policydb *p, unsigned int type)
+{
+ if (!type || type > p->p_types.nprim)
+ return 0;
+ return 1;
+}
+
+/*
+ * Return 1 if the fields in the security context
+ * structure `c' are valid. Return 0 otherwise.
+ */
+int policydb_context_isvalid(struct policydb *p, struct context *c)
+{
+ struct role_datum *role;
+ struct user_datum *usrdatum;
+
+ if (!c->role || c->role > p->p_roles.nprim)
+ return 0;
+
+ if (!c->user || c->user > p->p_users.nprim)
+ return 0;
+
+ if (!c->type || c->type > p->p_types.nprim)
+ return 0;
+
+ if (c->role != OBJECT_R_VAL) {
+ /*
+ * Role must be authorized for the type.
+ */
+ role = p->role_val_to_struct[c->role - 1];
+ if (!role || !ebitmap_get_bit(&role->types, c->type - 1))
+ /* role may not be associated with type */
+ return 0;
+
+ /*
+ * User must be authorized for the role.
+ */
+ usrdatum = p->user_val_to_struct[c->user - 1];
+ if (!usrdatum)
+ return 0;
+
+ if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1))
+ /* user may not be associated with role */
+ return 0;
+ }
+
+ if (!mls_context_isvalid(p, c))
+ return 0;
+
+ return 1;
+}
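policydb_context_isvalid() enforces two authorization edges: the role must be authorized for the type (role->types) and the user for the role (usrdatum->roles), with object_r exempted. The same two checks over toy bitmaps (values and layout are hypothetical):

#include <assert.h>
#include <stdint.h>

struct role { uint64_t types; }; /* bit t-1 set: role may use type t */
struct user { uint64_t roles; }; /* bit r-1 set: user may take role r */

static int context_valid(const struct user *u, const struct role *r,
                         unsigned role_val, unsigned type_val)
{
    if (!(r->types & (1ULL << (type_val - 1))))
        return 0; /* role not associated with type */
    if (!(u->roles & (1ULL << (role_val - 1))))
        return 0; /* user not associated with role */
    return 1;
}

int main(void)
{
    struct role staff_r = { .types = 1ULL << 0 }; /* authorized for type 1 */
    struct user staff_u = { .roles = 1ULL << 1 }; /* authorized for role 2 */

    assert(context_valid(&staff_u, &staff_r, 2, 1));
    assert(!context_valid(&staff_u, &staff_r, 2, 2));
    return 0;
}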
+
+/*
+ * Read a MLS range structure from a policydb binary
+ * representation file.
+ */
+static int mls_read_range_helper(struct mls_range *r, void *fp)
+{
+ __le32 buf[2];
+ u32 items;
+ int rc;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ rc = -EINVAL;
+ items = le32_to_cpu(buf[0]);
+ if (items > ARRAY_SIZE(buf)) {
+ pr_err("SELinux: mls: range overflow\n");
+ goto out;
+ }
+
+ rc = next_entry(buf, fp, sizeof(u32) * items);
+ if (rc) {
+ pr_err("SELinux: mls: truncated range\n");
+ goto out;
+ }
+
+ r->level[0].sens = le32_to_cpu(buf[0]);
+ if (items > 1)
+ r->level[1].sens = le32_to_cpu(buf[1]);
+ else
+ r->level[1].sens = r->level[0].sens;
+
+ rc = ebitmap_read(&r->level[0].cat, fp);
+ if (rc) {
+ pr_err("SELinux: mls: error reading low categories\n");
+ goto out;
+ }
+ if (items > 1) {
+ rc = ebitmap_read(&r->level[1].cat, fp);
+ if (rc) {
+ pr_err("SELinux: mls: error reading high categories\n");
+ goto bad_high;
+ }
+ } else {
+ rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat);
+ if (rc) {
+ pr_err("SELinux: mls: out of memory\n");
+ goto bad_high;
+ }
+ }
+
+ return 0;
+bad_high:
+ ebitmap_destroy(&r->level[0].cat);
+out:
+ return rc;
+}
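On disk, an MLS range is an item count followed by one or two levels; with a single item the high level defaults to the low one, which is why mls_read_range_helper() copies level[0] into level[1] in the one-item case. A userspace model of just the sensitivity handling (the explicit zero-items guard is an addition here):

#include <assert.h>
#include <stdint.h>

struct range { uint32_t low, high; };

static int parse_range(struct range *r, const uint32_t *words, uint32_t nwords)
{
    uint32_t items;

    if (nwords < 1)
        return -1;
    items = words[0];
    if (items < 1 || items > 2 || nwords < 1 + items)
        return -1; /* "range overflow" or truncated input */
    r->low = words[1];
    r->high = (items > 1) ? words[2] : r->low; /* high defaults to low */
    return 0;
}

int main(void)
{
    struct range r;
    uint32_t one[] = { 1, 3 }, two[] = { 2, 1, 4 };

    assert(parse_range(&r, one, 2) == 0 && r.low == 3 && r.high == 3);
    assert(parse_range(&r, two, 3) == 0 && r.low == 1 && r.high == 4);
    return 0;
}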
+
+/*
+ * Read and validate a security context structure
+ * from a policydb binary representation file.
+ */
+static int context_read_and_validate(struct context *c,
+ struct policydb *p,
+ void *fp)
+{
+ __le32 buf[3];
+ int rc;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc) {
+ pr_err("SELinux: context truncated\n");
+ goto out;
+ }
+ c->user = le32_to_cpu(buf[0]);
+ c->role = le32_to_cpu(buf[1]);
+ c->type = le32_to_cpu(buf[2]);
+ if (p->policyvers >= POLICYDB_VERSION_MLS) {
+ rc = mls_read_range_helper(&c->range, fp);
+ if (rc) {
+ pr_err("SELinux: error reading MLS range of context\n");
+ goto out;
+ }
+ }
+
+ rc = -EINVAL;
+ if (!policydb_context_isvalid(p, c)) {
+ pr_err("SELinux: invalid security context\n");
+ context_destroy(c);
+ goto out;
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
+/*
+ * The following *_read functions are used to
+ * read the symbol data from a policy database
+ * binary representation file.
+ */
+
+static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
+{
+ int rc;
+ char *str;
+
+ if ((len == 0) || (len == (u32)-1))
+ return -EINVAL;
+
+ str = kmalloc(len + 1, flags | __GFP_NOWARN);
+ if (!str)
+ return -ENOMEM;
+
+ rc = next_entry(str, fp, len);
+ if (rc) {
+ kfree(str);
+ return rc;
+ }
+
+ str[len] = '\0';
+ *strp = str;
+ return 0;
+}
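str_read() rejects a zero length and (u32)-1, the latter because it would wrap the len + 1 allocation, then copies an unterminated, length-prefixed string and NUL-terminates it. A self-contained model over an in-memory image (the cursor type is a stand-in for the kernel's policy-file handle):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cursor { const uint8_t *p, *end; };

static int next_entry(void *dst, struct cursor *c, size_t len)
{
    if ((size_t)(c->end - c->p) < len)
        return -1; /* truncated input */
    memcpy(dst, c->p, len);
    c->p += len;
    return 0;
}

static int str_read(char **strp, struct cursor *c, uint32_t len)
{
    char *str;

    if (len == 0 || len == (uint32_t)-1) /* guard the len + 1 overflow */
        return -1;
    str = malloc(len + 1);
    if (!str)
        return -1;
    if (next_entry(str, c, len)) {
        free(str);
        return -1;
    }
    str[len] = '\0';
    *strp = str;
    return 0;
}

int main(void)
{
    const uint8_t img[] = { 'o', 'b', 'j', 'e', 'c', 't', '_', 'r' };
    struct cursor c = { img, img + sizeof(img) };
    char *s;

    if (str_read(&s, &c, 8) == 0) {
        puts(s); /* object_r */
        free(s);
    }
    return 0;
}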
+
+static int perm_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct perm_datum *perdatum;
+ int rc;
+ __le32 buf[2];
+ u32 len;
+
+ perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL);
+ if (!perdatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ perdatum->value = le32_to_cpu(buf[1]);
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = symtab_insert(s, key, perdatum);
+ if (rc)
+ goto bad;
+
+ return 0;
+bad:
+ perm_destroy(key, perdatum, NULL);
+ return rc;
+}
+
+static int common_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct common_datum *comdatum;
+ __le32 buf[4];
+ u32 len, nel;
+ int i, rc;
+
+ comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL);
+ if (!comdatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ comdatum->value = le32_to_cpu(buf[1]);
+ nel = le32_to_cpu(buf[3]);
+
+ rc = symtab_init(&comdatum->permissions, nel);
+ if (rc)
+ goto bad;
+ comdatum->permissions.nprim = le32_to_cpu(buf[2]);
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ for (i = 0; i < nel; i++) {
+ rc = perm_read(p, &comdatum->permissions, fp);
+ if (rc)
+ goto bad;
+ }
+
+ rc = symtab_insert(s, key, comdatum);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ common_destroy(key, comdatum, NULL);
+ return rc;
+}
+
+static void type_set_init(struct type_set *t)
+{
+ ebitmap_init(&t->types);
+ ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ if (ebitmap_read(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_read(&t->negset, fp))
+ return -EINVAL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc < 0)
+ return -EINVAL;
+ t->flags = le32_to_cpu(buf[0]);
+
+ return 0;
+}
+
+static int read_cons_helper(struct policydb *p,
+ struct constraint_node **nodep,
+ int ncons, int allowxtarget, void *fp)
+{
+ struct constraint_node *c, *lc;
+ struct constraint_expr *e, *le;
+ __le32 buf[3];
+ u32 nexpr;
+ int rc, i, j, depth;
+
+ lc = NULL;
+ for (i = 0; i < ncons; i++) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ if (lc)
+ lc->next = c;
+ else
+ *nodep = c;
+
+ rc = next_entry(buf, fp, (sizeof(u32) * 2));
+ if (rc)
+ return rc;
+ c->permissions = le32_to_cpu(buf[0]);
+ nexpr = le32_to_cpu(buf[1]);
+ le = NULL;
+ depth = -1;
+ for (j = 0; j < nexpr; j++) {
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ if (le)
+ le->next = e;
+ else
+ c->expr = e;
+
+ rc = next_entry(buf, fp, (sizeof(u32) * 3));
+ if (rc)
+ return rc;
+ e->expr_type = le32_to_cpu(buf[0]);
+ e->attr = le32_to_cpu(buf[1]);
+ e->op = le32_to_cpu(buf[2]);
+
+ switch (e->expr_type) {
+ case CEXPR_NOT:
+ if (depth < 0)
+ return -EINVAL;
+ break;
+ case CEXPR_AND:
+ case CEXPR_OR:
+ if (depth < 1)
+ return -EINVAL;
+ depth--;
+ break;
+ case CEXPR_ATTR:
+ if (depth == (CEXPR_MAXDEPTH - 1))
+ return -EINVAL;
+ depth++;
+ break;
+ case CEXPR_NAMES:
+ if (!allowxtarget && (e->attr & CEXPR_XTARGET))
+ return -EINVAL;
+ if (depth == (CEXPR_MAXDEPTH - 1))
+ return -EINVAL;
+ depth++;
+ rc = ebitmap_read(&e->names, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ e->type_names = kzalloc(sizeof
+ (*e->type_names), GFP_KERNEL);
+ if (!e->type_names)
+ return -ENOMEM;
+ type_set_init(e->type_names);
+ rc = type_set_read(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ le = e;
+ }
+ if (depth != 0)
+ return -EINVAL;
+ lc = c;
+ }
+
+ return 0;
+}
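Constraint expressions arrive in postfix order, and read_cons_helper() validates them with a depth counter: attribute and name leaves push a value, NOT needs one operand and leaves the depth unchanged, and AND/OR consume one, so a well-formed expression ends at depth zero. A standalone validator applying the same rules (the enum values are illustrative, not the CEXPR_* constants):

#include <assert.h>

enum { EXPR_ATTR, EXPR_NAMES, EXPR_NOT, EXPR_AND, EXPR_OR };

#define MAXDEPTH 5

static int expr_valid(const int *expr, int n)
{
    int depth = -1, i;

    for (i = 0; i < n; i++) {
        switch (expr[i]) {
        case EXPR_NOT:
            if (depth < 0)
                return 0;
            break;            /* unary: depth unchanged */
        case EXPR_AND:
        case EXPR_OR:
            if (depth < 1)
                return 0;
            depth--;          /* binary: consumes one value */
            break;
        case EXPR_ATTR:
        case EXPR_NAMES:
            if (depth == MAXDEPTH - 1)
                return 0;
            depth++;          /* leaf: pushes one value */
            break;
        default:
            return 0;
        }
    }
    return depth == 0;        /* exactly one value left */
}

int main(void)
{
    int ok[]  = { EXPR_ATTR, EXPR_NAMES, EXPR_AND, EXPR_NOT };
    int bad[] = { EXPR_AND }; /* operator with no operands */

    assert(expr_valid(ok, 4));
    assert(!expr_valid(bad, 1));
    return 0;
}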
+
+static int class_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct class_datum *cladatum;
+ __le32 buf[6];
+ u32 len, len2, ncons, nel;
+ int i, rc;
+
+ cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL);
+ if (!cladatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 6);
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ len2 = le32_to_cpu(buf[1]);
+ cladatum->value = le32_to_cpu(buf[2]);
+ nel = le32_to_cpu(buf[4]);
+
+ rc = symtab_init(&cladatum->permissions, nel);
+ if (rc)
+ goto bad;
+ cladatum->permissions.nprim = le32_to_cpu(buf[3]);
+
+ ncons = le32_to_cpu(buf[5]);
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ if (len2) {
+ rc = str_read(&cladatum->comkey, GFP_KERNEL, fp, len2);
+ if (rc)
+ goto bad;
+
+ rc = -EINVAL;
+ cladatum->comdatum = symtab_search(&p->p_commons,
+ cladatum->comkey);
+ if (!cladatum->comdatum) {
+ pr_err("SELinux: unknown common %s\n",
+ cladatum->comkey);
+ goto bad;
+ }
+ }
+ for (i = 0; i < nel; i++) {
+ rc = perm_read(p, &cladatum->permissions, fp);
+ if (rc)
+ goto bad;
+ }
+
+ rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
+ if (rc)
+ goto bad;
+
+ if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) {
+ /* grab the validatetrans rules */
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ ncons = le32_to_cpu(buf[0]);
+ rc = read_cons_helper(p, &cladatum->validatetrans,
+ ncons, 1, fp);
+ if (rc)
+ goto bad;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto bad;
+
+ cladatum->default_user = le32_to_cpu(buf[0]);
+ cladatum->default_role = le32_to_cpu(buf[1]);
+ cladatum->default_range = le32_to_cpu(buf[2]);
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ cladatum->default_type = le32_to_cpu(buf[0]);
+ }
+
+ rc = symtab_insert(s, key, cladatum);
+ if (rc)
+ goto bad;
+
+ return 0;
+bad:
+ cls_destroy(key, cladatum, NULL);
+ return rc;
+}
+
+static int role_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct role_datum *role;
+ int rc, to_read = 2;
+ __le32 buf[3];
+ u32 len;
+
+ role = kzalloc(sizeof(*role), GFP_KERNEL);
+ if (!role)
+ return -ENOMEM;
+
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ to_read = 3;
+
+ rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ role->value = le32_to_cpu(buf[1]);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ role->bounds = le32_to_cpu(buf[2]);
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = ebitmap_read(&role->dominates, fp);
+ if (rc)
+ goto bad;
+
+ rc = ebitmap_read(&role->types, fp);
+ if (rc)
+ goto bad;
+
+ if (strcmp(key, OBJECT_R) == 0) {
+ rc = -EINVAL;
+ if (role->value != OBJECT_R_VAL) {
+ pr_err("SELinux: Role %s has wrong value %d\n",
+ OBJECT_R, role->value);
+ goto bad;
+ }
+ rc = 0;
+ goto bad;
+ }
+
+ rc = symtab_insert(s, key, role);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ role_destroy(key, role, NULL);
+ return rc;
+}
+
+static int type_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct type_datum *typdatum;
+ int rc, to_read = 3;
+ __le32 buf[4];
+ u32 len;
+
+ typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL);
+ if (!typdatum)
+ return -ENOMEM;
+
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ to_read = 4;
+
+ rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ typdatum->value = le32_to_cpu(buf[1]);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+ u32 prop = le32_to_cpu(buf[2]);
+
+ if (prop & TYPEDATUM_PROPERTY_PRIMARY)
+ typdatum->primary = 1;
+ if (prop & TYPEDATUM_PROPERTY_ATTRIBUTE)
+ typdatum->attribute = 1;
+
+ typdatum->bounds = le32_to_cpu(buf[3]);
+ } else {
+ typdatum->primary = le32_to_cpu(buf[2]);
+ }
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = symtab_insert(s, key, typdatum);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ type_destroy(key, typdatum, NULL);
+ return rc;
+}
+
+/*
+ * Read a MLS level structure from a policydb binary
+ * representation file.
+ */
+static int mls_read_level(struct mls_level *lp, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ memset(lp, 0, sizeof(*lp));
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc) {
+ pr_err("SELinux: mls: truncated level\n");
+ return rc;
+ }
+ lp->sens = le32_to_cpu(buf[0]);
+
+ rc = ebitmap_read(&lp->cat, fp);
+ if (rc) {
+ pr_err("SELinux: mls: error reading level categories\n");
+ return rc;
+ }
+ return 0;
+}
+
+static int user_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct user_datum *usrdatum;
+ int rc, to_read = 2;
+ __le32 buf[3];
+ u32 len;
+
+ usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
+ if (!usrdatum)
+ return -ENOMEM;
+
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ to_read = 3;
+
+ rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ usrdatum->value = le32_to_cpu(buf[1]);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ usrdatum->bounds = le32_to_cpu(buf[2]);
+
+ rc = str_read(&key, GFP_KERNEL, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = ebitmap_read(&usrdatum->roles, fp);
+ if (rc)
+ goto bad;
+
+ if (p->policyvers >= POLICYDB_VERSION_MLS) {
+ rc = mls_read_range_helper(&usrdatum->range, fp);
+ if (rc)
+ goto bad;
+ rc = mls_read_level(&usrdatum->dfltlevel, fp);
+ if (rc)
+ goto bad;
+ }
+
+ rc = symtab_insert(s, key, usrdatum);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ user_destroy(key, usrdatum, NULL);
+ return rc;
+}
+
+static int sens_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct level_datum *levdatum;
+ int rc;
+ __le32 buf[2];
+ u32 len;
+
+ levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
+ if (!levdatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ levdatum->isalias = le32_to_cpu(buf[1]);
+
+ rc = str_read(&key, GFP_ATOMIC, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = -ENOMEM;
+ levdatum->level = kmalloc(sizeof(*levdatum->level), GFP_ATOMIC);
+ if (!levdatum->level)
+ goto bad;
+
+ rc = mls_read_level(levdatum->level, fp);
+ if (rc)
+ goto bad;
+
+ rc = symtab_insert(s, key, levdatum);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ sens_destroy(key, levdatum, NULL);
+ return rc;
+}
+
+static int cat_read(struct policydb *p, struct symtab *s, void *fp)
+{
+ char *key = NULL;
+ struct cat_datum *catdatum;
+ int rc;
+ __le32 buf[3];
+ u32 len;
+
+ catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
+ if (!catdatum)
+ return -ENOMEM;
+
+ rc = next_entry(buf, fp, sizeof(buf));
+ if (rc)
+ goto bad;
+
+ len = le32_to_cpu(buf[0]);
+ catdatum->value = le32_to_cpu(buf[1]);
+ catdatum->isalias = le32_to_cpu(buf[2]);
+
+ rc = str_read(&key, GFP_ATOMIC, fp, len);
+ if (rc)
+ goto bad;
+
+ rc = symtab_insert(s, key, catdatum);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ cat_destroy(key, catdatum, NULL);
+ return rc;
+}
+
+static int (*const read_f[SYM_NUM]) (struct policydb *p,
+ struct symtab *s, void *fp) = {
+ common_read,
+ class_read,
+ role_read,
+ type_read,
+ user_read,
+ cond_read_bool,
+ sens_read,
+ cat_read,
+};
+
+static int user_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+ struct user_datum *upper, *user;
+ struct policydb *p = datap;
+ int depth = 0;
+
+ upper = user = datum;
+ while (upper->bounds) {
+ struct ebitmap_node *node;
+ unsigned long bit;
+
+ if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+ pr_err("SELinux: user %s: "
+ "too deep or looped boundary",
+ (char *) key);
+ return -EINVAL;
+ }
+
+ upper = p->user_val_to_struct[upper->bounds - 1];
+ ebitmap_for_each_positive_bit(&user->roles, node, bit) {
+ if (ebitmap_get_bit(&upper->roles, bit))
+ continue;
+
+ pr_err("SELinux: boundary violated policy: "
+ "user=%s role=%s bounds=%s\n",
+ sym_name(p, SYM_USERS, user->value - 1),
+ sym_name(p, SYM_ROLES, bit),
+ sym_name(p, SYM_USERS, upper->value - 1));
+
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int role_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+ struct role_datum *upper, *role;
+ struct policydb *p = datap;
+ int depth = 0;
+
+ upper = role = datum;
+ while (upper->bounds) {
+ struct ebitmap_node *node;
+ unsigned long bit;
+
+ if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+ pr_err("SELinux: role %s: "
+ "too deep or looped bounds\n",
+ (char *) key);
+ return -EINVAL;
+ }
+
+ upper = p->role_val_to_struct[upper->bounds - 1];
+ ebitmap_for_each_positive_bit(&role->types, node, bit) {
+ if (ebitmap_get_bit(&upper->types, bit))
+ continue;
+
+ pr_err("SELinux: boundary violated policy: "
+ "role=%s type=%s bounds=%s\n",
+ sym_name(p, SYM_ROLES, role->value - 1),
+ sym_name(p, SYM_TYPES, bit),
+ sym_name(p, SYM_ROLES, upper->value - 1));
+
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int type_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+ struct type_datum *upper;
+ struct policydb *p = datap;
+ int depth = 0;
+
+ upper = datum;
+ while (upper->bounds) {
+ if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+ pr_err("SELinux: type %s: "
+ "too deep or looped boundary\n",
+ (char *) key);
+ return -EINVAL;
+ }
+
+ upper = p->type_val_to_struct[upper->bounds - 1];
+ BUG_ON(!upper);
+
+ if (upper->attribute) {
+ pr_err("SELinux: type %s: "
+ "bounded by attribute %s",
+ (char *) key,
+ sym_name(p, SYM_TYPES, upper->value - 1));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int policydb_bounds_sanity_check(struct policydb *p)
+{
+ int rc;
+
+ if (p->policyvers < POLICYDB_VERSION_BOUNDARY)
+ return 0;
+
+ rc = hashtab_map(&p->p_users.table, user_bounds_sanity_check, p);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(&p->p_roles.table, role_bounds_sanity_check, p);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(&p->p_types.table, type_bounds_sanity_check, p);
+ if (rc)
+ return rc;
+
+ return 0;
+}
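All three bounds checks walk the bounds chain with a depth counter, treating any chain longer than POLICYDB_BOUNDS_MAXDEPTH links as "too deep or looped"; this is what turns an accidental cycle in the policy into a clean -EINVAL instead of an unbounded loop. The walk in miniature (array indices stand in for the val_to_struct lookups, and the depth limit is illustrative):

#include <assert.h>

#define BOUNDS_MAXDEPTH 8 /* illustrative; see POLICYDB_BOUNDS_MAXDEPTH */

struct entry { unsigned bounds; }; /* 1-based index of bounding entry, 0 = none */

static int bounds_chain_ok(const struct entry *tab, unsigned start)
{
    const struct entry *e = &tab[start];
    int depth = 0;

    while (e->bounds) {
        if (++depth == BOUNDS_MAXDEPTH)
            return 0; /* cycle or excessive nesting */
        e = &tab[e->bounds - 1];
    }
    return 1;
}

int main(void)
{
    struct entry ok[]   = { { 2 }, { 3 }, { 0 } }; /* 1 -> 2 -> 3 */
    struct entry loop[] = { { 2 }, { 1 } };        /* 1 -> 2 -> 1 -> ... */

    assert(bounds_chain_ok(ok, 0));
    assert(!bounds_chain_ok(loop, 0));
    return 0;
}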
+
+u16 string_to_security_class(struct policydb *p, const char *name)
+{
+ struct class_datum *cladatum;
+
+ cladatum = symtab_search(&p->p_classes, name);
+ if (!cladatum)
+ return 0;
+
+ return cladatum->value;
+}
+
+u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
+{
+ struct class_datum *cladatum;
+ struct perm_datum *perdatum = NULL;
+ struct common_datum *comdatum;
+
+ if (!tclass || tclass > p->p_classes.nprim)
+ return 0;
+
+ cladatum = p->class_val_to_struct[tclass-1];
+ comdatum = cladatum->comdatum;
+ if (comdatum)
+ perdatum = symtab_search(&comdatum->permissions, name);
+ if (!perdatum)
+ perdatum = symtab_search(&cladatum->permissions, name);
+ if (!perdatum)
+ return 0;
+
+ return 1U << (perdatum->value-1);
+}
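Permission values are 1-based positions in a class's access vector, so string_to_av_perm() returns 1U << (value - 1); ORing several such bits together is how an access vector like "transition | dyntransition" is built further down in policydb_read(). The mapping on its own:

#include <assert.h>
#include <stdint.h>

static uint32_t perm_bit(unsigned value)
{
    return value ? 1U << (value - 1) : 0; /* 0 means "not found" */
}

int main(void)
{
    assert(perm_bit(1) == 0x1); /* first permission of the class */
    assert(perm_bit(3) == 0x4);
    assert((perm_bit(1) | perm_bit(2)) == 0x3); /* a small access vector */
    return 0;
}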
+
+static int range_read(struct policydb *p, void *fp)
+{
+ struct range_trans *rt = NULL;
+ struct mls_range *r = NULL;
+ int i, rc;
+ __le32 buf[2];
+ u32 nel;
+
+ if (p->policyvers < POLICYDB_VERSION_MLS)
+ return 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+
+ nel = le32_to_cpu(buf[0]);
+
+ rc = hashtab_init(&p->range_tr, nel);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ goto out;
+
+ rc = next_entry(buf, fp, (sizeof(u32) * 2));
+ if (rc)
+ goto out;
+
+ rt->source_type = le32_to_cpu(buf[0]);
+ rt->target_type = le32_to_cpu(buf[1]);
+ if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ rt->target_class = le32_to_cpu(buf[0]);
+ } else
+ rt->target_class = p->process_class;
+
+ rc = -EINVAL;
+ if (!policydb_type_isvalid(p, rt->source_type) ||
+ !policydb_type_isvalid(p, rt->target_type) ||
+ !policydb_class_isvalid(p, rt->target_class))
+ goto out;
+
+ rc = -ENOMEM;
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ goto out;
+
+ rc = mls_read_range_helper(r, fp);
+ if (rc)
+ goto out;
+
+ rc = -EINVAL;
+ if (!mls_range_isvalid(p, r)) {
+ pr_warn("SELinux: rangetrans: invalid range\n");
+ goto out;
+ }
+
+ rc = hashtab_insert(&p->range_tr, rt, r, rangetr_key_params);
+ if (rc)
+ goto out;
+
+ rt = NULL;
+ r = NULL;
+ }
+ hash_eval(&p->range_tr, "rangetr");
+ rc = 0;
+out:
+ kfree(rt);
+ kfree(r);
+ return rc;
+}
+
+static int filename_trans_read_helper_compat(struct policydb *p, void *fp)
+{
+ struct filename_trans_key key, *ft = NULL;
+ struct filename_trans_datum *last, *datum = NULL;
+ char *name = NULL;
+ u32 len, stype, otype;
+ __le32 buf[4];
+ int rc;
+
+ /* length of the path component string */
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+ len = le32_to_cpu(buf[0]);
+
+ /* path component string */
+ rc = str_read(&name, GFP_KERNEL, fp, len);
+ if (rc)
+ return rc;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 4);
+ if (rc)
+ goto out;
+
+ stype = le32_to_cpu(buf[0]);
+ key.ttype = le32_to_cpu(buf[1]);
+ key.tclass = le32_to_cpu(buf[2]);
+ key.name = name;
+
+ otype = le32_to_cpu(buf[3]);
+
+ last = NULL;
+ datum = policydb_filenametr_search(p, &key);
+ while (datum) {
+ if (unlikely(ebitmap_get_bit(&datum->stypes, stype - 1))) {
+ /* conflicting/duplicate rules are ignored */
+ datum = NULL;
+ goto out;
+ }
+ if (likely(datum->otype == otype))
+ break;
+ last = datum;
+ datum = datum->next;
+ }
+ if (!datum) {
+ rc = -ENOMEM;
+ datum = kmalloc(sizeof(*datum), GFP_KERNEL);
+ if (!datum)
+ goto out;
+
+ ebitmap_init(&datum->stypes);
+ datum->otype = otype;
+ datum->next = NULL;
+
+ if (unlikely(last)) {
+ last->next = datum;
+ } else {
+ rc = -ENOMEM;
+ ft = kmemdup(&key, sizeof(key), GFP_KERNEL);
+ if (!ft)
+ goto out;
+
+ rc = hashtab_insert(&p->filename_trans, ft, datum,
+ filenametr_key_params);
+ if (rc)
+ goto out;
+ name = NULL;
+
+ rc = ebitmap_set_bit(&p->filename_trans_ttypes,
+ key.ttype, 1);
+ if (rc)
+ return rc;
+ }
+ }
+ kfree(name);
+ return ebitmap_set_bit(&datum->stypes, stype - 1, 1);
+
+out:
+ kfree(ft);
+ kfree(name);
+ kfree(datum);
+ return rc;
+}
+
+static int filename_trans_read_helper(struct policydb *p, void *fp)
+{
+ struct filename_trans_key *ft = NULL;
+ struct filename_trans_datum **dst, *datum, *first = NULL;
+ char *name = NULL;
+ u32 len, ttype, tclass, ndatum, i;
+ __le32 buf[3];
+ int rc;
+
+ /* length of the path component string */
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+ len = le32_to_cpu(buf[0]);
+
+ /* path component string */
+ rc = str_read(&name, GFP_KERNEL, fp, len);
+ if (rc)
+ return rc;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto out;
+
+ ttype = le32_to_cpu(buf[0]);
+ tclass = le32_to_cpu(buf[1]);
+
+ ndatum = le32_to_cpu(buf[2]);
+ if (ndatum == 0) {
+ pr_err("SELinux: Filename transition key with no datum\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ dst = &first;
+ for (i = 0; i < ndatum; i++) {
+ rc = -ENOMEM;
+ datum = kmalloc(sizeof(*datum), GFP_KERNEL);
+ if (!datum)
+ goto out;
+
+ datum->next = NULL;
+ *dst = datum;
+
+ /* ebitmap_read() will at least init the bitmap */
+ rc = ebitmap_read(&datum->stypes, fp);
+ if (rc)
+ goto out;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ datum->otype = le32_to_cpu(buf[0]);
+
+ dst = &datum->next;
+ }
+
+ rc = -ENOMEM;
+ ft = kmalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ goto out;
+
+ ft->ttype = ttype;
+ ft->tclass = tclass;
+ ft->name = name;
+
+ rc = hashtab_insert(&p->filename_trans, ft, first,
+ filenametr_key_params);
+ if (rc == -EEXIST)
+ pr_err("SELinux: Duplicate filename transition key\n");
+ if (rc)
+ goto out;
+
+ return ebitmap_set_bit(&p->filename_trans_ttypes, ttype, 1);
+
+out:
+ kfree(ft);
+ kfree(name);
+ while (first) {
+ datum = first;
+ first = first->next;
+
+ ebitmap_destroy(&datum->stypes);
+ kfree(datum);
+ }
+ return rc;
+}
+
+static int filename_trans_read(struct policydb *p, void *fp)
+{
+ u32 nel;
+ __le32 buf[1];
+ int rc, i;
+
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+ nel = le32_to_cpu(buf[0]);
+
+ if (p->policyvers < POLICYDB_VERSION_COMP_FTRANS) {
+ p->compat_filename_trans_count = nel;
+
+ rc = hashtab_init(&p->filename_trans, (1 << 11));
+ if (rc)
+ return rc;
+
+ for (i = 0; i < nel; i++) {
+ rc = filename_trans_read_helper_compat(p, fp);
+ if (rc)
+ return rc;
+ }
+ } else {
+ rc = hashtab_init(&p->filename_trans, nel);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < nel; i++) {
+ rc = filename_trans_read_helper(p, fp);
+ if (rc)
+ return rc;
+ }
+ }
+ hash_eval(&p->filename_trans, "filenametr");
+ return 0;
+}
+
+static int genfs_read(struct policydb *p, void *fp)
+{
+ int i, j, rc;
+ u32 nel, nel2, len, len2;
+ __le32 buf[1];
+ struct ocontext *l, *c;
+ struct ocontext *newc = NULL;
+ struct genfs *genfs_p, *genfs;
+ struct genfs *newgenfs = NULL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+ nel = le32_to_cpu(buf[0]);
+
+ for (i = 0; i < nel; i++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ newgenfs = kzalloc(sizeof(*newgenfs), GFP_KERNEL);
+ if (!newgenfs)
+ goto out;
+
+ rc = str_read(&newgenfs->fstype, GFP_KERNEL, fp, len);
+ if (rc)
+ goto out;
+
+ for (genfs_p = NULL, genfs = p->genfs; genfs;
+ genfs_p = genfs, genfs = genfs->next) {
+ rc = -EINVAL;
+ if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
+ pr_err("SELinux: dup genfs fstype %s\n",
+ newgenfs->fstype);
+ goto out;
+ }
+ if (strcmp(newgenfs->fstype, genfs->fstype) < 0)
+ break;
+ }
+ newgenfs->next = genfs;
+ if (genfs_p)
+ genfs_p->next = newgenfs;
+ else
+ p->genfs = newgenfs;
+ genfs = newgenfs;
+ newgenfs = NULL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ nel2 = le32_to_cpu(buf[0]);
+ for (j = 0; j < nel2; j++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ newc = kzalloc(sizeof(*newc), GFP_KERNEL);
+ if (!newc)
+ goto out;
+
+ rc = str_read(&newc->u.name, GFP_KERNEL, fp, len);
+ if (rc)
+ goto out;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ newc->v.sclass = le32_to_cpu(buf[0]);
+ rc = context_read_and_validate(&newc->context[0], p, fp);
+ if (rc)
+ goto out;
+
+ for (l = NULL, c = genfs->head; c;
+ l = c, c = c->next) {
+ rc = -EINVAL;
+ if (!strcmp(newc->u.name, c->u.name) &&
+ (!c->v.sclass || !newc->v.sclass ||
+ newc->v.sclass == c->v.sclass)) {
+ pr_err("SELinux: dup genfs entry (%s,%s)\n",
+ genfs->fstype, c->u.name);
+ goto out;
+ }
+ len = strlen(newc->u.name);
+ len2 = strlen(c->u.name);
+ if (len > len2)
+ break;
+ }
+
+ newc->next = c;
+ if (l)
+ l->next = newc;
+ else
+ genfs->head = newc;
+ newc = NULL;
+ }
+ }
+ rc = 0;
+out:
+ if (newgenfs) {
+ kfree(newgenfs->fstype);
+ kfree(newgenfs);
+ }
+ ocontext_destroy(newc, OCON_FSUSE);
+
+ return rc;
+}
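The inner insertion loop above keeps each filesystem's path entries ordered longest-name-first (the len > len2 break), so a later longest-prefix match over genfs contexts can stop at the first entry that matches. The ordering logic on its own (illustrative type, contexts omitted):

#include <assert.h>
#include <string.h>

struct pathent { const char *name; struct pathent *next; };

static void insert_sorted(struct pathent **head, struct pathent *n)
{
    struct pathent *l = NULL, *c;

    for (c = *head; c; l = c, c = c->next)
        if (strlen(n->name) > strlen(c->name))
            break; /* insert before the first shorter name */
    n->next = c;
    if (l)
        l->next = n;
    else
        *head = n;
}

int main(void)
{
    struct pathent root = { "/", NULL }, proc = { "/proc/net", NULL };
    struct pathent *head = NULL;

    insert_sorted(&head, &root);
    insert_sorted(&head, &proc);
    assert(head == &proc && head->next == &root); /* longest first */
    return 0;
}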
+
+static int ocontext_read(struct policydb *p, const struct policydb_compat_info *info,
+ void *fp)
+{
+ int i, j, rc;
+ u32 nel, len;
+ __be64 prefixbuf[1];
+ __le32 buf[3];
+ struct ocontext *l, *c;
+ u32 nodebuf[8];
+
+ for (i = 0; i < info->ocon_num; i++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ nel = le32_to_cpu(buf[0]);
+
+ l = NULL;
+ for (j = 0; j < nel; j++) {
+ rc = -ENOMEM;
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ goto out;
+ if (l)
+ l->next = c;
+ else
+ p->ocontexts[i] = c;
+ l = c;
+
+ switch (i) {
+ case OCON_ISID:
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ c->sid[0] = le32_to_cpu(buf[0]);
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_FS:
+ case OCON_NETIF:
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = str_read(&c->u.name, GFP_KERNEL, fp, len);
+ if (rc)
+ goto out;
+
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ rc = context_read_and_validate(&c->context[1], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_PORT:
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto out;
+ c->u.port.protocol = le32_to_cpu(buf[0]);
+ c->u.port.low_port = le32_to_cpu(buf[1]);
+ c->u.port.high_port = le32_to_cpu(buf[2]);
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_NODE:
+ rc = next_entry(nodebuf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+ c->u.node.addr = nodebuf[0]; /* network order */
+ c->u.node.mask = nodebuf[1]; /* network order */
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_FSUSE:
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+
+ rc = -EINVAL;
+ c->v.behavior = le32_to_cpu(buf[0]);
+ /* Determined at runtime, not in policy DB. */
+ if (c->v.behavior == SECURITY_FS_USE_MNTPOINT)
+ goto out;
+ if (c->v.behavior > SECURITY_FS_USE_MAX)
+ goto out;
+
+ len = le32_to_cpu(buf[1]);
+ rc = str_read(&c->u.name, GFP_KERNEL, fp, len);
+ if (rc)
+ goto out;
+
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_NODE6: {
+ int k;
+
+ rc = next_entry(nodebuf, fp, sizeof(u32) * 8);
+ if (rc)
+ goto out;
+ for (k = 0; k < 4; k++)
+ c->u.node6.addr[k] = nodebuf[k];
+ for (k = 0; k < 4; k++)
+ c->u.node6.mask[k] = nodebuf[k+4];
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ }
+ case OCON_IBPKEY: {
+ u32 pkey_lo, pkey_hi;
+
+ rc = next_entry(prefixbuf, fp, sizeof(u64));
+ if (rc)
+ goto out;
+
+ /* we need to have subnet_prefix in CPU order */
+ c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
+
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+
+ pkey_lo = le32_to_cpu(buf[0]);
+ pkey_hi = le32_to_cpu(buf[1]);
+
+ if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ c->u.ibpkey.low_pkey = pkey_lo;
+ c->u.ibpkey.high_pkey = pkey_hi;
+
+ rc = context_read_and_validate(&c->context[0],
+ p,
+ fp);
+ if (rc)
+ goto out;
+ break;
+ }
+ case OCON_IBENDPORT: {
+ u32 port;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = str_read(&c->u.ibendport.dev_name, GFP_KERNEL, fp, len);
+ if (rc)
+ goto out;
+
+ port = le32_to_cpu(buf[1]);
+ if (port > U8_MAX || port == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ c->u.ibendport.port = port;
+
+ rc = context_read_and_validate(&c->context[0],
+ p,
+ fp);
+ if (rc)
+ goto out;
+ break;
+ } /* end case */
+ } /* end switch */
+ }
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
+/*
+ * Read the configuration data from a policy database binary
+ * representation file into a policy database structure.
+ */
+int policydb_read(struct policydb *p, void *fp)
+{
+ struct role_allow *ra, *lra;
+ struct role_trans_key *rtk = NULL;
+ struct role_trans_datum *rtd = NULL;
+ int i, j, rc;
+ __le32 buf[4];
+ u32 len, nprim, nel, perm;
+
+ char *policydb_str;
+ const struct policydb_compat_info *info;
+
+ policydb_init(p);
+
+ /* Read the magic number and string length. */
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto bad;
+
+ rc = -EINVAL;
+ if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
+ pr_err("SELinux: policydb magic number 0x%x does "
+ "not match expected magic number 0x%x\n",
+ le32_to_cpu(buf[0]), POLICYDB_MAGIC);
+ goto bad;
+ }
+
+ rc = -EINVAL;
+ len = le32_to_cpu(buf[1]);
+ if (len != strlen(POLICYDB_STRING)) {
+ pr_err("SELinux: policydb string length %d does not "
+ "match expected length %zu\n",
+ len, strlen(POLICYDB_STRING));
+ goto bad;
+ }
+
+ rc = -ENOMEM;
+ policydb_str = kmalloc(len + 1, GFP_KERNEL);
+ if (!policydb_str) {
+ pr_err("SELinux: unable to allocate memory for policydb "
+ "string of length %d\n", len);
+ goto bad;
+ }
+
+ rc = next_entry(policydb_str, fp, len);
+ if (rc) {
+ pr_err("SELinux: truncated policydb string identifier\n");
+ kfree(policydb_str);
+ goto bad;
+ }
+
+ rc = -EINVAL;
+ policydb_str[len] = '\0';
+ if (strcmp(policydb_str, POLICYDB_STRING)) {
+ pr_err("SELinux: policydb string %s does not match "
+ "my string %s\n", policydb_str, POLICYDB_STRING);
+ kfree(policydb_str);
+ goto bad;
+ }
+ /* Done with policydb_str. */
+ kfree(policydb_str);
+ policydb_str = NULL;
+
+ /* Read the version and table sizes. */
+ rc = next_entry(buf, fp, sizeof(u32) * 4);
+ if (rc)
+ goto bad;
+
+ rc = -EINVAL;
+ p->policyvers = le32_to_cpu(buf[0]);
+ if (p->policyvers < POLICYDB_VERSION_MIN ||
+ p->policyvers > POLICYDB_VERSION_MAX) {
+ pr_err("SELinux: policydb version %d does not match "
+ "my version range %d-%d\n",
+ le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
+ goto bad;
+ }
+
+ if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
+ p->mls_enabled = 1;
+
+ rc = -EINVAL;
+ if (p->policyvers < POLICYDB_VERSION_MLS) {
+ pr_err("SELinux: security policydb version %d "
+ "(MLS) not backwards compatible\n",
+ p->policyvers);
+ goto bad;
+ }
+ }
+ p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
+ p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
+
+ if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+ rc = ebitmap_read(&p->policycaps, fp);
+ if (rc)
+ goto bad;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+ rc = ebitmap_read(&p->permissive_map, fp);
+ if (rc)
+ goto bad;
+ }
+
+ rc = -EINVAL;
+ info = policydb_lookup_compat(p->policyvers);
+ if (!info) {
+ pr_err("SELinux: unable to find policy compat info "
+ "for version %d\n", p->policyvers);
+ goto bad;
+ }
+
+ rc = -EINVAL;
+ if (le32_to_cpu(buf[2]) != info->sym_num ||
+ le32_to_cpu(buf[3]) != info->ocon_num) {
+ pr_err("SELinux: policydb table sizes (%d,%d) do "
+ "not match mine (%d,%d)\n", le32_to_cpu(buf[2]),
+ le32_to_cpu(buf[3]),
+ info->sym_num, info->ocon_num);
+ goto bad;
+ }
+
+ for (i = 0; i < info->sym_num; i++) {
+ rc = next_entry(buf, fp, sizeof(u32)*2);
+ if (rc)
+ goto bad;
+ nprim = le32_to_cpu(buf[0]);
+ nel = le32_to_cpu(buf[1]);
+
+ rc = symtab_init(&p->symtab[i], nel);
+ if (rc)
+ goto out;
+
+ if (i == SYM_ROLES) {
+ rc = roles_init(p);
+ if (rc)
+ goto out;
+ }
+
+ for (j = 0; j < nel; j++) {
+ rc = read_f[i](p, &p->symtab[i], fp);
+ if (rc)
+ goto bad;
+ }
+
+ p->symtab[i].nprim = nprim;
+ }
+
+ rc = -EINVAL;
+ p->process_class = string_to_security_class(p, "process");
+ if (!p->process_class) {
+ pr_err("SELinux: process class is required, not defined in policy\n");
+ goto bad;
+ }
+
+ rc = avtab_read(&p->te_avtab, fp, p);
+ if (rc)
+ goto bad;
+
+ if (p->policyvers >= POLICYDB_VERSION_BOOL) {
+ rc = cond_read_list(p, fp);
+ if (rc)
+ goto bad;
+ }
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ nel = le32_to_cpu(buf[0]);
+
+ rc = hashtab_init(&p->role_tr, nel);
+ if (rc)
+ goto bad;
+ for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
+ rtk = kmalloc(sizeof(*rtk), GFP_KERNEL);
+ if (!rtk)
+ goto bad;
+
+ rc = -ENOMEM;
+ rtd = kmalloc(sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ goto bad;
+
+ rc = next_entry(buf, fp, sizeof(u32)*3);
+ if (rc)
+ goto bad;
+
+ rtk->role = le32_to_cpu(buf[0]);
+ rtk->type = le32_to_cpu(buf[1]);
+ rtd->new_role = le32_to_cpu(buf[2]);
+ if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ rtk->tclass = le32_to_cpu(buf[0]);
+ } else
+ rtk->tclass = p->process_class;
+
+ rc = -EINVAL;
+ if (!policydb_role_isvalid(p, rtk->role) ||
+ !policydb_type_isvalid(p, rtk->type) ||
+ !policydb_class_isvalid(p, rtk->tclass) ||
+ !policydb_role_isvalid(p, rtd->new_role))
+ goto bad;
+
+ rc = hashtab_insert(&p->role_tr, rtk, rtd, roletr_key_params);
+ if (rc)
+ goto bad;
+
+ rtk = NULL;
+ rtd = NULL;
+ }
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ nel = le32_to_cpu(buf[0]);
+ lra = NULL;
+ for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
+ ra = kzalloc(sizeof(*ra), GFP_KERNEL);
+ if (!ra)
+ goto bad;
+ if (lra)
+ lra->next = ra;
+ else
+ p->role_allow = ra;
+ rc = next_entry(buf, fp, sizeof(u32)*2);
+ if (rc)
+ goto bad;
+
+ rc = -EINVAL;
+ ra->role = le32_to_cpu(buf[0]);
+ ra->new_role = le32_to_cpu(buf[1]);
+ if (!policydb_role_isvalid(p, ra->role) ||
+ !policydb_role_isvalid(p, ra->new_role))
+ goto bad;
+ lra = ra;
+ }
+
+ rc = filename_trans_read(p, fp);
+ if (rc)
+ goto bad;
+
+ rc = policydb_index(p);
+ if (rc)
+ goto bad;
+
+ rc = -EINVAL;
+ perm = string_to_av_perm(p, p->process_class, "transition");
+ if (!perm) {
+ pr_err("SELinux: process transition permission is required, not defined in policy\n");
+ goto bad;
+ }
+ p->process_trans_perms = perm;
+ perm = string_to_av_perm(p, p->process_class, "dyntransition");
+ if (!perm) {
+ pr_err("SELinux: process dyntransition permission is required, not defined in policy\n");
+ goto bad;
+ }
+ p->process_trans_perms |= perm;
+
+ rc = ocontext_read(p, info, fp);
+ if (rc)
+ goto bad;
+
+ rc = genfs_read(p, fp);
+ if (rc)
+ goto bad;
+
+ rc = range_read(p, fp);
+ if (rc)
+ goto bad;
+
+ rc = -ENOMEM;
+ p->type_attr_map_array = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_attr_map_array),
+ GFP_KERNEL);
+ if (!p->type_attr_map_array)
+ goto bad;
+
+ /* just in case ebitmap_init() becomes more than just a memset(0): */
+ for (i = 0; i < p->p_types.nprim; i++)
+ ebitmap_init(&p->type_attr_map_array[i]);
+
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e = &p->type_attr_map_array[i];
+
+ if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
+ rc = ebitmap_read(e, fp);
+ if (rc)
+ goto bad;
+ }
+ /* add the type itself as the degenerate case */
+ rc = ebitmap_set_bit(e, i, 1);
+ if (rc)
+ goto bad;
+ }
+
+ rc = policydb_bounds_sanity_check(p);
+ if (rc)
+ goto bad;
+
+ rc = 0;
+out:
+ return rc;
+bad:
+ kfree(rtk);
+ kfree(rtd);
+ policydb_destroy(p);
+ goto out;
+}
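+
+/*
+ * For reference, the header consumed by policydb_read() above has this
+ * on-disk layout (every u32 is little-endian; this is an informal sketch
+ * reconstructed from the reads above, not a normative spec):
+ *
+ *	u32	magic		POLICYDB_MAGIC
+ *	u32	len		strlen(POLICYDB_STRING)
+ *	char	str[len]	POLICYDB_STRING, not NUL-terminated
+ *	u32	version		POLICYDB_VERSION_MIN..POLICYDB_VERSION_MAX
+ *	u32	config		POLICYDB_CONFIG_MLS/REJECT_UNKNOWN/ALLOW_UNKNOWN
+ *	u32	sym_num		must match the compat info for this version
+ *	u32	ocon_num	must match the compat info for this version
+ *	[ebitmap policycaps]		if version >= POLICYDB_VERSION_POLCAP
+ *	[ebitmap permissive_map]	if version >= POLICYDB_VERSION_PERMISSIVE
+ */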
+
+/*
+ * Write an MLS level structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_level(struct mls_level *l, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ buf[0] = cpu_to_le32(l->sens);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&l->cat, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Write an MLS range structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_range_helper(struct mls_range *r, void *fp)
+{
+ __le32 buf[3];
+ size_t items;
+ int rc, eq;
+
+ eq = mls_level_eq(&r->level[1], &r->level[0]);
+
+ if (eq)
+ items = 2;
+ else
+ items = 3;
+ buf[0] = cpu_to_le32(items-1);
+ buf[1] = cpu_to_le32(r->level[0].sens);
+ if (!eq)
+ buf[2] = cpu_to_le32(r->level[1].sens);
+
+ BUG_ON(items > ARRAY_SIZE(buf));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&r->level[0].cat, fp);
+ if (rc)
+ return rc;
+ if (!eq) {
+ rc = ebitmap_write(&r->level[1].cat, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
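+
+/*
+ * Worked example of the encoding above: a degenerate range such as
+ * s1-s1 is written as (items-1 == 1, sens 1, one category ebitmap),
+ * while s1-s3 is written as (items-1 == 2, sens 1, sens 3, low category
+ * ebitmap, high category ebitmap). The reader infers equality of the
+ * two levels from the leading count.
+ */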
+
+static int sens_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct level_datum *levdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(levdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(levdatum->level, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int cat_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cat_datum *catdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(catdatum->value);
+ buf[2] = cpu_to_le32(catdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int role_trans_write_one(void *key, void *datum, void *ptr)
+{
+ struct role_trans_key *rtk = key;
+ struct role_trans_datum *rtd = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ __le32 buf[3];
+ int rc;
+
+ buf[0] = cpu_to_le32(rtk->role);
+ buf[1] = cpu_to_le32(rtk->type);
+ buf[2] = cpu_to_le32(rtd->new_role);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+ buf[0] = cpu_to_le32(rtk->tclass);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int role_trans_write(struct policydb *p, void *fp)
+{
+ struct policy_data pd = { .p = p, .fp = fp };
+ __le32 buf[1];
+ int rc;
+
+ buf[0] = cpu_to_le32(p->role_tr.nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ return hashtab_map(&p->role_tr, role_trans_write_one, &pd);
+}
+
+static int role_allow_write(struct role_allow *r, void *fp)
+{
+ struct role_allow *ra;
+ __le32 buf[2];
+ size_t nel;
+ int rc;
+
+ nel = 0;
+ for (ra = r; ra; ra = ra->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (ra = r; ra; ra = ra->next) {
+ buf[0] = cpu_to_le32(ra->role);
+ buf[1] = cpu_to_le32(ra->new_role);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Write a security context structure
+ * to a policydb binary representation file.
+ */
+static int context_write(struct policydb *p, struct context *c,
+ void *fp)
+{
+ int rc;
+ __le32 buf[3];
+
+ buf[0] = cpu_to_le32(c->user);
+ buf[1] = cpu_to_le32(c->role);
+ buf[2] = cpu_to_le32(c->type);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&c->range, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
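+
+/*
+ * A serialized context is therefore three little-endian u32 value
+ * indices (user, role, type, each 1-based) followed by the MLS range
+ * in the format shown for mls_write_range_helper() above.
+ */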
+
+/*
+ * The following *_write functions are used to
+ * write the symbol data to a policy database
+ * binary representation file.
+ */
+
+static int perm_write(void *vkey, void *datum, void *fp)
+{
+ char *key = vkey;
+ struct perm_datum *perdatum = datum;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(perdatum->value);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int common_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct common_datum *comdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(comdatum->value);
+ buf[2] = cpu_to_le32(comdatum->permissions.nprim);
+ buf[3] = cpu_to_le32(comdatum->permissions.table.nel);
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(&comdatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int type_set_write(struct type_set *t, void *fp)
+{
+ int rc;
+ __le32 buf[1];
+
+ if (ebitmap_write(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_write(&t->negset, fp))
+ return -EINVAL;
+
+ buf[0] = cpu_to_le32(t->flags);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int write_cons_helper(struct policydb *p, struct constraint_node *node,
+ void *fp)
+{
+ struct constraint_node *c;
+ struct constraint_expr *e;
+ __le32 buf[3];
+ u32 nel;
+ int rc;
+
+ for (c = node; c; c = c->next) {
+ nel = 0;
+ for (e = c->expr; e; e = e->next)
+ nel++;
+ buf[0] = cpu_to_le32(c->permissions);
+ buf[1] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ for (e = c->expr; e; e = e->next) {
+ buf[0] = cpu_to_le32(e->expr_type);
+ buf[1] = cpu_to_le32(e->attr);
+ buf[2] = cpu_to_le32(e->op);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ switch (e->expr_type) {
+ case CEXPR_NAMES:
+ rc = ebitmap_write(&e->names, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ rc = type_set_write(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
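+
+/*
+ * Note on the encoding above: each constraint node is written as a
+ * (permissions mask, expression count) pair followed by its expressions
+ * as (expr_type, attr, op) triples in postfix order, with CEXPR_NAMES
+ * expressions carrying an extra ebitmap (plus a type_set on policies
+ * >= POLICYDB_VERSION_CONSTRAINT_NAMES). The postfix order is what lets
+ * constraint_expr_eval() in services.c evaluate constraints with a
+ * simple stack machine.
+ */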
+
+static int class_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct class_datum *cladatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ struct constraint_node *c;
+ __le32 buf[6];
+ u32 ncons;
+ size_t len, len2;
+ int rc;
+
+ len = strlen(key);
+ if (cladatum->comkey)
+ len2 = strlen(cladatum->comkey);
+ else
+ len2 = 0;
+
+ ncons = 0;
+ for (c = cladatum->constraints; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(len2);
+ buf[2] = cpu_to_le32(cladatum->value);
+ buf[3] = cpu_to_le32(cladatum->permissions.nprim);
+ buf[4] = cpu_to_le32(cladatum->permissions.table.nel);
+ buf[5] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 6, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ if (cladatum->comkey) {
+ rc = put_entry(cladatum->comkey, 1, len2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = hashtab_map(&cladatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->constraints, fp);
+ if (rc)
+ return rc;
+
+ /* write out the validatetrans rule */
+ ncons = 0;
+ for (c = cladatum->validatetrans; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->validatetrans, fp);
+ if (rc)
+ return rc;
+
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ buf[0] = cpu_to_le32(cladatum->default_user);
+ buf[1] = cpu_to_le32(cladatum->default_role);
+ buf[2] = cpu_to_le32(cladatum->default_range);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ buf[0] = cpu_to_le32(cladatum->default_type);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
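+
+/*
+ * Summary of the class record emitted above, in write order: the six
+ * u32 header fields (key length, common-key length, value, permission
+ * nprim, permission count, constraint count), the key and optional
+ * common-key strings, the class permissions, the constraints, the
+ * validatetrans count and nodes, and finally the version-gated
+ * default_user/role/range and default_type fields.
+ */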
+
+static int role_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct role_datum *role = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(role->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(role->bounds);
+
+ BUG_ON(items > ARRAY_SIZE(buf));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->dominates, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->types, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int type_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct type_datum *typdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ int rc;
+ size_t items, len;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(typdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+ u32 properties = 0;
+
+ if (typdatum->primary)
+ properties |= TYPEDATUM_PROPERTY_PRIMARY;
+
+ if (typdatum->attribute)
+ properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
+
+ buf[items++] = cpu_to_le32(properties);
+ buf[items++] = cpu_to_le32(typdatum->bounds);
+ } else {
+ buf[items++] = cpu_to_le32(typdatum->primary);
+ }
+ BUG_ON(items > ARRAY_SIZE(buf));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int user_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct user_datum *usrdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(usrdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(usrdatum->bounds);
+ BUG_ON(items > ARRAY_SIZE(buf));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&usrdatum->roles, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&usrdatum->range, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(&usrdatum->dfltlevel, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int (*const write_f[SYM_NUM]) (void *key, void *datum, void *datap) = {
+ common_write,
+ class_write,
+ role_write,
+ type_write,
+ user_write,
+ cond_write_bool,
+ sens_write,
+ cat_write,
+};
+
+static int ocontext_write(struct policydb *p, const struct policydb_compat_info *info,
+ void *fp)
+{
+ unsigned int i, j;
+ int rc;
+ size_t nel, len;
+ __be64 prefixbuf[1];
+ __le32 buf[3];
+ u32 nodebuf[8];
+ struct ocontext *c;
+ for (i = 0; i < info->ocon_num; i++) {
+ nel = 0;
+ for (c = p->ocontexts[i]; c; c = c->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = p->ocontexts[i]; c; c = c->next) {
+ switch (i) {
+ case OCON_ISID:
+ buf[0] = cpu_to_le32(c->sid[0]);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_FS:
+ case OCON_NETIF:
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[1], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_PORT:
+ buf[0] = cpu_to_le32(c->u.port.protocol);
+ buf[1] = cpu_to_le32(c->u.port.low_port);
+ buf[2] = cpu_to_le32(c->u.port.high_port);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_NODE:
+ nodebuf[0] = c->u.node.addr; /* network order */
+ nodebuf[1] = c->u.node.mask; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_FSUSE:
+ buf[0] = cpu_to_le32(c->v.behavior);
+ len = strlen(c->u.name);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_NODE6:
+ for (j = 0; j < 4; j++)
+ nodebuf[j] = c->u.node6.addr[j]; /* network order */
+ for (j = 0; j < 4; j++)
+ nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 8, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_IBPKEY:
+ /* subnet_prefix is in CPU order */
+ prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+
+ rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
+ buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_IBENDPORT:
+ len = strlen(c->u.ibendport.dev_name);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(c->u.ibendport.port);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.ibendport.dev_name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ }
+ }
+ }
+ return 0;
+}
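+
+/*
+ * Endianness note for the OCON_IBPKEY case above: the subnet prefix is
+ * the one field stored big-endian (network order, via cpu_to_be64),
+ * while the pkey bounds and every other ocontext field written here use
+ * the usual little-endian u32 encoding. OCON_NODE/OCON_NODE6 addresses
+ * and masks are copied through unconverted because they are already
+ * kept in network order.
+ */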
+
+static int genfs_write(struct policydb *p, void *fp)
+{
+ struct genfs *genfs;
+ struct ocontext *c;
+ size_t len;
+ __le32 buf[1];
+ int rc;
+
+ len = 0;
+ for (genfs = p->genfs; genfs; genfs = genfs->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (genfs = p->genfs; genfs; genfs = genfs->next) {
+ len = strlen(genfs->fstype);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(genfs->fstype, 1, len, fp);
+ if (rc)
+ return rc;
+ len = 0;
+ for (c = genfs->head; c; c = c->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = genfs->head; c; c = c->next) {
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ buf[0] = cpu_to_le32(c->v.sclass);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int range_write_helper(void *key, void *data, void *ptr)
+{
+ __le32 buf[2];
+ struct range_trans *rt = key;
+ struct mls_range *r = data;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ int rc;
+
+ buf[0] = cpu_to_le32(rt->source_type);
+ buf[1] = cpu_to_le32(rt->target_type);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+ buf[0] = cpu_to_le32(rt->target_class);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ }
+ rc = mls_write_range_helper(r, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int range_write(struct policydb *p, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+ struct policy_data pd;
+
+ pd.p = p;
+ pd.fp = fp;
+
+ buf[0] = cpu_to_le32(p->range_tr.nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ /* actually write all of the entries */
+ rc = hashtab_map(&p->range_tr, range_write_helper, &pd);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int filename_write_helper_compat(void *key, void *data, void *ptr)
+{
+ struct filename_trans_key *ft = key;
+ struct filename_trans_datum *datum = data;
+ struct ebitmap_node *node;
+ void *fp = ptr;
+ __le32 buf[4];
+ int rc;
+ u32 bit, len = strlen(ft->name);
+
+ do {
+ ebitmap_for_each_positive_bit(&datum->stypes, node, bit) {
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(ft->name, sizeof(char), len, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(bit + 1);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(datum->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+ }
+
+ datum = datum->next;
+ } while (unlikely(datum));
+
+ return 0;
+}
+
+static int filename_write_helper(void *key, void *data, void *ptr)
+{
+ struct filename_trans_key *ft = key;
+ struct filename_trans_datum *datum;
+ void *fp = ptr;
+ __le32 buf[3];
+ int rc;
+ u32 ndatum, len = strlen(ft->name);
+
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(ft->name, sizeof(char), len, fp);
+ if (rc)
+ return rc;
+
+ ndatum = 0;
+ datum = data;
+ do {
+ ndatum++;
+ datum = datum->next;
+ } while (unlikely(datum));
+
+ buf[0] = cpu_to_le32(ft->ttype);
+ buf[1] = cpu_to_le32(ft->tclass);
+ buf[2] = cpu_to_le32(ndatum);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ datum = data;
+ do {
+ rc = ebitmap_write(&datum->stypes, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(datum->otype);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ datum = datum->next;
+ } while (unlikely(datum));
+
+ return 0;
+}
+
+static int filename_trans_write(struct policydb *p, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
+ if (p->policyvers < POLICYDB_VERSION_COMP_FTRANS) {
+ buf[0] = cpu_to_le32(p->compat_filename_trans_count);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(&p->filename_trans,
+ filename_write_helper_compat, fp);
+ } else {
+ buf[0] = cpu_to_le32(p->filename_trans.nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(&p->filename_trans, filename_write_helper, fp);
+ }
+ return rc;
+}
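+
+/*
+ * Example contrasting the two formats above (type names illustrative):
+ * a rule set where source types tA and tB both transition to otype tO
+ * under name "foo" is written once in the COMP_FTRANS format (name,
+ * ttype, tclass, ndatum == 1, an stypes ebitmap containing tA and tB,
+ * otype tO), but expands to two full records in the compat format, one
+ * per source type bit. This is why the compat path writes
+ * p->compat_filename_trans_count rather than the hash table's nel.
+ */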
+
+/*
+ * Write the configuration data in a policy database
+ * structure to a policy database binary representation
+ * file.
+ */
+int policydb_write(struct policydb *p, void *fp)
+{
+ unsigned int i, num_syms;
+ int rc;
+ __le32 buf[4];
+ u32 config;
+ size_t len;
+ const struct policydb_compat_info *info;
+
+ /*
+ * Refuse to write policies older than the compressed avtab
+ * format, to simplify the writer; other checks have been
+ * dropped because this is assumed throughout the writer code.
+ * Be careful if you ever try to remove this restriction.
+ */
+ if (p->policyvers < POLICYDB_VERSION_AVTAB) {
+ pr_err("SELinux: refusing to write policy version %d;"
+ " it is less than version %d\n", p->policyvers,
+ POLICYDB_VERSION_AVTAB);
+ return -EINVAL;
+ }
+
+ config = 0;
+ if (p->mls_enabled)
+ config |= POLICYDB_CONFIG_MLS;
+
+ if (p->reject_unknown)
+ config |= REJECT_UNKNOWN;
+ if (p->allow_unknown)
+ config |= ALLOW_UNKNOWN;
+
+ /* Write the magic number and string identifiers. */
+ buf[0] = cpu_to_le32(POLICYDB_MAGIC);
+ len = strlen(POLICYDB_STRING);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(POLICYDB_STRING, 1, len, fp);
+ if (rc)
+ return rc;
+
+ /* Write the version, config, and table sizes. */
+ info = policydb_lookup_compat(p->policyvers);
+ if (!info) {
+ pr_err("SELinux: compatibility lookup failed for policy "
+ "version %d", p->policyvers);
+ return -EINVAL;
+ }
+
+ buf[0] = cpu_to_le32(p->policyvers);
+ buf[1] = cpu_to_le32(config);
+ buf[2] = cpu_to_le32(info->sym_num);
+ buf[3] = cpu_to_le32(info->ocon_num);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+ rc = ebitmap_write(&p->policycaps, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+ rc = ebitmap_write(&p->permissive_map, fp);
+ if (rc)
+ return rc;
+ }
+
+ num_syms = info->sym_num;
+ for (i = 0; i < num_syms; i++) {
+ struct policy_data pd;
+
+ pd.fp = fp;
+ pd.p = p;
+
+ buf[0] = cpu_to_le32(p->symtab[i].nprim);
+ buf[1] = cpu_to_le32(p->symtab[i].table.nel);
+
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = hashtab_map(&p->symtab[i].table, write_f[i], &pd);
+ if (rc)
+ return rc;
+ }
+
+ rc = avtab_write(p, &p->te_avtab, fp);
+ if (rc)
+ return rc;
+
+ rc = cond_write_list(p, fp);
+ if (rc)
+ return rc;
+
+ rc = role_trans_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = role_allow_write(p->role_allow, fp);
+ if (rc)
+ return rc;
+
+ rc = filename_trans_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = ocontext_write(p, info, fp);
+ if (rc)
+ return rc;
+
+ rc = genfs_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = range_write(p, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e = &p->type_attr_map_array[i];
+
+ rc = ebitmap_write(e, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
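+
+/*
+ * Overall section order produced by policydb_write(), matching what
+ * policydb_read() consumes: header, policycaps and permissive ebitmaps
+ * (version-gated), the eight symbol tables in write_f[] order, the TE
+ * avtab, the conditional list, role transitions, role allows, filename
+ * transitions, ocontexts, genfs entries, range transitions, and the
+ * per-type type_attr_map ebitmaps.
+ */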
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
new file mode 100644
index 000000000..ffc4e7bad
--- /dev/null
+++ b/security/selinux/ss/policydb.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A policy database (policydb) specifies the
+ * configuration data for the security policy.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+
+/*
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ */
+
+#ifndef _SS_POLICYDB_H_
+#define _SS_POLICYDB_H_
+
+#include "symtab.h"
+#include "avtab.h"
+#include "sidtab.h"
+#include "ebitmap.h"
+#include "mls_types.h"
+#include "context.h"
+#include "constraint.h"
+
+/*
+ * A datum type is defined for each kind of symbol
+ * in the configuration data: individual permissions,
+ * common prefixes for access vectors, classes,
+ * users, roles, types, sensitivities, categories, etc.
+ */
+
+/* Permission attributes */
+struct perm_datum {
+ u32 value; /* permission bit + 1 */
+};
+
+/* Attributes of a common prefix for access vectors */
+struct common_datum {
+ u32 value; /* internal common value */
+ struct symtab permissions; /* common permissions */
+};
+
+/* Class attributes */
+struct class_datum {
+ u32 value; /* class value */
+ char *comkey; /* common name */
+ struct common_datum *comdatum; /* common datum */
+ struct symtab permissions; /* class-specific permission symbol table */
+ struct constraint_node *constraints; /* constraints on class permissions */
+ struct constraint_node *validatetrans; /* special transition rules */
+/* Options for how a new object's user, role, and type should be decided */
+#define DEFAULT_SOURCE 1
+#define DEFAULT_TARGET 2
+ char default_user;
+ char default_role;
+ char default_type;
+/* Options for how a new object's range should be decided */
+#define DEFAULT_SOURCE_LOW 1
+#define DEFAULT_SOURCE_HIGH 2
+#define DEFAULT_SOURCE_LOW_HIGH 3
+#define DEFAULT_TARGET_LOW 4
+#define DEFAULT_TARGET_HIGH 5
+#define DEFAULT_TARGET_LOW_HIGH 6
+#define DEFAULT_GLBLUB 7
+ char default_range;
+};
+
+/* Role attributes */
+struct role_datum {
+ u32 value; /* internal role value */
+ u32 bounds; /* boundary of role */
+ struct ebitmap dominates; /* set of roles dominated by this role */
+ struct ebitmap types; /* set of authorized types for role */
+};
+
+struct role_trans_key {
+ u32 role; /* current role */
+ u32 type; /* program executable type, or new object type */
+ u32 tclass; /* process class, or new object class */
+};
+
+struct role_trans_datum {
+ u32 new_role; /* new role */
+};
+
+struct filename_trans_key {
+ u32 ttype; /* parent dir context */
+ u16 tclass; /* class of new object */
+ const char *name; /* last path component */
+};
+
+struct filename_trans_datum {
+ struct ebitmap stypes; /* bitmap of source types for this otype */
+ u32 otype; /* resulting type of new object */
+ struct filename_trans_datum *next; /* record for next otype */
+};
+
+struct role_allow {
+ u32 role; /* current role */
+ u32 new_role; /* new role */
+ struct role_allow *next;
+};
+
+/* Type attributes */
+struct type_datum {
+ u32 value; /* internal type value */
+ u32 bounds; /* boundary of type */
+ unsigned char primary; /* primary name? */
+ unsigned char attribute; /* attribute? */
+};
+
+/* User attributes */
+struct user_datum {
+ u32 value; /* internal user value */
+ u32 bounds; /* bounds of user */
+ struct ebitmap roles; /* set of authorized roles for user */
+ struct mls_range range; /* MLS range (min - max) for user */
+ struct mls_level dfltlevel; /* default login MLS level for user */
+};
+
+
+/* Sensitivity attributes */
+struct level_datum {
+ struct mls_level *level; /* sensitivity and associated categories */
+ unsigned char isalias; /* is this sensitivity an alias for another? */
+};
+
+/* Category attributes */
+struct cat_datum {
+ u32 value; /* internal category bit + 1 */
+ unsigned char isalias; /* is this category an alias for another? */
+};
+
+struct range_trans {
+ u32 source_type;
+ u32 target_type;
+ u32 target_class;
+};
+
+/* Boolean data type */
+struct cond_bool_datum {
+ __u32 value; /* internal type value */
+ int state;
+};
+
+struct cond_node;
+
+/*
+ * A type set preserves the data needed to determine constraint info
+ * from the policy source. It is not used by the kernel policy itself,
+ * but allows utilities such as audit2allow to determine constraint
+ * denials.
+ */
+struct type_set {
+ struct ebitmap types;
+ struct ebitmap negset;
+ u32 flags;
+};
+
+/*
+ * The configuration data includes security contexts for
+ * initial SIDs, unlabeled file systems, TCP and UDP port numbers,
+ * network interfaces, and nodes. This structure stores the
+ * relevant data for one such entry. Entries of the same kind
+ * (e.g. all initial SIDs) are linked together into a list.
+ */
+struct ocontext {
+ union {
+ char *name; /* name of initial SID, fs, netif, fstype, path */
+ struct {
+ u8 protocol;
+ u16 low_port;
+ u16 high_port;
+ } port; /* TCP or UDP port information */
+ struct {
+ u32 addr;
+ u32 mask;
+ } node; /* node information */
+ struct {
+ u32 addr[4];
+ u32 mask[4];
+ } node6; /* IPv6 node information */
+ struct {
+ u64 subnet_prefix;
+ u16 low_pkey;
+ u16 high_pkey;
+ } ibpkey;
+ struct {
+ char *dev_name;
+ u8 port;
+ } ibendport;
+ } u;
+ union {
+ u32 sclass; /* security class for genfs */
+ u32 behavior; /* labeling behavior for fs_use */
+ } v;
+ struct context context[2]; /* security context(s) */
+ u32 sid[2]; /* SID(s) */
+ struct ocontext *next;
+};
+
+struct genfs {
+ char *fstype;
+ struct ocontext *head;
+ struct genfs *next;
+};
+
+/* symbol table array indices */
+#define SYM_COMMONS 0
+#define SYM_CLASSES 1
+#define SYM_ROLES 2
+#define SYM_TYPES 3
+#define SYM_USERS 4
+#define SYM_BOOLS 5
+#define SYM_LEVELS 6
+#define SYM_CATS 7
+#define SYM_NUM 8
+
+/* object context array indices */
+#define OCON_ISID 0 /* initial SIDs */
+#define OCON_FS 1 /* unlabeled file systems */
+#define OCON_PORT 2 /* TCP and UDP port numbers */
+#define OCON_NETIF 3 /* network interfaces */
+#define OCON_NODE 4 /* nodes */
+#define OCON_FSUSE 5 /* fs_use */
+#define OCON_NODE6 6 /* IPv6 nodes */
+#define OCON_IBPKEY 7 /* Infiniband PKeys */
+#define OCON_IBENDPORT 8 /* Infiniband end ports */
+#define OCON_NUM 9
+
+/* The policy database */
+struct policydb {
+ int mls_enabled;
+
+ /* symbol tables */
+ struct symtab symtab[SYM_NUM];
+#define p_commons symtab[SYM_COMMONS]
+#define p_classes symtab[SYM_CLASSES]
+#define p_roles symtab[SYM_ROLES]
+#define p_types symtab[SYM_TYPES]
+#define p_users symtab[SYM_USERS]
+#define p_bools symtab[SYM_BOOLS]
+#define p_levels symtab[SYM_LEVELS]
+#define p_cats symtab[SYM_CATS]
+
+ /* symbol names indexed by (value - 1) */
+ char **sym_val_to_name[SYM_NUM];
+
+ /* class, role, and user attributes indexed by (value - 1) */
+ struct class_datum **class_val_to_struct;
+ struct role_datum **role_val_to_struct;
+ struct user_datum **user_val_to_struct;
+ struct type_datum **type_val_to_struct;
+
+ /* type enforcement access vectors and transitions */
+ struct avtab te_avtab;
+
+ /* role transitions */
+ struct hashtab role_tr;
+
+ /* file transitions with the last path component */
+ /* quickly exclude lookups when parent ttype has no rules */
+ struct ebitmap filename_trans_ttypes;
+ /* actual set of filename_trans rules */
+ struct hashtab filename_trans;
+ /* only used if policyvers < POLICYDB_VERSION_COMP_FTRANS */
+ u32 compat_filename_trans_count;
+
+ /* bools indexed by (value - 1) */
+ struct cond_bool_datum **bool_val_to_struct;
+ /* type enforcement conditional access vectors and transitions */
+ struct avtab te_cond_avtab;
+ /* array indexing te_cond_avtab by conditional */
+ struct cond_node *cond_list;
+ u32 cond_list_len;
+
+ /* role allows */
+ struct role_allow *role_allow;
+
+ /* security contexts of initial SIDs, unlabeled file systems,
+ TCP or UDP port numbers, network interfaces and nodes */
+ struct ocontext *ocontexts[OCON_NUM];
+
+ /* security contexts for files in filesystems that cannot support
+ a persistent label mapping or use another
+ fixed labeling behavior. */
+ struct genfs *genfs;
+
+ /* range transitions table (range_trans_key -> mls_range) */
+ struct hashtab range_tr;
+
+ /* type -> attribute reverse mapping */
+ struct ebitmap *type_attr_map_array;
+
+ struct ebitmap policycaps;
+
+ struct ebitmap permissive_map;
+
+ /* length of this policy when it was loaded */
+ size_t len;
+
+ unsigned int policyvers;
+
+ unsigned int reject_unknown : 1;
+ unsigned int allow_unknown : 1;
+
+ u16 process_class;
+ u32 process_trans_perms;
+} __randomize_layout;
+
+extern void policydb_destroy(struct policydb *p);
+extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
+extern int policydb_context_isvalid(struct policydb *p, struct context *c);
+extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
+extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
+extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
+extern int policydb_read(struct policydb *p, void *fp);
+extern int policydb_write(struct policydb *p, void *fp);
+
+extern struct filename_trans_datum *policydb_filenametr_search(
+ struct policydb *p, struct filename_trans_key *key);
+
+extern struct mls_range *policydb_rangetr_search(
+ struct policydb *p, struct range_trans *key);
+
+extern struct role_trans_datum *policydb_roletr_search(
+ struct policydb *p, struct role_trans_key *key);
+
+#define POLICYDB_CONFIG_MLS 1
+
+/* the config flags related to unknown classes/perms are bits 2 and 3 */
+#define REJECT_UNKNOWN 0x00000002
+#define ALLOW_UNKNOWN 0x00000004
+
+#define OBJECT_R "object_r"
+#define OBJECT_R_VAL 1
+
+#define POLICYDB_MAGIC SELINUX_MAGIC
+#define POLICYDB_STRING "SE Linux"
+
+struct policy_file {
+ char *data;
+ size_t len;
+};
+
+struct policy_data {
+ struct policydb *p;
+ void *fp;
+};
+
+static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
+{
+ if (bytes > fp->len)
+ return -EINVAL;
+
+ memcpy(buf, fp->data, bytes);
+ fp->data += bytes;
+ fp->len -= bytes;
+ return 0;
+}
+
+static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp)
+{
+ size_t len = bytes * num;
+
+ if (len > fp->len)
+ return -EINVAL;
+ memcpy(fp->data, buf, len);
+ fp->data += len;
+ fp->len -= len;
+
+ return 0;
+}
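+
+/*
+ * Minimal usage sketch for the two helpers above (illustrative only,
+ * not part of the upstream header): round-tripping one little-endian
+ * u32 through a policy image.
+ *
+ *	__le32 buf[1];
+ *	u32 val;
+ *
+ *	if (next_entry(buf, fp, sizeof(u32)))
+ *		return -EINVAL;		(fewer than 4 bytes remained)
+ *	val = le32_to_cpu(buf[0]);
+ *	...
+ *	buf[0] = cpu_to_le32(val);
+ *	if (put_entry(buf, sizeof(u32), 1, fp))
+ *		return -EINVAL;		(no room left in the image)
+ */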
+
+static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
+{
+ return p->sym_val_to_name[sym_num][element_nr];
+}
+
+extern u16 string_to_security_class(struct policydb *p, const char *name);
+extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
+
+#endif /* _SS_POLICYDB_H_ */
+
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
new file mode 100644
index 000000000..64a6a37dc
--- /dev/null
+++ b/security/selinux/ss/services.c
@@ -0,0 +1,4072 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the security services.
+ *
+ * Authors : Stephen Smalley, <sds@tycho.nsa.gov>
+ * James Morris <jmorris@redhat.com>
+ *
+ * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ *
+ * Support for enhanced MLS infrastructure.
+ * Support for context based audit filters.
+ *
+ * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
+ *
+ * Added conditional policy language extensions
+ *
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
+ *
+ * Added support for NetLabel
+ * Added support for the policy capability bitmap
+ *
+ * Updated: Chad Sellers <csellers@tresys.com>
+ *
+ * Added validation of kernel classes and permissions
+ *
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Added support for bounds domain and audit messages on masked permissions
+ *
+ * Updated: Guido Trentalancia <guido@trentalancia.com>
+ *
+ * Added support for runtime switching of the policy type
+ *
+ * Copyright (C) 2008, 2009 NEC Corporation
+ * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+ * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
+ * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/sched.h>
+#include <linux/audit.h>
+#include <linux/vmalloc.h>
+#include <linux/lsm_hooks.h>
+#include <net/netlabel.h>
+
+#include "flask.h"
+#include "avc.h"
+#include "avc_ss.h"
+#include "security.h"
+#include "context.h"
+#include "policydb.h"
+#include "sidtab.h"
+#include "services.h"
+#include "conditional.h"
+#include "mls.h"
+#include "objsec.h"
+#include "netlabel.h"
+#include "xfrm.h"
+#include "ebitmap.h"
+#include "audit.h"
+#include "policycap_names.h"
+#include "ima.h"
+
+struct convert_context_args {
+ struct selinux_state *state;
+ struct policydb *oldp;
+ struct policydb *newp;
+};
+
+struct selinux_policy_convert_data {
+ struct convert_context_args args;
+ struct sidtab_convert_params sidtab_params;
+};
+
+/* Forward declaration. */
+static int context_struct_to_string(struct policydb *policydb,
+ struct context *context,
+ char **scontext,
+ u32 *scontext_len);
+
+static int sidtab_entry_to_string(struct policydb *policydb,
+ struct sidtab *sidtab,
+ struct sidtab_entry *entry,
+ char **scontext,
+ u32 *scontext_len);
+
+static void context_struct_compute_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct extended_perms *xperms);
+
+static int selinux_set_mapping(struct policydb *pol,
+ const struct security_class_mapping *map,
+ struct selinux_map *out_map)
+{
+ u16 i, j;
+ unsigned k;
+ bool print_unknown_handle = false;
+
+ /* Find number of classes in the input mapping */
+ if (!map)
+ return -EINVAL;
+ i = 0;
+ while (map[i].name)
+ i++;
+
+ /* Allocate space for the class records, plus one for class zero */
+ out_map->mapping = kcalloc(++i, sizeof(*out_map->mapping), GFP_ATOMIC);
+ if (!out_map->mapping)
+ return -ENOMEM;
+
+ /* Store the raw class and permission values */
+ j = 0;
+ while (map[j].name) {
+ const struct security_class_mapping *p_in = map + (j++);
+ struct selinux_mapping *p_out = out_map->mapping + j;
+
+ /* An empty class string skips ahead */
+ if (!strcmp(p_in->name, "")) {
+ p_out->num_perms = 0;
+ continue;
+ }
+
+ p_out->value = string_to_security_class(pol, p_in->name);
+ if (!p_out->value) {
+ pr_info("SELinux: Class %s not defined in policy.\n",
+ p_in->name);
+ if (pol->reject_unknown)
+ goto err;
+ p_out->num_perms = 0;
+ print_unknown_handle = true;
+ continue;
+ }
+
+ k = 0;
+ while (p_in->perms[k]) {
+ /* An empty permission string skips ahead */
+ if (!*p_in->perms[k]) {
+ k++;
+ continue;
+ }
+ p_out->perms[k] = string_to_av_perm(pol, p_out->value,
+ p_in->perms[k]);
+ if (!p_out->perms[k]) {
+ pr_info("SELinux: Permission %s in class %s not defined in policy.\n",
+ p_in->perms[k], p_in->name);
+ if (pol->reject_unknown)
+ goto err;
+ print_unknown_handle = true;
+ }
+
+ k++;
+ }
+ p_out->num_perms = k;
+ }
+
+ if (print_unknown_handle)
+ pr_info("SELinux: the above unknown classes and permissions will be %s\n",
+ pol->allow_unknown ? "allowed" : "denied");
+
+ out_map->size = i;
+ return 0;
+err:
+ kfree(out_map->mapping);
+ out_map->mapping = NULL;
+ return -EINVAL;
+}
+
+/*
+ * Get real, policy values from mapped values
+ */
+
+static u16 unmap_class(struct selinux_map *map, u16 tclass)
+{
+ if (tclass < map->size)
+ return map->mapping[tclass].value;
+
+ return tclass;
+}
+
+/*
+ * Get kernel value for class from its policy value
+ */
+static u16 map_class(struct selinux_map *map, u16 pol_value)
+{
+ u16 i;
+
+ for (i = 1; i < map->size; i++) {
+ if (map->mapping[i].value == pol_value)
+ return i;
+ }
+
+ return SECCLASS_NULL;
+}
+
+static void map_decision(struct selinux_map *map,
+ u16 tclass, struct av_decision *avd,
+ int allow_unknown)
+{
+ if (tclass < map->size) {
+ struct selinux_mapping *mapping = &map->mapping[tclass];
+ unsigned int i, n = mapping->num_perms;
+ u32 result;
+
+ for (i = 0, result = 0; i < n; i++) {
+ if (avd->allowed & mapping->perms[i])
+ result |= 1<<i;
+ if (allow_unknown && !mapping->perms[i])
+ result |= 1<<i;
+ }
+ avd->allowed = result;
+
+ for (i = 0, result = 0; i < n; i++)
+ if (avd->auditallow & mapping->perms[i])
+ result |= 1<<i;
+ avd->auditallow = result;
+
+ for (i = 0, result = 0; i < n; i++) {
+ if (avd->auditdeny & mapping->perms[i])
+ result |= 1<<i;
+ if (!allow_unknown && !mapping->perms[i])
+ result |= 1<<i;
+ }
+ /*
+ * In case the kernel has a bug and requests a permission
+ * between num_perms and the maximum permission number, we
+ * should audit that denial
+ */
+ for (; i < (sizeof(u32)*8); i++)
+ result |= 1<<i;
+ avd->auditdeny = result;
+ }
+}
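+
+/*
+ * Worked example for map_decision(): if a kernel class maps two
+ * permissions with policy masks mapping->perms = { 0x1, 0x4 } and the
+ * policy computed avd->allowed == 0x5, the loop sets kernel bits 0 and
+ * 1, so the remapped avd->allowed becomes 0x3. Unknown (unmapped)
+ * permissions are granted or denied wholesale according to
+ * allow_unknown.
+ */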
+
+int security_mls_enabled(struct selinux_state *state)
+{
+ int mls_enabled;
+ struct selinux_policy *policy;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ mls_enabled = policy->policydb.mls_enabled;
+ rcu_read_unlock();
+ return mls_enabled;
+}
+
+/*
+ * Return the boolean value of a constraint expression
+ * when it is applied to the specified source and target
+ * security contexts.
+ *
+ * xcontext is a special beast... It is used by the validatetrans rules
+ * only. For these rules, scontext is the context before the transition,
+ * tcontext is the context after the transition, and xcontext is the context
+ * of the process performing the transition. All other callers of
+ * constraint_expr_eval should pass in NULL for xcontext.
+ */
+static int constraint_expr_eval(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ struct context *xcontext,
+ struct constraint_expr *cexpr)
+{
+ u32 val1, val2;
+ struct context *c;
+ struct role_datum *r1, *r2;
+ struct mls_level *l1, *l2;
+ struct constraint_expr *e;
+ int s[CEXPR_MAXDEPTH];
+ int sp = -1;
+
+ for (e = cexpr; e; e = e->next) {
+ switch (e->expr_type) {
+ case CEXPR_NOT:
+ BUG_ON(sp < 0);
+ s[sp] = !s[sp];
+ break;
+ case CEXPR_AND:
+ BUG_ON(sp < 1);
+ sp--;
+ s[sp] &= s[sp + 1];
+ break;
+ case CEXPR_OR:
+ BUG_ON(sp < 1);
+ sp--;
+ s[sp] |= s[sp + 1];
+ break;
+ case CEXPR_ATTR:
+ if (sp == (CEXPR_MAXDEPTH - 1))
+ return 0;
+ switch (e->attr) {
+ case CEXPR_USER:
+ val1 = scontext->user;
+ val2 = tcontext->user;
+ break;
+ case CEXPR_TYPE:
+ val1 = scontext->type;
+ val2 = tcontext->type;
+ break;
+ case CEXPR_ROLE:
+ val1 = scontext->role;
+ val2 = tcontext->role;
+ r1 = policydb->role_val_to_struct[val1 - 1];
+ r2 = policydb->role_val_to_struct[val2 - 1];
+ switch (e->op) {
+ case CEXPR_DOM:
+ s[++sp] = ebitmap_get_bit(&r1->dominates,
+ val2 - 1);
+ continue;
+ case CEXPR_DOMBY:
+ s[++sp] = ebitmap_get_bit(&r2->dominates,
+ val1 - 1);
+ continue;
+ case CEXPR_INCOMP:
+ s[++sp] = (!ebitmap_get_bit(&r1->dominates,
+ val2 - 1) &&
+ !ebitmap_get_bit(&r2->dominates,
+ val1 - 1));
+ continue;
+ default:
+ break;
+ }
+ break;
+ case CEXPR_L1L2:
+ l1 = &(scontext->range.level[0]);
+ l2 = &(tcontext->range.level[0]);
+ goto mls_ops;
+ case CEXPR_L1H2:
+ l1 = &(scontext->range.level[0]);
+ l2 = &(tcontext->range.level[1]);
+ goto mls_ops;
+ case CEXPR_H1L2:
+ l1 = &(scontext->range.level[1]);
+ l2 = &(tcontext->range.level[0]);
+ goto mls_ops;
+ case CEXPR_H1H2:
+ l1 = &(scontext->range.level[1]);
+ l2 = &(tcontext->range.level[1]);
+ goto mls_ops;
+ case CEXPR_L1H1:
+ l1 = &(scontext->range.level[0]);
+ l2 = &(scontext->range.level[1]);
+ goto mls_ops;
+ case CEXPR_L2H2:
+ l1 = &(tcontext->range.level[0]);
+ l2 = &(tcontext->range.level[1]);
+ goto mls_ops;
+mls_ops:
+ switch (e->op) {
+ case CEXPR_EQ:
+ s[++sp] = mls_level_eq(l1, l2);
+ continue;
+ case CEXPR_NEQ:
+ s[++sp] = !mls_level_eq(l1, l2);
+ continue;
+ case CEXPR_DOM:
+ s[++sp] = mls_level_dom(l1, l2);
+ continue;
+ case CEXPR_DOMBY:
+ s[++sp] = mls_level_dom(l2, l1);
+ continue;
+ case CEXPR_INCOMP:
+ s[++sp] = mls_level_incomp(l2, l1);
+ continue;
+ default:
+ BUG();
+ return 0;
+ }
+ break;
+ default:
+ BUG();
+ return 0;
+ }
+
+ switch (e->op) {
+ case CEXPR_EQ:
+ s[++sp] = (val1 == val2);
+ break;
+ case CEXPR_NEQ:
+ s[++sp] = (val1 != val2);
+ break;
+ default:
+ BUG();
+ return 0;
+ }
+ break;
+ case CEXPR_NAMES:
+ if (sp == (CEXPR_MAXDEPTH-1))
+ return 0;
+ c = scontext;
+ if (e->attr & CEXPR_TARGET)
+ c = tcontext;
+ else if (e->attr & CEXPR_XTARGET) {
+ c = xcontext;
+ if (!c) {
+ BUG();
+ return 0;
+ }
+ }
+ if (e->attr & CEXPR_USER)
+ val1 = c->user;
+ else if (e->attr & CEXPR_ROLE)
+ val1 = c->role;
+ else if (e->attr & CEXPR_TYPE)
+ val1 = c->type;
+ else {
+ BUG();
+ return 0;
+ }
+
+ switch (e->op) {
+ case CEXPR_EQ:
+ s[++sp] = ebitmap_get_bit(&e->names, val1 - 1);
+ break;
+ case CEXPR_NEQ:
+ s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1);
+ break;
+ default:
+ BUG();
+ return 0;
+ }
+ break;
+ default:
+ BUG();
+ return 0;
+ }
+ }
+
+ BUG_ON(sp != 0);
+ return s[0];
+}
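+
+/*
+ * Worked example of the stack evaluation above: the constraint
+ * "u1 == u2 and r1 == r2" is stored in postfix order as the expression
+ * list [ATTR(USER, EQ), ATTR(ROLE, EQ), AND]. The first two entries
+ * each push a comparison result, and the AND entry collapses the top
+ * two stack slots, leaving the final verdict in s[0].
+ */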
+
+/*
+ * security_dump_masked_av - dumps permissions masked during
+ * security_compute_av due to RBAC, MLS/constraints, or type bounds.
+ */
+static int dump_masked_av_helper(void *k, void *d, void *args)
+{
+ struct perm_datum *pdatum = d;
+ char **permission_names = args;
+
+ BUG_ON(pdatum->value < 1 || pdatum->value > 32);
+
+ permission_names[pdatum->value - 1] = (char *)k;
+
+ return 0;
+}
+
+static void security_dump_masked_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ u32 permissions,
+ const char *reason)
+{
+ struct common_datum *common_dat;
+ struct class_datum *tclass_dat;
+ struct audit_buffer *ab;
+ char *tclass_name;
+ char *scontext_name = NULL;
+ char *tcontext_name = NULL;
+ char *permission_names[32];
+ int index;
+ u32 length;
+ bool need_comma = false;
+
+ if (!permissions)
+ return;
+
+ tclass_name = sym_name(policydb, SYM_CLASSES, tclass - 1);
+ tclass_dat = policydb->class_val_to_struct[tclass - 1];
+ common_dat = tclass_dat->comdatum;
+
+ /* init permission_names */
+ if (common_dat &&
+ hashtab_map(&common_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ if (hashtab_map(&tclass_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ /* get scontext/tcontext in text form */
+ if (context_struct_to_string(policydb, scontext,
+ &scontext_name, &length) < 0)
+ goto out;
+
+ if (context_struct_to_string(policydb, tcontext,
+ &tcontext_name, &length) < 0)
+ goto out;
+
+ /* audit a message */
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ if (!ab)
+ goto out;
+
+ audit_log_format(ab, "op=security_compute_av reason=%s "
+ "scontext=%s tcontext=%s tclass=%s perms=",
+ reason, scontext_name, tcontext_name, tclass_name);
+
+ for (index = 0; index < 32; index++) {
+ u32 mask = (1 << index);
+
+ if ((mask & permissions) == 0)
+ continue;
+
+ audit_log_format(ab, "%s%s",
+ need_comma ? "," : "",
+ permission_names[index]
+ ? permission_names[index] : "????");
+ need_comma = true;
+ }
+ audit_log_end(ab);
+out:
+ /* release scontext/tcontext */
+ kfree(tcontext_name);
+ kfree(scontext_name);
+}
+
+/*
+ * type_attribute_bounds_av - drops permissions that violate
+ * a boundary constraint.
+ */
+static void type_attribute_bounds_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd)
+{
+ struct context lo_scontext;
+ struct context lo_tcontext, *tcontextp = tcontext;
+ struct av_decision lo_avd;
+ struct type_datum *source;
+ struct type_datum *target;
+ u32 masked = 0;
+
+ source = policydb->type_val_to_struct[scontext->type - 1];
+ BUG_ON(!source);
+
+ if (!source->bounds)
+ return;
+
+ target = policydb->type_val_to_struct[tcontext->type - 1];
+ BUG_ON(!target);
+
+ memset(&lo_avd, 0, sizeof(lo_avd));
+
+ memcpy(&lo_scontext, scontext, sizeof(lo_scontext));
+ lo_scontext.type = source->bounds;
+
+ if (target->bounds) {
+ memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext));
+ lo_tcontext.type = target->bounds;
+ tcontextp = &lo_tcontext;
+ }
+
+ context_struct_compute_av(policydb, &lo_scontext,
+ tcontextp,
+ tclass,
+ &lo_avd,
+ NULL);
+
+ masked = ~lo_avd.allowed & avd->allowed;
+
+ if (likely(!masked))
+ return; /* no masked permission */
+
+ /* mask violated permissions */
+ avd->allowed &= ~masked;
+
+ /* audit masked permissions */
+ security_dump_masked_av(policydb, scontext, tcontext,
+ tclass, masked, "bounds");
+}
+
+/*
+ * Flag which drivers have permissions; only ioctl-based extended
+ * permissions are considered.
+ */
+void services_compute_xperms_drivers(
+ struct extended_perms *xperms,
+ struct avtab_node *node)
+{
+ unsigned int i;
+
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+ /* one or more drivers have all permissions allowed */
+ for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
+ xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
+ } else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+ /* if allowing permissions within a driver */
+ security_xperm_set(xperms->drivers.p,
+ node->datum.u.xperms->driver);
+ }
+
+ xperms->len = 1;
+}
+
+/*
+ * Compute access vectors and extended permissions based on a context
+ * structure pair for the permissions in a particular class.
+ */
+static void context_struct_compute_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct extended_perms *xperms)
+{
+ struct constraint_node *constraint;
+ struct role_allow *ra;
+ struct avtab_key avkey;
+ struct avtab_node *node;
+ struct class_datum *tclass_datum;
+ struct ebitmap *sattr, *tattr;
+ struct ebitmap_node *snode, *tnode;
+ unsigned int i, j;
+
+ avd->allowed = 0;
+ avd->auditallow = 0;
+ avd->auditdeny = 0xffffffff;
+ if (xperms) {
+ memset(&xperms->drivers, 0, sizeof(xperms->drivers));
+ xperms->len = 0;
+ }
+
+ if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
+ if (printk_ratelimit())
+ pr_warn("SELinux: Invalid class %hu\n", tclass);
+ return;
+ }
+
+ tclass_datum = policydb->class_val_to_struct[tclass - 1];
+
+ /*
+ * If a specific type enforcement rule was defined for
+ * this permission check, then use it.
+ */
+ avkey.target_class = tclass;
+ avkey.specified = AVTAB_AV | AVTAB_XPERMS;
+ sattr = &policydb->type_attr_map_array[scontext->type - 1];
+ tattr = &policydb->type_attr_map_array[tcontext->type - 1];
+ ebitmap_for_each_positive_bit(sattr, snode, i) {
+ ebitmap_for_each_positive_bit(tattr, tnode, j) {
+ avkey.source_type = i + 1;
+ avkey.target_type = j + 1;
+ for (node = avtab_search_node(&policydb->te_avtab,
+ &avkey);
+ node;
+ node = avtab_search_node_next(node, avkey.specified)) {
+ if (node->key.specified == AVTAB_ALLOWED)
+ avd->allowed |= node->datum.u.data;
+ else if (node->key.specified == AVTAB_AUDITALLOW)
+ avd->auditallow |= node->datum.u.data;
+ else if (node->key.specified == AVTAB_AUDITDENY)
+ avd->auditdeny &= node->datum.u.data;
+ else if (xperms && (node->key.specified & AVTAB_XPERMS))
+ services_compute_xperms_drivers(xperms, node);
+ }
+
+ /* Check conditional av table for additional permissions */
+ cond_compute_av(&policydb->te_cond_avtab, &avkey,
+ avd, xperms);
+
+ }
+ }
+
+ /*
+ * Remove any permissions prohibited by a constraint (this includes
+ * the MLS policy).
+ */
+ constraint = tclass_datum->constraints;
+ while (constraint) {
+ if ((constraint->permissions & (avd->allowed)) &&
+ !constraint_expr_eval(policydb, scontext, tcontext, NULL,
+ constraint->expr)) {
+ avd->allowed &= ~(constraint->permissions);
+ }
+ constraint = constraint->next;
+ }
+
+ /*
+ * If checking process transition permission and the
+ * role is changing, then check the (current_role, new_role)
+ * pair.
+ */
+ if (tclass == policydb->process_class &&
+ (avd->allowed & policydb->process_trans_perms) &&
+ scontext->role != tcontext->role) {
+ for (ra = policydb->role_allow; ra; ra = ra->next) {
+ if (scontext->role == ra->role &&
+ tcontext->role == ra->new_role)
+ break;
+ }
+ if (!ra)
+ avd->allowed &= ~policydb->process_trans_perms;
+ }
+
+ /*
+ * If the given source and target types have a boundary
+ * constraint, lazy checks must mask any violated
+ * permissions and report them to userspace via audit.
+ */
+ type_attribute_bounds_av(policydb, scontext, tcontext,
+ tclass, avd);
+}
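+
+/*
+ * Note on the nested loop above: because type_attr_map_array maps each
+ * type to itself plus all of its attributes, a single access check
+ * walks every (source attribute, target attribute) pair. For example,
+ * if the source type carries one attribute and the target carries two,
+ * 2 * 3 = 6 avtab lookups are combined into one av_decision.
+ */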
+
+static int security_validtrans_handle_fail(struct selinux_state *state,
+ struct selinux_policy *policy,
+ struct sidtab_entry *oentry,
+ struct sidtab_entry *nentry,
+ struct sidtab_entry *tentry,
+ u16 tclass)
+{
+ struct policydb *p = &policy->policydb;
+ struct sidtab *sidtab = policy->sidtab;
+ char *o = NULL, *n = NULL, *t = NULL;
+ u32 olen, nlen, tlen;
+
+ if (sidtab_entry_to_string(p, sidtab, oentry, &o, &olen))
+ goto out;
+ if (sidtab_entry_to_string(p, sidtab, nentry, &n, &nlen))
+ goto out;
+ if (sidtab_entry_to_string(p, sidtab, tentry, &t, &tlen))
+ goto out;
+ audit_log(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "op=security_validate_transition seresult=denied"
+ " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
+ o, n, t, sym_name(p, SYM_CLASSES, tclass-1));
+out:
+ kfree(o);
+ kfree(n);
+ kfree(t);
+
+ if (!enforcing_enabled(state))
+ return 0;
+ return -EPERM;
+}
+
+static int security_compute_validatetrans(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 orig_tclass, bool user)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct sidtab_entry *oentry;
+ struct sidtab_entry *nentry;
+ struct sidtab_entry *tentry;
+ struct class_datum *tclass_datum;
+ struct constraint_node *constraint;
+ u16 tclass;
+ int rc = 0;
+
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ if (!user)
+ tclass = unmap_class(&policy->map, orig_tclass);
+ else
+ tclass = orig_tclass;
+
+ if (!tclass || tclass > policydb->p_classes.nprim) {
+ rc = -EINVAL;
+ goto out;
+ }
+ tclass_datum = policydb->class_val_to_struct[tclass - 1];
+
+ oentry = sidtab_search_entry(sidtab, oldsid);
+ if (!oentry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, oldsid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nentry = sidtab_search_entry(sidtab, newsid);
+ if (!nentry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, newsid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ tentry = sidtab_search_entry(sidtab, tasksid);
+ if (!tentry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, tasksid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ constraint = tclass_datum->validatetrans;
+ while (constraint) {
+ if (!constraint_expr_eval(policydb, &oentry->context,
+ &nentry->context, &tentry->context,
+ constraint->expr)) {
+ if (user)
+ rc = -EPERM;
+ else
+ rc = security_validtrans_handle_fail(state,
+ policy,
+ oentry,
+ nentry,
+ tentry,
+ tclass);
+ goto out;
+ }
+ constraint = constraint->next;
+ }
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
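+
+/*
+ * Note on the @user flag above: userspace callers (via selinuxfs) get a
+ * plain -EPERM on a failed validatetrans constraint, while kernel callers
+ * go through security_validtrans_handle_fail(), which emits an audit
+ * record and only denies the transition when in enforcing mode.
+ */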
+
+int security_validate_transition_user(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 tclass)
+{
+ return security_compute_validatetrans(state, oldsid, newsid, tasksid,
+ tclass, true);
+}
+
+int security_validate_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 orig_tclass)
+{
+ return security_compute_validatetrans(state, oldsid, newsid, tasksid,
+ orig_tclass, false);
+}
+
+/**
+ * security_bounded_transition - check whether the given
+ * transition is to a bounded type.
+ * @state: SELinux state
+ * @old_sid: current security identifier
+ * @new_sid: destination security identifier
+ *
+ * Returns 0 if the type of @new_sid is bounded by the type of
+ * @old_sid; otherwise, returns an error code.
+ */
+int security_bounded_transition(struct selinux_state *state,
+ u32 old_sid, u32 new_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct sidtab_entry *old_entry, *new_entry;
+ struct type_datum *type;
+ int index;
+ int rc;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ rc = -EINVAL;
+ old_entry = sidtab_search_entry(sidtab, old_sid);
+ if (!old_entry) {
+ pr_err("SELinux: %s: unrecognized SID %u\n",
+ __func__, old_sid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ new_entry = sidtab_search_entry(sidtab, new_sid);
+ if (!new_entry) {
+ pr_err("SELinux: %s: unrecognized SID %u\n",
+ __func__, new_sid);
+ goto out;
+ }
+
+ rc = 0;
+ /* type/domain unchanged */
+ if (old_entry->context.type == new_entry->context.type)
+ goto out;
+
+ index = new_entry->context.type;
+ while (true) {
+ type = policydb->type_val_to_struct[index - 1];
+ BUG_ON(!type);
+
+ /* not bounded anymore */
+ rc = -EPERM;
+ if (!type->bounds)
+ break;
+
+ /* @newsid is bounded by @oldsid */
+ rc = 0;
+ if (type->bounds == old_entry->context.type)
+ break;
+
+ index = type->bounds;
+ }
+
+ if (rc) {
+ char *old_name = NULL;
+ char *new_name = NULL;
+ u32 length;
+
+ if (!sidtab_entry_to_string(policydb, sidtab, old_entry,
+ &old_name, &length) &&
+ !sidtab_entry_to_string(policydb, sidtab, new_entry,
+ &new_name, &length)) {
+ audit_log(audit_context(),
+ GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "op=security_bounded_transition "
+ "seresult=denied "
+ "oldcontext=%s newcontext=%s",
+ old_name, new_name);
+ }
+ kfree(new_name);
+ kfree(old_name);
+ }
+out:
+ rcu_read_unlock();
+
+ return rc;
+}
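+
+/*
+ * Illustrative sketch of the bounds walk above, using hypothetical type
+ * names. Given a policy containing:
+ *
+ *	typebounds httpd_t httpd_child_t;
+ *
+ * a transition from httpd_t to httpd_child_t is accepted: the loop starts
+ * at httpd_child_t, follows type->bounds one step up to httpd_t, matches
+ * the old type and returns 0. A type with no bounds chain leading back to
+ * the old type yields -EPERM.
+ */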
+
+static void avd_init(struct selinux_policy *policy, struct av_decision *avd)
+{
+ avd->allowed = 0;
+ avd->auditallow = 0;
+ avd->auditdeny = 0xffffffff;
+ if (policy)
+ avd->seqno = policy->latest_granting;
+ else
+ avd->seqno = 0;
+ avd->flags = 0;
+}
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ struct avtab_node *node)
+{
+ unsigned int i;
+
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+ if (xpermd->driver != node->datum.u.xperms->driver)
+ return;
+ } else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+ if (!security_xperm_test(node->datum.u.xperms->perms.p,
+ xpermd->driver))
+ return;
+ } else {
+ BUG();
+ }
+
+ if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+ xpermd->used |= XPERMS_ALLOWED;
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+ memset(xpermd->allowed->p, 0xff,
+ sizeof(xpermd->allowed->p));
+ }
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+ for (i = 0; i < ARRAY_SIZE(xpermd->allowed->p); i++)
+ xpermd->allowed->p[i] |=
+ node->datum.u.xperms->perms.p[i];
+ }
+ } else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) {
+ xpermd->used |= XPERMS_AUDITALLOW;
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+ memset(xpermd->auditallow->p, 0xff,
+ sizeof(xpermd->auditallow->p));
+ }
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+ for (i = 0; i < ARRAY_SIZE(xpermd->auditallow->p); i++)
+ xpermd->auditallow->p[i] |=
+ node->datum.u.xperms->perms.p[i];
+ }
+ } else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) {
+ xpermd->used |= XPERMS_DONTAUDIT;
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+ memset(xpermd->dontaudit->p, 0xff,
+ sizeof(xpermd->dontaudit->p));
+ }
+ if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+ for (i = 0; i < ARRAY_SIZE(xpermd->dontaudit->p); i++)
+ xpermd->dontaudit->p[i] |=
+ node->datum.u.xperms->perms.p[i];
+ }
+ } else {
+ BUG();
+ }
+}
+
+void security_compute_xperms_decision(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ u8 driver,
+ struct extended_perms_decision *xpermd)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ u16 tclass;
+ struct context *scontext, *tcontext;
+ struct avtab_key avkey;
+ struct avtab_node *node;
+ struct ebitmap *sattr, *tattr;
+ struct ebitmap_node *snode, *tnode;
+ unsigned int i, j;
+
+ xpermd->driver = driver;
+ xpermd->used = 0;
+ memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
+ memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
+ memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
+
+ rcu_read_lock();
+ if (!selinux_initialized(state))
+ goto allow;
+
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
+ if (!scontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ tcontext = sidtab_search(sidtab, tsid);
+ if (!tcontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ tclass = unmap_class(&policy->map, orig_tclass);
+ if (unlikely(orig_tclass && !tclass)) {
+ if (policydb->allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+ if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
+ pr_warn_ratelimited("SELinux: Invalid class %hu\n", tclass);
+ goto out;
+ }
+
+ avkey.target_class = tclass;
+ avkey.specified = AVTAB_XPERMS;
+ sattr = &policydb->type_attr_map_array[scontext->type - 1];
+ tattr = &policydb->type_attr_map_array[tcontext->type - 1];
+ ebitmap_for_each_positive_bit(sattr, snode, i) {
+ ebitmap_for_each_positive_bit(tattr, tnode, j) {
+ avkey.source_type = i + 1;
+ avkey.target_type = j + 1;
+ for (node = avtab_search_node(&policydb->te_avtab,
+ &avkey);
+ node;
+ node = avtab_search_node_next(node, avkey.specified))
+ services_compute_xperms_decision(xpermd, node);
+
+ cond_compute_xperms(&policydb->te_cond_avtab,
+ &avkey, xpermd);
+ }
+ }
+out:
+ rcu_read_unlock();
+ return;
+allow:
+ memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
+ goto out;
+}
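+
+/*
+ * Background for the driver/function split used above (illustrative, not
+ * kernel code): for ioctl extended permissions the low 16 bits of the
+ * ioctl command number are divided into a driver byte and a function
+ * byte:
+ *
+ *	cmd      = 0x8910;		(SIOCGIFNAME)
+ *	driver   = (cmd >> 8) & 0xff;	(0x89)
+ *	function = cmd & 0xff;		(0x10)
+ *
+ * AVTAB_XPERMS_IOCTLDRIVER entries grant whole driver ranges at once,
+ * while AVTAB_XPERMS_IOCTLFUNCTION entries enumerate individual
+ * functions within a single driver byte.
+ */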
+
+/**
+ * security_compute_av - Compute access vector decisions.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @orig_tclass: target security class
+ * @avd: access vector decisions
+ * @xperms: extended permissions
+ *
+ * Compute a set of access vector decisions based on the
+ * SID pair (@ssid, @tsid) for the permissions in @orig_tclass.
+ */
+void security_compute_av(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ struct av_decision *avd,
+ struct extended_perms *xperms)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ u16 tclass;
+ struct context *scontext = NULL, *tcontext = NULL;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ avd_init(policy, avd);
+ xperms->len = 0;
+ if (!selinux_initialized(state))
+ goto allow;
+
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
+ if (!scontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(sidtab, tsid);
+ if (!tcontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ tclass = unmap_class(&policy->map, orig_tclass);
+ if (unlikely(orig_tclass && !tclass)) {
+ if (policydb->allow_unknown)
+ goto allow;
+ goto out;
+ }
+ context_struct_compute_av(policydb, scontext, tcontext, tclass, avd,
+ xperms);
+ map_decision(&policy->map, orig_tclass, avd,
+ policydb->allow_unknown);
+out:
+ rcu_read_unlock();
+ return;
+allow:
+ avd->allowed = 0xffffffff;
+ goto out;
+}
+
+void security_compute_av_user(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ struct av_decision *avd)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct context *scontext = NULL, *tcontext = NULL;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ avd_init(policy, avd);
+ if (!selinux_initialized(state))
+ goto allow;
+
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
+ if (!scontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(sidtab, tsid);
+ if (!tcontext) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ if (unlikely(!tclass)) {
+ if (policydb->allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+ context_struct_compute_av(policydb, scontext, tcontext, tclass, avd,
+ NULL);
+out:
+ rcu_read_unlock();
+ return;
+allow:
+ avd->allowed = 0xffffffff;
+ goto out;
+}
+
+/*
+ * Write the security context string representation of
+ * the context structure `context' into a dynamically
+ * allocated string of the correct size. Set `*scontext'
+ * to point to this string and set `*scontext_len' to
+ * the length of the string.
+ */
+static int context_struct_to_string(struct policydb *p,
+ struct context *context,
+ char **scontext, u32 *scontext_len)
+{
+ char *scontextp;
+
+ if (scontext)
+ *scontext = NULL;
+ *scontext_len = 0;
+
+ if (context->len) {
+ *scontext_len = context->len;
+ if (scontext) {
+ *scontext = kstrdup(context->str, GFP_ATOMIC);
+ if (!(*scontext))
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ /* Compute the size of the context. */
+ *scontext_len += strlen(sym_name(p, SYM_USERS, context->user - 1)) + 1;
+ *scontext_len += strlen(sym_name(p, SYM_ROLES, context->role - 1)) + 1;
+ *scontext_len += strlen(sym_name(p, SYM_TYPES, context->type - 1)) + 1;
+ *scontext_len += mls_compute_context_len(p, context);
+
+ if (!scontext)
+ return 0;
+
+ /* Allocate space for the context; caller must free this space. */
+ scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
+ if (!scontextp)
+ return -ENOMEM;
+ *scontext = scontextp;
+
+ /*
+ * Copy the user name, role name and type name into the context.
+ */
+ scontextp += sprintf(scontextp, "%s:%s:%s",
+ sym_name(p, SYM_USERS, context->user - 1),
+ sym_name(p, SYM_ROLES, context->role - 1),
+ sym_name(p, SYM_TYPES, context->type - 1));
+
+ mls_sid_to_context(p, context, &scontextp);
+
+ *scontextp = 0;
+
+ return 0;
+}
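+
+/*
+ * Example of the string form produced above: with MLS enabled a context
+ * renders as "user:role:type:mls", e.g. "system_u:object_r:etc_t:s0";
+ * without MLS the trailing sensitivity component is omitted.
+ */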
+
+static int sidtab_entry_to_string(struct policydb *p,
+ struct sidtab *sidtab,
+ struct sidtab_entry *entry,
+ char **scontext, u32 *scontext_len)
+{
+ int rc = sidtab_sid2str_get(sidtab, entry, scontext, scontext_len);
+
+ if (rc != -ENOENT)
+ return rc;
+
+ rc = context_struct_to_string(p, &entry->context, scontext,
+ scontext_len);
+ if (!rc && scontext)
+ sidtab_sid2str_put(sidtab, entry, *scontext, *scontext_len);
+ return rc;
+}
+
+#include "initial_sid_to_string.h"
+
+int security_sidtab_hash_stats(struct selinux_state *state, char *page)
+{
+ struct selinux_policy *policy;
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ pr_err("SELinux: %s: called before initial load_policy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ rc = sidtab_hash_stats(policy->sidtab, page);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+const char *security_get_initial_sid_context(u32 sid)
+{
+ if (unlikely(sid > SECINITSID_NUM))
+ return NULL;
+ return initial_sid_to_string[sid];
+}
+
+static int security_sid_to_context_core(struct selinux_state *state,
+ u32 sid, char **scontext,
+ u32 *scontext_len, int force,
+ int only_invalid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct sidtab_entry *entry;
+ int rc = 0;
+
+ if (scontext)
+ *scontext = NULL;
+ *scontext_len = 0;
+
+ if (!selinux_initialized(state)) {
+ if (sid <= SECINITSID_NUM) {
+ char *scontextp;
+ const char *s = initial_sid_to_string[sid];
+
+ if (!s)
+ return -EINVAL;
+ *scontext_len = strlen(s) + 1;
+ if (!scontext)
+ return 0;
+ scontextp = kmemdup(s, *scontext_len, GFP_ATOMIC);
+ if (!scontextp)
+ return -ENOMEM;
+ *scontext = scontextp;
+ return 0;
+ }
+ pr_err("SELinux: %s: called before initial "
+ "load_policy on unknown SID %d\n", __func__, sid);
+ return -EINVAL;
+ }
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ if (force)
+ entry = sidtab_search_entry_force(sidtab, sid);
+ else
+ entry = sidtab_search_entry(sidtab, sid);
+ if (!entry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, sid);
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ if (only_invalid && !entry->context.len)
+ goto out_unlock;
+
+ rc = sidtab_entry_to_string(policydb, sidtab, entry, scontext,
+ scontext_len);
+
+out_unlock:
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * security_sid_to_context - Obtain a context for a given SID.
+ * @state: SELinux state
+ * @sid: security identifier, SID
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ *
+ * Write the string representation of the context associated with @sid
+ * into a dynamically allocated string of the correct size. Set @scontext
+ * to point to this string and set @scontext_len to the length of the string.
+ */
+int security_sid_to_context(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len)
+{
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 0, 0);
+}
+
+int security_sid_to_context_force(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len)
+{
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 1, 0);
+}
+
+/**
+ * security_sid_to_context_inval - Obtain a context for a given SID if it
+ * is invalid.
+ * @state: SELinux state
+ * @sid: security identifier, SID
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ *
+ * Write the string representation of the context associated with @sid
+ * into a dynamically allocated string of the correct size, but only if the
+ * context is invalid in the current policy. Set @scontext to point to
+ * this string (or NULL if the context is valid) and set @scontext_len to
+ * the length of the string (or 0 if the context is valid).
+ */
+int security_sid_to_context_inval(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len)
+{
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 1, 1);
+}
+
+/*
+ * Caveat: Mutates scontext.
+ */
+static int string_to_context_struct(struct policydb *pol,
+ struct sidtab *sidtabp,
+ char *scontext,
+ struct context *ctx,
+ u32 def_sid)
+{
+ struct role_datum *role;
+ struct type_datum *typdatum;
+ struct user_datum *usrdatum;
+ char *scontextp, *p, oldc;
+ int rc = 0;
+
+ context_init(ctx);
+
+ /* Parse the security context. */
+
+ rc = -EINVAL;
+ scontextp = scontext;
+
+ /* Extract the user. */
+ p = scontextp;
+ while (*p && *p != ':')
+ p++;
+
+ if (*p == 0)
+ goto out;
+
+ *p++ = 0;
+
+ usrdatum = symtab_search(&pol->p_users, scontextp);
+ if (!usrdatum)
+ goto out;
+
+ ctx->user = usrdatum->value;
+
+ /* Extract role. */
+ scontextp = p;
+ while (*p && *p != ':')
+ p++;
+
+ if (*p == 0)
+ goto out;
+
+ *p++ = 0;
+
+ role = symtab_search(&pol->p_roles, scontextp);
+ if (!role)
+ goto out;
+ ctx->role = role->value;
+
+ /* Extract type. */
+ scontextp = p;
+ while (*p && *p != ':')
+ p++;
+ oldc = *p;
+ *p++ = 0;
+
+ typdatum = symtab_search(&pol->p_types, scontextp);
+ if (!typdatum || typdatum->attribute)
+ goto out;
+
+ ctx->type = typdatum->value;
+
+ rc = mls_context_to_sid(pol, oldc, p, ctx, sidtabp, def_sid);
+ if (rc)
+ goto out;
+
+ /* Check the validity of the new context. */
+ rc = -EINVAL;
+ if (!policydb_context_isvalid(pol, ctx))
+ goto out;
+ rc = 0;
+out:
+ if (rc)
+ context_destroy(ctx);
+ return rc;
+}
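+
+/*
+ * Illustrative parse of the function above: for the input
+ * "unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023" the first
+ * three ':'-separated fields resolve the user, role and type symbols;
+ * the saved delimiter (oldc) and the remainder "s0-s0:c0.c1023" are then
+ * handed to mls_context_to_sid() to fill in the MLS range.
+ */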
+
+static int security_context_to_sid_core(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *sid, u32 def_sid, gfp_t gfp_flags,
+ int force)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ char *scontext2, *str = NULL;
+ struct context context;
+ int rc = 0;
+
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
+ /* Copy the string to allow changes and ensure a NUL terminator */
+ scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
+ if (!scontext2)
+ return -ENOMEM;
+
+ if (!selinux_initialized(state)) {
+ int i;
+
+ for (i = 1; i < SECINITSID_NUM; i++) {
+ const char *s = initial_sid_to_string[i];
+
+ if (s && !strcmp(s, scontext2)) {
+ *sid = i;
+ goto out;
+ }
+ }
+ *sid = SECINITSID_KERNEL;
+ goto out;
+ }
+ *sid = SECSID_NULL;
+
+ if (force) {
+ /* Save another copy for storing in uninterpreted form */
+ rc = -ENOMEM;
+ str = kstrdup(scontext2, gfp_flags);
+ if (!str)
+ goto out;
+ }
+retry:
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+ rc = string_to_context_struct(policydb, sidtab, scontext2,
+ &context, def_sid);
+ if (rc == -EINVAL && force) {
+ context.str = str;
+ context.len = strlen(str) + 1;
+ str = NULL;
+ } else if (rc)
+ goto out_unlock;
+ rc = sidtab_context_to_sid(sidtab, &context, sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ if (context.str) {
+ str = context.str;
+ context.str = NULL;
+ }
+ context_destroy(&context);
+ goto retry;
+ }
+ context_destroy(&context);
+out_unlock:
+ rcu_read_unlock();
+out:
+ kfree(scontext2);
+ kfree(str);
+ return rc;
+}
+
+/**
+ * security_context_to_sid - Obtain a SID for a given security context.
+ * @state: SELinux state
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ * @sid: security identifier, SID
+ * @gfp: context for the allocation
+ *
+ * Obtains a SID associated with the security context that
+ * has the string representation specified by @scontext.
+ * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
+ * memory is available, or 0 on success.
+ */
+int security_context_to_sid(struct selinux_state *state,
+ const char *scontext, u32 scontext_len, u32 *sid,
+ gfp_t gfp)
+{
+ return security_context_to_sid_core(state, scontext, scontext_len,
+ sid, SECSID_NULL, gfp, 0);
+}
+
+int security_context_str_to_sid(struct selinux_state *state,
+ const char *scontext, u32 *sid, gfp_t gfp)
+{
+ return security_context_to_sid(state, scontext, strlen(scontext),
+ sid, gfp);
+}
+
+/**
+ * security_context_to_sid_default - Obtain a SID for a given security context,
+ * falling back to specified default if needed.
+ *
+ * @state: SELinux state
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ * @sid: security identifier, SID
+ * @def_sid: default SID to assign on error
+ * @gfp_flags: the allocator get-free-page (GFP) flags
+ *
+ * Obtains a SID associated with the security context that
+ * has the string representation specified by @scontext.
+ * The default SID is passed to the MLS layer to be used to allow
+ * kernel labeling of the MLS field if the MLS field is not present
+ * (for upgrading to MLS without full relabel).
+ * Implicitly forces adding of the context even if it cannot be mapped yet.
+ * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
+ * memory is available, or 0 on success.
+ */
+int security_context_to_sid_default(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *sid, u32 def_sid, gfp_t gfp_flags)
+{
+ return security_context_to_sid_core(state, scontext, scontext_len,
+ sid, def_sid, gfp_flags, 1);
+}
+
+int security_context_to_sid_force(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
+ u32 *sid)
+{
+ return security_context_to_sid_core(state, scontext, scontext_len,
+ sid, SECSID_NULL, GFP_KERNEL, 1);
+}
+
+static int compute_sid_handle_invalid_context(
+ struct selinux_state *state,
+ struct selinux_policy *policy,
+ struct sidtab_entry *sentry,
+ struct sidtab_entry *tentry,
+ u16 tclass,
+ struct context *newcontext)
+{
+ struct policydb *policydb = &policy->policydb;
+ struct sidtab *sidtab = policy->sidtab;
+ char *s = NULL, *t = NULL, *n = NULL;
+ u32 slen, tlen, nlen;
+ struct audit_buffer *ab;
+
+ if (sidtab_entry_to_string(policydb, sidtab, sentry, &s, &slen))
+ goto out;
+ if (sidtab_entry_to_string(policydb, sidtab, tentry, &t, &tlen))
+ goto out;
+ if (context_struct_to_string(policydb, newcontext, &n, &nlen))
+ goto out;
+ ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ if (!ab)
+ goto out;
+ audit_log_format(ab,
+ "op=security_compute_sid invalid_context=");
+ /* no need to record the NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, n, nlen - 1);
+ audit_log_format(ab, " scontext=%s tcontext=%s tclass=%s",
+ s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
+ audit_log_end(ab);
+out:
+ kfree(s);
+ kfree(t);
+ kfree(n);
+ if (!enforcing_enabled(state))
+ return 0;
+ return -EACCES;
+}
+
+static void filename_compute_type(struct policydb *policydb,
+ struct context *newcontext,
+ u32 stype, u32 ttype, u16 tclass,
+ const char *objname)
+{
+ struct filename_trans_key ft;
+ struct filename_trans_datum *datum;
+
+	/*
+	 * Most filename trans rules are going to live in specific directories
+	 * like /dev or /var/run. This bitmap lets us quickly skip the rule
+	 * search when no rules exist for this ttype.
+	 */
+ if (!ebitmap_get_bit(&policydb->filename_trans_ttypes, ttype))
+ return;
+
+ ft.ttype = ttype;
+ ft.tclass = tclass;
+ ft.name = objname;
+
+ datum = policydb_filenametr_search(policydb, &ft);
+ while (datum) {
+ if (ebitmap_get_bit(&datum->stypes, stype - 1)) {
+ newcontext->type = datum->otype;
+ return;
+ }
+ datum = datum->next;
+ }
+}
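+
+/*
+ * Example of the rules matched above (hypothetical type names): a
+ * name-based transition written in policy as
+ *
+ *	type_transition init_t tmp_t:file init_tmp_t "widget";
+ *
+ * overrides newcontext->type with init_tmp_t, but only when an object
+ * named exactly "widget" is created in a tmp_t directory by an init_t
+ * source.
+ */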
+
+static int security_compute_sid(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ u32 specified,
+ const char *objname,
+ u32 *out_sid,
+ bool kern)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct class_datum *cladatum;
+ struct context *scontext, *tcontext, newcontext;
+ struct sidtab_entry *sentry, *tentry;
+ struct avtab_key avkey;
+ struct avtab_datum *avdatum;
+ struct avtab_node *node;
+ u16 tclass;
+ int rc = 0;
+ bool sock;
+
+ if (!selinux_initialized(state)) {
+ switch (orig_tclass) {
+ case SECCLASS_PROCESS: /* kernel value */
+ *out_sid = ssid;
+ break;
+ default:
+ *out_sid = tsid;
+ break;
+ }
+ goto out;
+ }
+
+retry:
+ cladatum = NULL;
+ context_init(&newcontext);
+
+ rcu_read_lock();
+
+ policy = rcu_dereference(state->policy);
+
+ if (kern) {
+ tclass = unmap_class(&policy->map, orig_tclass);
+ sock = security_is_socket_class(orig_tclass);
+ } else {
+ tclass = orig_tclass;
+ sock = security_is_socket_class(map_class(&policy->map,
+ tclass));
+ }
+
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ sentry = sidtab_search_entry(sidtab, ssid);
+ if (!sentry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ tentry = sidtab_search_entry(sidtab, tsid);
+ if (!tentry) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
+ scontext = &sentry->context;
+ tcontext = &tentry->context;
+
+ if (tclass && tclass <= policydb->p_classes.nprim)
+ cladatum = policydb->class_val_to_struct[tclass - 1];
+
+ /* Set the user identity. */
+ switch (specified) {
+ case AVTAB_TRANSITION:
+ case AVTAB_CHANGE:
+ if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
+ newcontext.user = tcontext->user;
+ } else {
+ /* notice this gets both DEFAULT_SOURCE and unset */
+ /* Use the process user identity. */
+ newcontext.user = scontext->user;
+ }
+ break;
+ case AVTAB_MEMBER:
+ /* Use the related object owner. */
+ newcontext.user = tcontext->user;
+ break;
+ }
+
+ /* Set the role to default values. */
+ if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
+ newcontext.role = scontext->role;
+ } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
+ newcontext.role = tcontext->role;
+ } else {
+ if ((tclass == policydb->process_class) || sock)
+ newcontext.role = scontext->role;
+ else
+ newcontext.role = OBJECT_R_VAL;
+ }
+
+ /* Set the type to default values. */
+ if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
+ newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
+ newcontext.type = tcontext->type;
+ } else {
+ if ((tclass == policydb->process_class) || sock) {
+ /* Use the type of process. */
+ newcontext.type = scontext->type;
+ } else {
+ /* Use the type of the related object. */
+ newcontext.type = tcontext->type;
+ }
+ }
+
+ /* Look for a type transition/member/change rule. */
+ avkey.source_type = scontext->type;
+ avkey.target_type = tcontext->type;
+ avkey.target_class = tclass;
+ avkey.specified = specified;
+ avdatum = avtab_search(&policydb->te_avtab, &avkey);
+
+ /* If no permanent rule, also check for enabled conditional rules */
+ if (!avdatum) {
+ node = avtab_search_node(&policydb->te_cond_avtab, &avkey);
+ for (; node; node = avtab_search_node_next(node, specified)) {
+ if (node->key.specified & AVTAB_ENABLED) {
+ avdatum = &node->datum;
+ break;
+ }
+ }
+ }
+
+ if (avdatum) {
+ /* Use the type from the type transition/member/change rule. */
+ newcontext.type = avdatum->u.data;
+ }
+
+	/* if we have an objname this is a filename trans check, so check those rules */
+ if (objname)
+ filename_compute_type(policydb, &newcontext, scontext->type,
+ tcontext->type, tclass, objname);
+
+ /* Check for class-specific changes. */
+ if (specified & AVTAB_TRANSITION) {
+ /* Look for a role transition rule. */
+ struct role_trans_datum *rtd;
+ struct role_trans_key rtk = {
+ .role = scontext->role,
+ .type = tcontext->type,
+ .tclass = tclass,
+ };
+
+ rtd = policydb_roletr_search(policydb, &rtk);
+ if (rtd)
+ newcontext.role = rtd->new_role;
+ }
+
+ /* Set the MLS attributes.
+ This is done last because it may allocate memory. */
+ rc = mls_compute_sid(policydb, scontext, tcontext, tclass, specified,
+ &newcontext, sock);
+ if (rc)
+ goto out_unlock;
+
+ /* Check the validity of the context. */
+ if (!policydb_context_isvalid(policydb, &newcontext)) {
+ rc = compute_sid_handle_invalid_context(state, policy, sentry,
+ tentry, tclass,
+ &newcontext);
+ if (rc)
+ goto out_unlock;
+ }
+ /* Obtain the sid for the context. */
+ rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcontext);
+ goto retry;
+ }
+out_unlock:
+ rcu_read_unlock();
+ context_destroy(&newcontext);
+out:
+ return rc;
+}
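+
+/*
+ * Summary of the precedence implemented above: the class default_user/
+ * default_role/default_type settings seed the new context, a matching
+ * type transition/member/change rule (unconditional first, then enabled
+ * conditional) overrides the type, filename transitions override it
+ * again for named objects, a role_transition may override the role, and
+ * finally the MLS fields are computed and the result is validated
+ * against the policy.
+ */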
+
+/**
+ * security_transition_sid - Compute the SID for a new subject/object.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @qstr: object name
+ * @out_sid: security identifier for new subject/object
+ *
+ * Compute a SID to use for labeling a new subject or object in the
+ * class @tclass based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the new SID was
+ * computed successfully.
+ */
+int security_transition_sid(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid)
+{
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_TRANSITION,
+ qstr ? qstr->name : NULL, out_sid, true);
+}
+
+int security_transition_sid_user(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ const char *objname, u32 *out_sid)
+{
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_TRANSITION,
+ objname, out_sid, false);
+}
+
+/**
+ * security_member_sid - Compute the SID for member selection.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @out_sid: security identifier for selected member
+ *
+ * Compute a SID to use when selecting a member of a polyinstantiated
+ * object of class @tclass based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the SID was
+ * computed successfully.
+ */
+int security_member_sid(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ u32 *out_sid)
+{
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_MEMBER, NULL,
+ out_sid, false);
+}
+
+/**
+ * security_change_sid - Compute the SID for object relabeling.
+ * @state: SELinux state
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @out_sid: security identifier for selected member
+ *
+ * Compute a SID to use for relabeling an object of class @tclass
+ * based on a SID pair (@ssid, @tsid).
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
+ * if insufficient memory is available, or %0 if the SID was
+ * computed successfully.
+ */
+int security_change_sid(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ u32 *out_sid)
+{
+ return security_compute_sid(state,
+ ssid, tsid, tclass, AVTAB_CHANGE, NULL,
+ out_sid, false);
+}
+
+static inline int convert_context_handle_invalid_context(
+ struct selinux_state *state,
+ struct policydb *policydb,
+ struct context *context)
+{
+ char *s;
+ u32 len;
+
+ if (enforcing_enabled(state))
+ return -EINVAL;
+
+ if (!context_struct_to_string(policydb, context, &s, &len)) {
+ pr_warn("SELinux: Context %s would be invalid if enforcing\n",
+ s);
+ kfree(s);
+ }
+ return 0;
+}
+
+/*
+ * Convert the values in the security context
+ * structure `oldc' from the values specified
+ * in the policy `p->oldp' to the values specified
+ * in the policy `p->newp', storing the new context
+ * in `newc'. Verify that the context is valid
+ * under the new policy.
+ */
+static int convert_context(struct context *oldc, struct context *newc, void *p,
+ gfp_t gfp_flags)
+{
+ struct convert_context_args *args;
+ struct ocontext *oc;
+ struct role_datum *role;
+ struct type_datum *typdatum;
+ struct user_datum *usrdatum;
+ char *s;
+ u32 len;
+ int rc;
+
+ args = p;
+
+ if (oldc->str) {
+ s = kstrdup(oldc->str, gfp_flags);
+ if (!s)
+ return -ENOMEM;
+
+ rc = string_to_context_struct(args->newp, NULL, s,
+ newc, SECSID_NULL);
+ if (rc == -EINVAL) {
+ /*
+ * Retain string representation for later mapping.
+ *
+ * IMPORTANT: We need to copy the contents of oldc->str
+ * back into s again because string_to_context_struct()
+ * may have garbled it.
+ */
+ memcpy(s, oldc->str, oldc->len);
+ context_init(newc);
+ newc->str = s;
+ newc->len = oldc->len;
+ return 0;
+ }
+ kfree(s);
+ if (rc) {
+ /* Other error condition, e.g. ENOMEM. */
+ pr_err("SELinux: Unable to map context %s, rc = %d.\n",
+ oldc->str, -rc);
+ return rc;
+ }
+ pr_info("SELinux: Context %s became valid (mapped).\n",
+ oldc->str);
+ return 0;
+ }
+
+ context_init(newc);
+
+ /* Convert the user. */
+ usrdatum = symtab_search(&args->newp->p_users,
+ sym_name(args->oldp,
+ SYM_USERS, oldc->user - 1));
+ if (!usrdatum)
+ goto bad;
+ newc->user = usrdatum->value;
+
+ /* Convert the role. */
+ role = symtab_search(&args->newp->p_roles,
+ sym_name(args->oldp, SYM_ROLES, oldc->role - 1));
+ if (!role)
+ goto bad;
+ newc->role = role->value;
+
+ /* Convert the type. */
+ typdatum = symtab_search(&args->newp->p_types,
+ sym_name(args->oldp,
+ SYM_TYPES, oldc->type - 1));
+ if (!typdatum)
+ goto bad;
+ newc->type = typdatum->value;
+
+ /* Convert the MLS fields if dealing with MLS policies */
+ if (args->oldp->mls_enabled && args->newp->mls_enabled) {
+ rc = mls_convert_context(args->oldp, args->newp, oldc, newc);
+ if (rc)
+ goto bad;
+ } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
+ /*
+ * Switching between non-MLS and MLS policy:
+ * ensure that the MLS fields of the context for all
+ * existing entries in the sidtab are filled in with a
+ * suitable default value, likely taken from one of the
+ * initial SIDs.
+ */
+ oc = args->newp->ocontexts[OCON_ISID];
+ while (oc && oc->sid[0] != SECINITSID_UNLABELED)
+ oc = oc->next;
+ if (!oc) {
+ pr_err("SELinux: unable to look up"
+ " the initial SIDs list\n");
+ goto bad;
+ }
+ rc = mls_range_set(newc, &oc->context[0].range);
+ if (rc)
+ goto bad;
+ }
+
+ /* Check the validity of the new context. */
+ if (!policydb_context_isvalid(args->newp, newc)) {
+ rc = convert_context_handle_invalid_context(args->state,
+ args->oldp,
+ oldc);
+ if (rc)
+ goto bad;
+ }
+
+ return 0;
+bad:
+ /* Map old representation to string and save it. */
+ rc = context_struct_to_string(args->oldp, oldc, &s, &len);
+ if (rc)
+ return rc;
+ context_destroy(newc);
+ newc->str = s;
+ newc->len = len;
+ pr_info("SELinux: Context %s became invalid (unmapped).\n",
+ newc->str);
+ return 0;
+}
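+
+/*
+ * Note: a context converted above takes one of three paths: it maps
+ * cleanly into the new policy, it was previously invalid (string only)
+ * and either becomes valid or stays a string, or it was valid and
+ * becomes invalid, in which case its string form is preserved so it can
+ * be re-mapped by a later policy load.
+ */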
+
+static void security_load_policycaps(struct selinux_state *state,
+ struct selinux_policy *policy)
+{
+ struct policydb *p;
+ unsigned int i;
+ struct ebitmap_node *node;
+
+ p = &policy->policydb;
+
+ for (i = 0; i < ARRAY_SIZE(state->policycap); i++)
+ WRITE_ONCE(state->policycap[i],
+ ebitmap_get_bit(&p->policycaps, i));
+
+ for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++)
+ pr_info("SELinux: policy capability %s=%d\n",
+ selinux_policycap_names[i],
+ ebitmap_get_bit(&p->policycaps, i));
+
+ ebitmap_for_each_positive_bit(&p->policycaps, node, i) {
+ if (i >= ARRAY_SIZE(selinux_policycap_names))
+ pr_info("SELinux: unknown policy capability %u\n",
+ i);
+ }
+}
+
+static int security_preserve_bools(struct selinux_policy *oldpolicy,
+ struct selinux_policy *newpolicy);
+
+static void selinux_policy_free(struct selinux_policy *policy)
+{
+ if (!policy)
+ return;
+
+ sidtab_destroy(policy->sidtab);
+ kfree(policy->map.mapping);
+ policydb_destroy(&policy->policydb);
+ kfree(policy->sidtab);
+ kfree(policy);
+}
+
+static void selinux_policy_cond_free(struct selinux_policy *policy)
+{
+ cond_policydb_destroy_dup(&policy->policydb);
+ kfree(policy);
+}
+
+void selinux_policy_cancel(struct selinux_state *state,
+ struct selinux_load_state *load_state)
+{
+ struct selinux_policy *oldpolicy;
+
+ oldpolicy = rcu_dereference_protected(state->policy,
+ lockdep_is_held(&state->policy_mutex));
+
+ sidtab_cancel_convert(oldpolicy->sidtab);
+ selinux_policy_free(load_state->policy);
+ kfree(load_state->convert_data);
+}
+
+static void selinux_notify_policy_change(struct selinux_state *state,
+ u32 seqno)
+{
+ /* Flush external caches and notify userspace of policy load */
+ avc_ss_reset(state->avc, seqno);
+ selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(state, seqno);
+ selinux_netlbl_cache_invalidate();
+ selinux_xfrm_notify_policyload();
+ selinux_ima_measure_state_locked(state);
+}
+
+void selinux_policy_commit(struct selinux_state *state,
+ struct selinux_load_state *load_state)
+{
+ struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+ unsigned long flags;
+ u32 seqno;
+
+ oldpolicy = rcu_dereference_protected(state->policy,
+ lockdep_is_held(&state->policy_mutex));
+
+ /* If switching between different policy types, log MLS status */
+ if (oldpolicy) {
+ if (oldpolicy->policydb.mls_enabled && !newpolicy->policydb.mls_enabled)
+ pr_info("SELinux: Disabling MLS support...\n");
+ else if (!oldpolicy->policydb.mls_enabled && newpolicy->policydb.mls_enabled)
+ pr_info("SELinux: Enabling MLS support...\n");
+ }
+
+ /* Set latest granting seqno for new policy. */
+ if (oldpolicy)
+ newpolicy->latest_granting = oldpolicy->latest_granting + 1;
+ else
+ newpolicy->latest_granting = 1;
+ seqno = newpolicy->latest_granting;
+
+ /* Install the new policy. */
+ if (oldpolicy) {
+ sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+ rcu_assign_pointer(state->policy, newpolicy);
+ sidtab_freeze_end(oldpolicy->sidtab, &flags);
+ } else {
+ rcu_assign_pointer(state->policy, newpolicy);
+ }
+
+ /* Load the policycaps from the new policy */
+ security_load_policycaps(state, newpolicy);
+
+ if (!selinux_initialized(state)) {
+ /*
+ * After first policy load, the security server is
+ * marked as initialized and ready to handle requests and
+ * any objects created prior to policy load are then labeled.
+ */
+ selinux_mark_initialized(state);
+ selinux_complete_init();
+ }
+
+ /* Free the old policy */
+ synchronize_rcu();
+ selinux_policy_free(oldpolicy);
+ kfree(load_state->convert_data);
+
+ /* Notify others of the policy change */
+ selinux_notify_policy_change(state, seqno);
+}
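+
+/*
+ * Reader-side pattern paired with the rcu_assign_pointer() publication
+ * above (sketch, as used throughout this file):
+ *
+ *	rcu_read_lock();
+ *	policy = rcu_dereference(state->policy);
+ *	... use policy, retrying on -ESTALE where applicable ...
+ *	rcu_read_unlock();
+ *
+ * The old policy is only freed after synchronize_rcu(), so readers never
+ * observe a freed or half-installed policy.
+ */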
+
+/**
+ * security_load_policy - Load a security policy configuration.
+ * @state: SELinux state
+ * @data: binary policy data
+ * @len: length of data in bytes
+ * @load_state: policy load state
+ *
+ * Load a new set of security policy configuration data,
+ * validate it and convert the SID table as necessary.
+ * This function will flush the access vector cache after
+ * loading the new policy.
+ */
+int security_load_policy(struct selinux_state *state, void *data, size_t len,
+ struct selinux_load_state *load_state)
+{
+ struct selinux_policy *newpolicy, *oldpolicy;
+ struct selinux_policy_convert_data *convert_data;
+ int rc = 0;
+ struct policy_file file = { data, len }, *fp = &file;
+
+ newpolicy = kzalloc(sizeof(*newpolicy), GFP_KERNEL);
+ if (!newpolicy)
+ return -ENOMEM;
+
+ newpolicy->sidtab = kzalloc(sizeof(*newpolicy->sidtab), GFP_KERNEL);
+ if (!newpolicy->sidtab) {
+ rc = -ENOMEM;
+ goto err_policy;
+ }
+
+ rc = policydb_read(&newpolicy->policydb, fp);
+ if (rc)
+ goto err_sidtab;
+
+ newpolicy->policydb.len = len;
+ rc = selinux_set_mapping(&newpolicy->policydb, secclass_map,
+ &newpolicy->map);
+ if (rc)
+ goto err_policydb;
+
+ rc = policydb_load_isids(&newpolicy->policydb, newpolicy->sidtab);
+ if (rc) {
+ pr_err("SELinux: unable to load the initial SIDs\n");
+ goto err_mapping;
+ }
+
+ if (!selinux_initialized(state)) {
+ /* First policy load, so no need to preserve state from old policy */
+ load_state->policy = newpolicy;
+ load_state->convert_data = NULL;
+ return 0;
+ }
+
+ oldpolicy = rcu_dereference_protected(state->policy,
+ lockdep_is_held(&state->policy_mutex));
+
+ /* Preserve active boolean values from the old policy */
+ rc = security_preserve_bools(oldpolicy, newpolicy);
+ if (rc) {
+ pr_err("SELinux: unable to preserve booleans\n");
+ goto err_free_isids;
+ }
+
+ convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
+ if (!convert_data) {
+ rc = -ENOMEM;
+ goto err_free_isids;
+ }
+
+ /*
+ * Convert the internal representations of contexts
+ * in the new SID table.
+ */
+ convert_data->args.state = state;
+ convert_data->args.oldp = &oldpolicy->policydb;
+ convert_data->args.newp = &newpolicy->policydb;
+
+ convert_data->sidtab_params.func = convert_context;
+ convert_data->sidtab_params.args = &convert_data->args;
+ convert_data->sidtab_params.target = newpolicy->sidtab;
+
+ rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
+ if (rc) {
+ pr_err("SELinux: unable to convert the internal"
+ " representation of contexts in the new SID"
+ " table\n");
+ goto err_free_convert_data;
+ }
+
+ load_state->policy = newpolicy;
+ load_state->convert_data = convert_data;
+ return 0;
+
+err_free_convert_data:
+ kfree(convert_data);
+err_free_isids:
+ sidtab_destroy(newpolicy->sidtab);
+err_mapping:
+ kfree(newpolicy->map.mapping);
+err_policydb:
+ policydb_destroy(&newpolicy->policydb);
+err_sidtab:
+ kfree(newpolicy->sidtab);
+err_policy:
+ kfree(newpolicy);
+
+ return rc;
+}
+
+/**
+ * ocontext_to_sid - Helper to safely get sid for an ocontext
+ * @sidtab: SID table
+ * @c: ocontext structure
+ * @index: index of the context entry (0 or 1)
+ * @out_sid: pointer to the resulting SID value
+ *
+ * For all ocontexts except OCON_ISID the SID fields are populated
+ * lazily, on first use. Since updating the SID value is an SMP-sensitive
+ * operation, this helper must be used to do that safely.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
+ */
+static int ocontext_to_sid(struct sidtab *sidtab, struct ocontext *c,
+ size_t index, u32 *out_sid)
+{
+ int rc;
+ u32 sid;
+
+ /* Ensure the associated sidtab entry is visible to this thread. */
+ sid = smp_load_acquire(&c->sid[index]);
+ if (!sid) {
+ rc = sidtab_context_to_sid(sidtab, &c->context[index], &sid);
+ if (rc)
+ return rc;
+
+ /*
+ * Ensure the new sidtab entry is visible to other threads
+ * when they see the SID.
+ */
+ smp_store_release(&c->sid[index], sid);
+ }
+ *out_sid = sid;
+ return 0;
+}
+
+/**
+ * security_port_sid - Obtain the SID for a port.
+ * @state: SELinux state
+ * @protocol: protocol number
+ * @port: port number
+ * @out_sid: security identifier
+ */
+int security_port_sid(struct selinux_state *state,
+ u8 protocol, u16 port, u32 *out_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct ocontext *c;
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ *out_sid = SECINITSID_PORT;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ c = policydb->ocontexts[OCON_PORT];
+ while (c) {
+ if (c->u.port.protocol == protocol &&
+ c->u.port.low_port <= port &&
+ c->u.port.high_port >= port)
+ break;
+ c = c->next;
+ }
+
+ if (c) {
+ rc = ocontext_to_sid(sidtab, c, 0, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else {
+ *out_sid = SECINITSID_PORT;
+ }
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * security_ib_pkey_sid - Obtain the SID for a pkey.
+ * @state: SELinux state
+ * @subnet_prefix: Subnet Prefix
+ * @pkey_num: pkey number
+ * @out_sid: security identifier
+ */
+int security_ib_pkey_sid(struct selinux_state *state,
+ u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct ocontext *c;
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ *out_sid = SECINITSID_UNLABELED;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ c = policydb->ocontexts[OCON_IBPKEY];
+ while (c) {
+ if (c->u.ibpkey.low_pkey <= pkey_num &&
+ c->u.ibpkey.high_pkey >= pkey_num &&
+ c->u.ibpkey.subnet_prefix == subnet_prefix)
+ break;
+
+ c = c->next;
+ }
+
+ if (c) {
+ rc = ocontext_to_sid(sidtab, c, 0, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else
+ *out_sid = SECINITSID_UNLABELED;
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * security_ib_endport_sid - Obtain the SID for a subnet management interface.
+ * @state: SELinux state
+ * @dev_name: device name
+ * @port_num: port number
+ * @out_sid: security identifier
+ */
+int security_ib_endport_sid(struct selinux_state *state,
+ const char *dev_name, u8 port_num, u32 *out_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct ocontext *c;
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ *out_sid = SECINITSID_UNLABELED;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ c = policydb->ocontexts[OCON_IBENDPORT];
+ while (c) {
+ if (c->u.ibendport.port == port_num &&
+ !strncmp(c->u.ibendport.dev_name,
+ dev_name,
+ IB_DEVICE_NAME_MAX))
+ break;
+
+ c = c->next;
+ }
+
+ if (c) {
+ rc = ocontext_to_sid(sidtab, c, 0, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else
+ *out_sid = SECINITSID_UNLABELED;
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * security_netif_sid - Obtain the SID for a network interface.
+ * @state: SELinux state
+ * @name: interface name
+ * @if_sid: interface SID
+ */
+int security_netif_sid(struct selinux_state *state,
+ char *name, u32 *if_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ int rc;
+ struct ocontext *c;
+
+ if (!selinux_initialized(state)) {
+ *if_sid = SECINITSID_NETIF;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ c = policydb->ocontexts[OCON_NETIF];
+ while (c) {
+ if (strcmp(name, c->u.name) == 0)
+ break;
+ c = c->next;
+ }
+
+ if (c) {
+ rc = ocontext_to_sid(sidtab, c, 0, if_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else
+ *if_sid = SECINITSID_NETIF;
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
+{
+ int i, fail = 0;
+
+ for (i = 0; i < 4; i++)
+ if (addr[i] != (input[i] & mask[i])) {
+ fail = 1;
+ break;
+ }
+
+ return !fail;
+}
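+
+/*
+ * The helper above succeeds iff (input[i] & mask[i]) == addr[i] for each
+ * of the four 32-bit words. For example, a /64 node statement yields a
+ * mask whose first two words are all-ones and last two are zero, so only
+ * the upper 64 bits of the address are compared.
+ */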
+
+/**
+ * security_node_sid - Obtain the SID for a node (host).
+ * @state: SELinux state
+ * @domain: communication domain aka address family
+ * @addrp: address
+ * @addrlen: address length in bytes
+ * @out_sid: security identifier
+ */
+int security_node_sid(struct selinux_state *state,
+ u16 domain,
+ void *addrp,
+ u32 addrlen,
+ u32 *out_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ int rc;
+ struct ocontext *c;
+
+ if (!selinux_initialized(state)) {
+ *out_sid = SECINITSID_NODE;
+ return 0;
+ }
+
+retry:
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ switch (domain) {
+ case AF_INET: {
+ u32 addr;
+
+ rc = -EINVAL;
+ if (addrlen != sizeof(u32))
+ goto out;
+
+ addr = *((u32 *)addrp);
+
+ c = policydb->ocontexts[OCON_NODE];
+ while (c) {
+ if (c->u.node.addr == (addr & c->u.node.mask))
+ break;
+ c = c->next;
+ }
+ break;
+ }
+
+ case AF_INET6:
+ rc = -EINVAL;
+ if (addrlen != sizeof(u64) * 2)
+ goto out;
+ c = policydb->ocontexts[OCON_NODE6];
+ while (c) {
+ if (match_ipv6_addrmask(addrp, c->u.node6.addr,
+ c->u.node6.mask))
+ break;
+ c = c->next;
+ }
+ break;
+
+ default:
+ rc = 0;
+ *out_sid = SECINITSID_NODE;
+ goto out;
+ }
+
+ if (c) {
+ rc = ocontext_to_sid(sidtab, c, 0, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else {
+ *out_sid = SECINITSID_NODE;
+ }
+
+ rc = 0;
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+#define SIDS_NEL 25
+
+/**
+ * security_get_user_sids - Obtain reachable SIDs for a user.
+ * @state: SELinux state
+ * @fromsid: starting SID
+ * @username: username
+ * @sids: array of reachable SIDs for user
+ * @nel: number of elements in @sids
+ *
+ * Generate the set of SIDs for legal security contexts
+ * for a given user that can be reached by @fromsid.
+ * Set *@sids to point to a dynamically allocated
+ * array containing the set of SIDs. Set *@nel to the
+ * number of elements in the array.
+ */
+int security_get_user_sids(struct selinux_state *state,
+ u32 fromsid,
+ char *username,
+ u32 **sids,
+ u32 *nel)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct context *fromcon, usercon;
+ u32 *mysids = NULL, *mysids2, sid;
+ u32 i, j, mynel, maxnel = SIDS_NEL;
+ struct user_datum *user;
+ struct role_datum *role;
+ struct ebitmap_node *rnode, *tnode;
+ int rc;
+
+ *sids = NULL;
+ *nel = 0;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+ if (!mysids)
+ return -ENOMEM;
+
+retry:
+ mynel = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ context_init(&usercon);
+
+ rc = -EINVAL;
+ fromcon = sidtab_search(sidtab, fromsid);
+ if (!fromcon)
+ goto out_unlock;
+
+ rc = -EINVAL;
+ user = symtab_search(&policydb->p_users, username);
+ if (!user)
+ goto out_unlock;
+
+ usercon.user = user->value;
+
+ ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
+ role = policydb->role_val_to_struct[i];
+ usercon.role = i + 1;
+ ebitmap_for_each_positive_bit(&role->types, tnode, j) {
+ usercon.type = j + 1;
+
+ if (mls_setup_user_range(policydb, fromcon, user,
+ &usercon))
+ continue;
+
+ rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out_unlock;
+ if (mynel < maxnel) {
+ mysids[mynel++] = sid;
+ } else {
+ rc = -ENOMEM;
+ maxnel += SIDS_NEL;
+ mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
+ if (!mysids2)
+ goto out_unlock;
+ memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
+ kfree(mysids);
+ mysids = mysids2;
+ mysids[mynel++] = sid;
+ }
+ }
+ }
+ rc = 0;
+out_unlock:
+ rcu_read_unlock();
+ if (rc || !mynel) {
+ kfree(mysids);
+ return rc;
+ }
+
+ rc = -ENOMEM;
+ mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
+ if (!mysids2) {
+ kfree(mysids);
+ return rc;
+ }
+ for (i = 0, j = 0; i < mynel; i++) {
+		struct av_decision dummy_avd;
+
+		rc = avc_has_perm_noaudit(state,
+ fromsid, mysids[i],
+ SECCLASS_PROCESS, /* kernel value */
+ PROCESS__TRANSITION, AVC_STRICT,
+ &dummy_avd);
+ if (!rc)
+ mysids2[j++] = mysids[i];
+ cond_resched();
+ }
+ kfree(mysids);
+ *sids = mysids2;
+ *nel = j;
+ return 0;
+}
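+
+/*
+ * The function above works in two phases: it first enumerates every
+ * (user, role, type) combination reachable for @username, assigning a
+ * SID for each candidate context, and then filters the candidates by
+ * checking process:transition permission from @fromsid with
+ * avc_has_perm_noaudit(), so only transitions the policy actually
+ * allows are returned.
+ */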
+
+/**
+ * __security_genfs_sid - Helper to obtain a SID for a file in a filesystem
+ * @policy: policy
+ * @fstype: filesystem type
+ * @path: path from root of mount
+ * @orig_sclass: file security class
+ * @sid: SID for path
+ *
+ * Obtain a SID to use for a file in a filesystem that
+ * cannot support xattr or use a fixed labeling behavior like
+ * transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
+ */
+static inline int __security_genfs_sid(struct selinux_policy *policy,
+ const char *fstype,
+ const char *path,
+ u16 orig_sclass,
+ u32 *sid)
+{
+ struct policydb *policydb = &policy->policydb;
+ struct sidtab *sidtab = policy->sidtab;
+ int len;
+ u16 sclass;
+ struct genfs *genfs;
+ struct ocontext *c;
+ int cmp = 0;
+
+ while (path[0] == '/' && path[1] == '/')
+ path++;
+
+ sclass = unmap_class(&policy->map, orig_sclass);
+ *sid = SECINITSID_UNLABELED;
+
+ for (genfs = policydb->genfs; genfs; genfs = genfs->next) {
+ cmp = strcmp(fstype, genfs->fstype);
+ if (cmp <= 0)
+ break;
+ }
+
+ if (!genfs || cmp)
+ return -ENOENT;
+
+ for (c = genfs->head; c; c = c->next) {
+ len = strlen(c->u.name);
+ if ((!c->v.sclass || sclass == c->v.sclass) &&
+ (strncmp(c->u.name, path, len) == 0))
+ break;
+ }
+
+ if (!c)
+ return -ENOENT;
+
+ return ocontext_to_sid(sidtab, c, 0, sid);
+}
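+
+/*
+ * The genfs path entries walked above are kept sorted by decreasing path
+ * length when the policy is loaded, so the first strncmp() prefix match
+ * is the most specific one: with entries for "/proc/net" and "/", a
+ * lookup of "/proc/net/tcp" selects the "/proc/net" context.
+ */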
+
+/**
+ * security_genfs_sid - Obtain a SID for a file in a filesystem
+ * @state: SELinux state
+ * @fstype: filesystem type
+ * @path: path from root of mount
+ * @orig_sclass: file security class
+ * @sid: SID for path
+ *
+ * Acquire the RCU read lock around __security_genfs_sid() and retry the
+ * lookup whenever it reports -ESTALE (a concurrent policy reload).
+ */
+int security_genfs_sid(struct selinux_state *state,
+ const char *fstype,
+ const char *path,
+ u16 orig_sclass,
+ u32 *sid)
+{
+ struct selinux_policy *policy;
+ int retval;
+
+ if (!selinux_initialized(state)) {
+ *sid = SECINITSID_UNLABELED;
+ return 0;
+ }
+
+ do {
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ retval = __security_genfs_sid(policy, fstype, path,
+ orig_sclass, sid);
+ rcu_read_unlock();
+ } while (retval == -ESTALE);
+ return retval;
+}
+
+int selinux_policy_genfs_sid(struct selinux_policy *policy,
+ const char *fstype,
+ const char *path,
+ u16 orig_sclass,
+ u32 *sid)
+{
+ /* no lock required, policy is not yet accessible by other threads */
+ return __security_genfs_sid(policy, fstype, path, orig_sclass, sid);
+}
+
+/**
+ * security_fs_use - Determine how to handle labeling for a filesystem.
+ * @state: SELinux state
+ * @sb: superblock in question
+ */
+int security_fs_use(struct selinux_state *state, struct super_block *sb)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ int rc;
+ struct ocontext *c;
+ struct superblock_security_struct *sbsec = selinux_superblock(sb);
+ const char *fstype = sb->s_type->name;
+
+ if (!selinux_initialized(state)) {
+ sbsec->behavior = SECURITY_FS_USE_NONE;
+ sbsec->sid = SECINITSID_UNLABELED;
+ return 0;
+ }
+
+retry:
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ c = policydb->ocontexts[OCON_FSUSE];
+ while (c) {
+ if (strcmp(fstype, c->u.name) == 0)
+ break;
+ c = c->next;
+ }
+
+ if (c) {
+ sbsec->behavior = c->v.behavior;
+ rc = ocontext_to_sid(sidtab, c, 0, &sbsec->sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+ } else {
+ rc = __security_genfs_sid(policy, fstype, "/",
+ SECCLASS_DIR, &sbsec->sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc) {
+ sbsec->behavior = SECURITY_FS_USE_NONE;
+ rc = 0;
+ } else {
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+int security_get_bools(struct selinux_policy *policy,
+ u32 *len, char ***names, int **values)
+{
+ struct policydb *policydb;
+ u32 i;
+ int rc;
+
+ policydb = &policy->policydb;
+
+ *names = NULL;
+ *values = NULL;
+
+ rc = 0;
+ *len = policydb->p_bools.nprim;
+ if (!*len)
+ goto out;
+
+ rc = -ENOMEM;
+ *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
+ if (!*names)
+ goto err;
+
+ rc = -ENOMEM;
+ *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
+ if (!*values)
+ goto err;
+
+ for (i = 0; i < *len; i++) {
+ (*values)[i] = policydb->bool_val_to_struct[i]->state;
+
+ rc = -ENOMEM;
+ (*names)[i] = kstrdup(sym_name(policydb, SYM_BOOLS, i),
+ GFP_ATOMIC);
+ if (!(*names)[i])
+ goto err;
+ }
+ rc = 0;
+out:
+ return rc;
+err:
+ if (*names) {
+ for (i = 0; i < *len; i++)
+ kfree((*names)[i]);
+ kfree(*names);
+ }
+ kfree(*values);
+ *len = 0;
+ *names = NULL;
+ *values = NULL;
+ goto out;
+}
+
+
+int security_set_bools(struct selinux_state *state, u32 len, int *values)
+{
+ struct selinux_policy *newpolicy, *oldpolicy;
+ int rc;
+ u32 i, seqno = 0;
+
+ if (!selinux_initialized(state))
+ return -EINVAL;
+
+ oldpolicy = rcu_dereference_protected(state->policy,
+ lockdep_is_held(&state->policy_mutex));
+
+ /* Consistency check on number of booleans, should never fail */
+ if (WARN_ON(len != oldpolicy->policydb.p_bools.nprim))
+ return -EINVAL;
+
+ newpolicy = kmemdup(oldpolicy, sizeof(*newpolicy), GFP_KERNEL);
+ if (!newpolicy)
+ return -ENOMEM;
+
+ /*
+ * Deep copy only the parts of the policydb that might be
+ * modified as a result of changing booleans.
+ */
+ rc = cond_policydb_dup(&newpolicy->policydb, &oldpolicy->policydb);
+ if (rc) {
+ kfree(newpolicy);
+ return -ENOMEM;
+ }
+
+ /* Update the boolean states in the copy */
+ for (i = 0; i < len; i++) {
+ int new_state = !!values[i];
+ int old_state = newpolicy->policydb.bool_val_to_struct[i]->state;
+
+ if (new_state != old_state) {
+ audit_log(audit_context(), GFP_ATOMIC,
+ AUDIT_MAC_CONFIG_CHANGE,
+ "bool=%s val=%d old_val=%d auid=%u ses=%u",
+ sym_name(&newpolicy->policydb, SYM_BOOLS, i),
+ new_state,
+ old_state,
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+ newpolicy->policydb.bool_val_to_struct[i]->state = new_state;
+ }
+ }
+
+ /* Re-evaluate the conditional rules in the copy */
+ evaluate_cond_nodes(&newpolicy->policydb);
+
+ /* Set latest granting seqno for new policy */
+ newpolicy->latest_granting = oldpolicy->latest_granting + 1;
+ seqno = newpolicy->latest_granting;
+
+ /* Install the new policy */
+ rcu_assign_pointer(state->policy, newpolicy);
+
+ /*
+ * Free the conditional portions of the old policydb
+ * that were copied for the new policy, and the oldpolicy
+ * structure itself but not what it references.
+ */
+ synchronize_rcu();
+ selinux_policy_cond_free(oldpolicy);
+
+ /* Notify others of the policy change */
+ selinux_notify_policy_change(state, seqno);
+ return 0;
+}
+
+int security_get_bool_value(struct selinux_state *state,
+ u32 index)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ int rc;
+ u32 len;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+
+ rc = -EFAULT;
+ len = policydb->p_bools.nprim;
+ if (index >= len)
+ goto out;
+
+ rc = policydb->bool_val_to_struct[index]->state;
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int security_preserve_bools(struct selinux_policy *oldpolicy,
+ struct selinux_policy *newpolicy)
+{
+ int rc, *bvalues = NULL;
+ char **bnames = NULL;
+ struct cond_bool_datum *booldatum;
+ u32 i, nbools = 0;
+
+ rc = security_get_bools(oldpolicy, &nbools, &bnames, &bvalues);
+ if (rc)
+ goto out;
+ for (i = 0; i < nbools; i++) {
+ booldatum = symtab_search(&newpolicy->policydb.p_bools,
+ bnames[i]);
+ if (booldatum)
+ booldatum->state = bvalues[i];
+ }
+ evaluate_cond_nodes(&newpolicy->policydb);
+
+out:
+ if (bnames) {
+ for (i = 0; i < nbools; i++)
+ kfree(bnames[i]);
+ }
+ kfree(bnames);
+ kfree(bvalues);
+ return rc;
+}
+
+/*
+ * security_sid_mls_copy() - computes a new sid based on the given
+ * sid and the mls portion of mls_sid.
+ */
+int security_sid_mls_copy(struct selinux_state *state,
+ u32 sid, u32 mls_sid, u32 *new_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ struct context *context1;
+ struct context *context2;
+ struct context newcon;
+ char *s;
+ u32 len;
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ *new_sid = sid;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ context_init(&newcon);
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ if (!policydb->mls_enabled) {
+ *new_sid = sid;
+ goto out_unlock;
+ }
+
+ rc = -EINVAL;
+ context1 = sidtab_search(sidtab, sid);
+ if (!context1) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, sid);
+ goto out_unlock;
+ }
+
+ rc = -EINVAL;
+ context2 = sidtab_search(sidtab, mls_sid);
+ if (!context2) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, mls_sid);
+ goto out_unlock;
+ }
+
+ newcon.user = context1->user;
+ newcon.role = context1->role;
+ newcon.type = context1->type;
+ rc = mls_context_cpy(&newcon, context2);
+ if (rc)
+ goto out_unlock;
+
+ /* Check the validity of the new context. */
+ if (!policydb_context_isvalid(policydb, &newcon)) {
+ rc = convert_context_handle_invalid_context(state, policydb,
+ &newcon);
+ if (rc) {
+ if (!context_struct_to_string(policydb, &newcon, &s,
+ &len)) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC,
+ AUDIT_SELINUX_ERR);
+ audit_log_format(ab,
+ "op=security_sid_mls_copy invalid_context=");
+ /* don't record NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, s, len - 1);
+ audit_log_end(ab);
+ kfree(s);
+ }
+ goto out_unlock;
+ }
+ }
+ rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcon);
+ goto retry;
+ }
+out_unlock:
+ rcu_read_unlock();
+ context_destroy(&newcon);
+ return rc;
+}
+
+/**
+ * security_net_peersid_resolve - Compare and resolve two network peer SIDs
+ * @state: SELinux state
+ * @nlbl_sid: NetLabel SID
+ * @nlbl_type: NetLabel labeling protocol type
+ * @xfrm_sid: XFRM SID
+ * @peer_sid: network peer sid
+ *
+ * Description:
+ * Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
+ * resolved into a single SID it is returned via @peer_sid and the function
+ * returns zero. Otherwise @peer_sid is set to SECSID_NULL and the function
+ * returns a negative value. A table summarizing the behavior is below:
+ *
+ *                                 | function return |    @peer_sid
+ * ------------------------------+-----------------+-----------------
+ * no peer labels | 0 | SECSID_NULL
+ * single peer label | 0 | <peer_label>
+ * multiple, consistent labels | 0 | <peer_label>
+ * multiple, inconsistent labels | -<errno> | SECSID_NULL
+ *
+ */
+int security_net_peersid_resolve(struct selinux_state *state,
+ u32 nlbl_sid, u32 nlbl_type,
+ u32 xfrm_sid,
+ u32 *peer_sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ int rc;
+ struct context *nlbl_ctx;
+ struct context *xfrm_ctx;
+
+ *peer_sid = SECSID_NULL;
+
+ /* handle the common (which also happens to be the set of easy) cases
+ * right away; these two if statements catch everything involving a
+ * single or absent peer SID/label */
+ if (xfrm_sid == SECSID_NULL) {
+ *peer_sid = nlbl_sid;
+ return 0;
+ }
+ /* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
+ * and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
+ * is present */
+ if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
+ *peer_sid = xfrm_sid;
+ return 0;
+ }
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ /*
+ * We don't need to check initialized here since the only way both
+ * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
+ * security server was initialized and state->initialized was true.
+ */
+ if (!policydb->mls_enabled) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = -EINVAL;
+ nlbl_ctx = sidtab_search(sidtab, nlbl_sid);
+ if (!nlbl_ctx) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, nlbl_sid);
+ goto out;
+ }
+ rc = -EINVAL;
+ xfrm_ctx = sidtab_search(sidtab, xfrm_sid);
+ if (!xfrm_ctx) {
+ pr_err("SELinux: %s: unrecognized SID %d\n",
+ __func__, xfrm_sid);
+ goto out;
+ }
+ rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
+ if (rc)
+ goto out;
+
+ /* at present NetLabel SIDs/labels really only carry MLS
+ * information so if the MLS portion of the NetLabel SID
+ * matches the MLS portion of the labeled XFRM SID/label
+ * then pass along the XFRM SID as it is the most
+ * expressive */
+ *peer_sid = xfrm_sid;
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int get_classes_callback(void *k, void *d, void *args)
+{
+ struct class_datum *datum = d;
+ char *name = k, **classes = args;
+ int value = datum->value - 1;
+
+ classes[value] = kstrdup(name, GFP_ATOMIC);
+ if (!classes[value])
+ return -ENOMEM;
+
+ return 0;
+}
+
+int security_get_classes(struct selinux_policy *policy,
+ char ***classes, int *nclasses)
+{
+ struct policydb *policydb;
+ int rc;
+
+ policydb = &policy->policydb;
+
+ rc = -ENOMEM;
+ *nclasses = policydb->p_classes.nprim;
+ *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
+ if (!*classes)
+ goto out;
+
+ rc = hashtab_map(&policydb->p_classes.table, get_classes_callback,
+ *classes);
+ if (rc) {
+ int i;
+ for (i = 0; i < *nclasses; i++)
+ kfree((*classes)[i]);
+ kfree(*classes);
+ }
+
+out:
+ return rc;
+}
+
+static int get_permissions_callback(void *k, void *d, void *args)
+{
+ struct perm_datum *datum = d;
+ char *name = k, **perms = args;
+ int value = datum->value - 1;
+
+ perms[value] = kstrdup(name, GFP_ATOMIC);
+ if (!perms[value])
+ return -ENOMEM;
+
+ return 0;
+}
+
+int security_get_permissions(struct selinux_policy *policy,
+ char *class, char ***perms, int *nperms)
+{
+ struct policydb *policydb;
+ int rc, i;
+ struct class_datum *match;
+
+ policydb = &policy->policydb;
+
+ rc = -EINVAL;
+ match = symtab_search(&policydb->p_classes, class);
+ if (!match) {
+ pr_err("SELinux: %s: unrecognized class %s\n",
+ __func__, class);
+ goto out;
+ }
+
+ rc = -ENOMEM;
+ *nperms = match->permissions.nprim;
+ *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
+ if (!*perms)
+ goto out;
+
+ if (match->comdatum) {
+ rc = hashtab_map(&match->comdatum->permissions.table,
+ get_permissions_callback, *perms);
+ if (rc)
+ goto err;
+ }
+
+ rc = hashtab_map(&match->permissions.table, get_permissions_callback,
+ *perms);
+ if (rc)
+ goto err;
+
+out:
+ return rc;
+
+err:
+ for (i = 0; i < *nperms; i++)
+ kfree((*perms)[i]);
+ kfree(*perms);
+ return rc;
+}
+
+int security_get_reject_unknown(struct selinux_state *state)
+{
+ struct selinux_policy *policy;
+ int value;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ value = policy->policydb.reject_unknown;
+ rcu_read_unlock();
+ return value;
+}
+
+int security_get_allow_unknown(struct selinux_state *state)
+{
+ struct selinux_policy *policy;
+ int value;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ value = policy->policydb.allow_unknown;
+ rcu_read_unlock();
+ return value;
+}
+
+/**
+ * security_policycap_supported - Check for a specific policy capability
+ * @state: SELinux state
+ * @req_cap: capability
+ *
+ * Description:
+ * This function queries the currently loaded policy to see if it supports the
+ * capability specified by @req_cap. Returns true (1) if the capability is
+ * supported, false (0) if it isn't supported.
+ *
+ */
+int security_policycap_supported(struct selinux_state *state,
+ unsigned int req_cap)
+{
+ struct selinux_policy *policy;
+ int rc;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ rc = ebitmap_get_bit(&policy->policydb.policycaps, req_cap);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+struct selinux_audit_rule {
+ u32 au_seqno;
+ struct context au_ctxt;
+};
+
+void selinux_audit_rule_free(void *vrule)
+{
+ struct selinux_audit_rule *rule = vrule;
+
+ if (rule) {
+ context_destroy(&rule->au_ctxt);
+ kfree(rule);
+ }
+}
+
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+ struct selinux_state *state = &selinux_state;
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct selinux_audit_rule *tmprule;
+ struct role_datum *roledatum;
+ struct type_datum *typedatum;
+ struct user_datum *userdatum;
+ struct selinux_audit_rule **rule = (struct selinux_audit_rule **)vrule;
+ int rc = 0;
+
+ *rule = NULL;
+
+ if (!selinux_initialized(state))
+ return -EOPNOTSUPP;
+
+ switch (field) {
+ case AUDIT_SUBJ_USER:
+ case AUDIT_SUBJ_ROLE:
+ case AUDIT_SUBJ_TYPE:
+ case AUDIT_OBJ_USER:
+ case AUDIT_OBJ_ROLE:
+ case AUDIT_OBJ_TYPE:
+ /* only 'equals' and 'not equals' fit user, role, and type */
+ if (op != Audit_equal && op != Audit_not_equal)
+ return -EINVAL;
+ break;
+ case AUDIT_SUBJ_SEN:
+ case AUDIT_SUBJ_CLR:
+ case AUDIT_OBJ_LEV_LOW:
+ case AUDIT_OBJ_LEV_HIGH:
+ /* we do not allow a range, indicated by the presence of '-' */
+ if (strchr(rulestr, '-'))
+ return -EINVAL;
+ break;
+ default:
+ /* only the above fields are valid */
+ return -EINVAL;
+ }
+
+ tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
+ if (!tmprule)
+ return -ENOMEM;
+
+ context_init(&tmprule->au_ctxt);
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+
+ tmprule->au_seqno = policy->latest_granting;
+
+ switch (field) {
+ case AUDIT_SUBJ_USER:
+ case AUDIT_OBJ_USER:
+ rc = -EINVAL;
+ userdatum = symtab_search(&policydb->p_users, rulestr);
+ if (!userdatum)
+ goto out;
+ tmprule->au_ctxt.user = userdatum->value;
+ break;
+ case AUDIT_SUBJ_ROLE:
+ case AUDIT_OBJ_ROLE:
+ rc = -EINVAL;
+ roledatum = symtab_search(&policydb->p_roles, rulestr);
+ if (!roledatum)
+ goto out;
+ tmprule->au_ctxt.role = roledatum->value;
+ break;
+ case AUDIT_SUBJ_TYPE:
+ case AUDIT_OBJ_TYPE:
+ rc = -EINVAL;
+ typedatum = symtab_search(&policydb->p_types, rulestr);
+ if (!typedatum)
+ goto out;
+ tmprule->au_ctxt.type = typedatum->value;
+ break;
+ case AUDIT_SUBJ_SEN:
+ case AUDIT_SUBJ_CLR:
+ case AUDIT_OBJ_LEV_LOW:
+ case AUDIT_OBJ_LEV_HIGH:
+ rc = mls_from_string(policydb, rulestr, &tmprule->au_ctxt,
+ GFP_ATOMIC);
+ if (rc)
+ goto out;
+ break;
+ }
+ rc = 0;
+out:
+ rcu_read_unlock();
+
+ if (rc) {
+ selinux_audit_rule_free(tmprule);
+ tmprule = NULL;
+ }
+
+ *rule = tmprule;
+
+ return rc;
+}
+
+/* Check to see if the rule contains any selinux fields */
+int selinux_audit_rule_known(struct audit_krule *rule)
+{
+ int i;
+
+ for (i = 0; i < rule->field_count; i++) {
+ struct audit_field *f = &rule->fields[i];
+ switch (f->type) {
+ case AUDIT_SUBJ_USER:
+ case AUDIT_SUBJ_ROLE:
+ case AUDIT_SUBJ_TYPE:
+ case AUDIT_SUBJ_SEN:
+ case AUDIT_SUBJ_CLR:
+ case AUDIT_OBJ_USER:
+ case AUDIT_OBJ_ROLE:
+ case AUDIT_OBJ_TYPE:
+ case AUDIT_OBJ_LEV_LOW:
+ case AUDIT_OBJ_LEV_HIGH:
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule)
+{
+ struct selinux_state *state = &selinux_state;
+ struct selinux_policy *policy;
+ struct context *ctxt;
+ struct mls_level *level;
+ struct selinux_audit_rule *rule = vrule;
+ int match = 0;
+
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: missing rule\n");
+ return -ENOENT;
+ }
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+
+ policy = rcu_dereference(state->policy);
+
+ if (rule->au_seqno < policy->latest_granting) {
+ match = -ESTALE;
+ goto out;
+ }
+
+ ctxt = sidtab_search(policy->sidtab, sid);
+ if (unlikely(!ctxt)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
+ sid);
+ match = -ENOENT;
+ goto out;
+ }
+
+ /* a field/op pair that is not caught here will simply fall through
+ without a match */
+ switch (field) {
+ case AUDIT_SUBJ_USER:
+ case AUDIT_OBJ_USER:
+ switch (op) {
+ case Audit_equal:
+ match = (ctxt->user == rule->au_ctxt.user);
+ break;
+ case Audit_not_equal:
+ match = (ctxt->user != rule->au_ctxt.user);
+ break;
+ }
+ break;
+ case AUDIT_SUBJ_ROLE:
+ case AUDIT_OBJ_ROLE:
+ switch (op) {
+ case Audit_equal:
+ match = (ctxt->role == rule->au_ctxt.role);
+ break;
+ case Audit_not_equal:
+ match = (ctxt->role != rule->au_ctxt.role);
+ break;
+ }
+ break;
+ case AUDIT_SUBJ_TYPE:
+ case AUDIT_OBJ_TYPE:
+ switch (op) {
+ case Audit_equal:
+ match = (ctxt->type == rule->au_ctxt.type);
+ break;
+ case Audit_not_equal:
+ match = (ctxt->type != rule->au_ctxt.type);
+ break;
+ }
+ break;
+ case AUDIT_SUBJ_SEN:
+ case AUDIT_SUBJ_CLR:
+ case AUDIT_OBJ_LEV_LOW:
+ case AUDIT_OBJ_LEV_HIGH:
+ level = ((field == AUDIT_SUBJ_SEN ||
+ field == AUDIT_OBJ_LEV_LOW) ?
+ &ctxt->range.level[0] : &ctxt->range.level[1]);
+ switch (op) {
+ case Audit_equal:
+ match = mls_level_eq(&rule->au_ctxt.range.level[0],
+ level);
+ break;
+ case Audit_not_equal:
+ match = !mls_level_eq(&rule->au_ctxt.range.level[0],
+ level);
+ break;
+ case Audit_lt:
+ match = (mls_level_dom(&rule->au_ctxt.range.level[0],
+ level) &&
+ !mls_level_eq(&rule->au_ctxt.range.level[0],
+ level));
+ break;
+ case Audit_le:
+ match = mls_level_dom(&rule->au_ctxt.range.level[0],
+ level);
+ break;
+ case Audit_gt:
+ match = (mls_level_dom(level,
+ &rule->au_ctxt.range.level[0]) &&
+ !mls_level_eq(level,
+ &rule->au_ctxt.range.level[0]));
+ break;
+ case Audit_ge:
+ match = mls_level_dom(level,
+ &rule->au_ctxt.range.level[0]);
+ break;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ return match;
+}
+
+static int aurule_avc_callback(u32 event)
+{
+ if (event == AVC_CALLBACK_RESET)
+ return audit_update_lsm_rules();
+ return 0;
+}
+
+static int __init aurule_init(void)
+{
+ int err;
+
+ err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
+ if (err)
+ panic("avc_add_callback() failed, error %d\n", err);
+
+ return err;
+}
+__initcall(aurule_init);
+
+#ifdef CONFIG_NETLABEL
+/**
+ * security_netlbl_cache_add - Add an entry to the NetLabel cache
+ * @secattr: the NetLabel packet security attributes
+ * @sid: the SELinux SID
+ *
+ * Description:
+ * Attempt to cache the SID in @sid, which was mapped from the packet's
+ * security attributes in @secattr, in the NetLabel subsystem cache. This
+ * function assumes @secattr has already been initialized.
+ *
+ */
+static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
+ u32 sid)
+{
+ u32 *sid_cache;
+
+ sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
+ if (sid_cache == NULL)
+ return;
+ secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+ if (secattr->cache == NULL) {
+ kfree(sid_cache);
+ return;
+ }
+
+ *sid_cache = sid;
+ secattr->cache->free = kfree;
+ secattr->cache->data = sid_cache;
+ secattr->flags |= NETLBL_SECATTR_CACHE;
+}
+
+/**
+ * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
+ * @state: SELinux state
+ * @secattr: the NetLabel packet security attributes
+ * @sid: the SELinux SID
+ *
+ * Description:
+ * Convert the given NetLabel security attributes in @secattr into a
+ * SELinux SID. If the @secattr field does not contain a full SELinux
+ * SID/context then use SECINITSID_NETMSG as the foundation. If possible the
+ * 'cache' field of @secattr is set and the CACHE flag is set; this is to
+ * allow the @secattr to be used by NetLabel to cache the secattr to SID
+ * conversion for future lookups. Returns zero on success, negative values on
+ * failure.
+ *
+ */
+int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ struct netlbl_lsm_secattr *secattr,
+ u32 *sid)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ struct sidtab *sidtab;
+ int rc;
+ struct context *ctx;
+ struct context ctx_new;
+
+ if (!selinux_initialized(state)) {
+ *sid = SECSID_NULL;
+ return 0;
+ }
+
+retry:
+ rc = 0;
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+ sidtab = policy->sidtab;
+
+ if (secattr->flags & NETLBL_SECATTR_CACHE)
+ *sid = *(u32 *)secattr->cache->data;
+ else if (secattr->flags & NETLBL_SECATTR_SECID)
+ *sid = secattr->attr.secid;
+ else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+ rc = -EIDRM;
+ ctx = sidtab_search(sidtab, SECINITSID_NETMSG);
+ if (ctx == NULL)
+ goto out;
+
+ context_init(&ctx_new);
+ ctx_new.user = ctx->user;
+ ctx_new.role = ctx->role;
+ ctx_new.type = ctx->type;
+ mls_import_netlbl_lvl(policydb, &ctx_new, secattr);
+ if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+ rc = mls_import_netlbl_cat(policydb, &ctx_new, secattr);
+ if (rc)
+ goto out;
+ }
+ rc = -EIDRM;
+ if (!mls_context_isvalid(policydb, &ctx_new)) {
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ goto out;
+ }
+
+ rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ if (rc)
+ goto out;
+
+ security_netlbl_cache_add(secattr, *sid);
+ } else
+ *sid = SECSID_NULL;
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/**
+ * security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
+ * @state: SELinux state
+ * @sid: the SELinux SID
+ * @secattr: the NetLabel packet security attributes
+ *
+ * Description:
+ * Convert the given SELinux SID in @sid into a NetLabel security attribute.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid, struct netlbl_lsm_secattr *secattr)
+{
+ struct selinux_policy *policy;
+ struct policydb *policydb;
+ int rc;
+ struct context *ctx;
+
+ if (!selinux_initialized(state))
+ return 0;
+
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ policydb = &policy->policydb;
+
+ rc = -ENOENT;
+ ctx = sidtab_search(policy->sidtab, sid);
+ if (ctx == NULL)
+ goto out;
+
+ rc = -ENOMEM;
+ secattr->domain = kstrdup(sym_name(policydb, SYM_TYPES, ctx->type - 1),
+ GFP_ATOMIC);
+ if (secattr->domain == NULL)
+ goto out;
+
+ secattr->attr.secid = sid;
+ secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
+ mls_export_netlbl_lvl(policydb, ctx, secattr);
+ rc = mls_export_netlbl_cat(policydb, ctx, secattr);
+out:
+ rcu_read_unlock();
+ return rc;
+}
+#endif /* CONFIG_NETLABEL */
+
+/**
+ * __security_read_policy - read the policy.
+ * @policy: SELinux policy
+ * @data: output buffer for the binary policy data
+ * @len: in: the size of @data in bytes; out: the number of bytes written
+ *
+ */
+static int __security_read_policy(struct selinux_policy *policy,
+ void *data, size_t *len)
+{
+ int rc;
+ struct policy_file fp;
+
+ fp.data = data;
+ fp.len = *len;
+
+ rc = policydb_write(&policy->policydb, &fp);
+ if (rc)
+ return rc;
+
+ *len = (unsigned long)fp.data - (unsigned long)data;
+ return 0;
+}
+
+/**
+ * security_read_policy - read the policy.
+ * @state: selinux_state
+ * @data: output; set to a vmalloc_user() buffer holding the binary policy
+ * @len: output; set to the length of the policy data in bytes
+ *
+ * Must be called with policy_mutex held.
+ */
+int security_read_policy(struct selinux_state *state,
+ void **data, size_t *len)
+{
+ struct selinux_policy *policy;
+
+ policy = rcu_dereference_protected(
+ state->policy, lockdep_is_held(&state->policy_mutex));
+ if (!policy)
+ return -EINVAL;
+
+ *len = policy->policydb.len;
+ *data = vmalloc_user(*len);
+ if (!*data)
+ return -ENOMEM;
+
+ return __security_read_policy(policy, *data, len);
+}
+
+/**
+ * security_read_state_kernel - read the policy.
+ * @state: selinux_state
+ * @data: output; set to a vmalloc() buffer holding the binary policy
+ * @len: output; set to the length of the policy data in bytes
+ *
+ * Allocates kernel memory for reading SELinux policy.
+ * This function is for internal use only and should not
+ * be used for returning data to user space.
+ *
+ * This function must be called with policy_mutex held.
+ */
+int security_read_state_kernel(struct selinux_state *state,
+ void **data, size_t *len)
+{
+ int err;
+ struct selinux_policy *policy;
+
+ policy = rcu_dereference_protected(
+ state->policy, lockdep_is_held(&state->policy_mutex));
+ if (!policy)
+ return -EINVAL;
+
+ *len = policy->policydb.len;
+ *data = vmalloc(*len);
+ if (!*data)
+ return -ENOMEM;
+
+ err = __security_read_policy(policy, *data, len);
+ if (err) {
+ vfree(*data);
+ *data = NULL;
+ *len = 0;
+ }
+ return err;
+}
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
new file mode 100644
index 000000000..9555ad074
--- /dev/null
+++ b/security/selinux/ss/services.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Implementation of the security services.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_SERVICES_H_
+#define _SS_SERVICES_H_
+
+#include "policydb.h"
+
+/* Mapping for a single class */
+struct selinux_mapping {
+ u16 value; /* policy value for class */
+ unsigned int num_perms; /* number of permissions in class */
+ u32 perms[sizeof(u32) * 8]; /* policy values for permissions */
+};
+
+/* Map for all of the classes, with array size */
+struct selinux_map {
+ struct selinux_mapping *mapping; /* indexed by class */
+ u16 size; /* array size of mapping */
+};
+
+struct selinux_policy {
+ struct sidtab *sidtab;
+ struct policydb policydb;
+ struct selinux_map map;
+ u32 latest_granting;
+} __randomize_layout;
+
+void services_compute_xperms_drivers(struct extended_perms *xperms,
+ struct avtab_node *node);
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ struct avtab_node *node);
+
+#endif /* _SS_SERVICES_H_ */
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
new file mode 100644
index 000000000..db5cce385
--- /dev/null
+++ b/security/selinux/ss/sidtab.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the SID table type.
+ *
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include "flask.h"
+#include "security.h"
+#include "sidtab.h"
+
+struct sidtab_str_cache {
+ struct rcu_head rcu_member;
+ struct list_head lru_member;
+ struct sidtab_entry *parent;
+ u32 len;
+ char str[];
+};
+
+#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
+#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
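+
+/*
+ * Example: the initial SIDs occupy 1..SECINITSID_NUM, so the first
+ * dynamically allocated entry (index 0) maps to SID SECINITSID_NUM + 1,
+ * and SECSID_NULL (0) never maps to a table index.
+ */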
+
+int sidtab_init(struct sidtab *s)
+{
+ u32 i;
+
+ memset(s->roots, 0, sizeof(s->roots));
+
+ for (i = 0; i < SECINITSID_NUM; i++)
+ s->isids[i].set = 0;
+
+ s->frozen = false;
+ s->count = 0;
+ s->convert = NULL;
+ hash_init(s->context_to_sid);
+
+ spin_lock_init(&s->lock);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
+ INIT_LIST_HEAD(&s->cache_lru_list);
+ spin_lock_init(&s->cache_lock);
+#endif
+
+ return 0;
+}
+
+static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
+{
+ struct sidtab_entry *entry;
+ u32 sid = 0;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
+ if (entry->hash != hash)
+ continue;
+ if (context_cmp(&entry->context, context)) {
+ sid = entry->sid;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return sid;
+}
+
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
+{
+ struct sidtab_isid_entry *isid;
+ u32 hash;
+ int rc;
+
+ if (sid == 0 || sid > SECINITSID_NUM)
+ return -EINVAL;
+
+ isid = &s->isids[sid - 1];
+
+ rc = context_cpy(&isid->entry.context, context);
+ if (rc)
+ return rc;
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ isid->entry.cache = NULL;
+#endif
+ isid->set = 1;
+
+ hash = context_compute_hash(context);
+
+ /*
+ * Multiple initial sids may map to the same context. Check that this
+ * context is not already represented in the context_to_sid hashtable
+ * to avoid duplicate entries and long linked lists upon hash
+ * collision.
+ */
+ if (!context_to_sid(s, context, hash)) {
+ isid->entry.sid = sid;
+ isid->entry.hash = hash;
+ hash_add(s->context_to_sid, &isid->entry.list, hash);
+ }
+
+ return 0;
+}
+
+int sidtab_hash_stats(struct sidtab *sidtab, char *page)
+{
+ int i;
+ int chain_len = 0;
+ int slots_used = 0;
+ int entries = 0;
+ int max_chain_len = 0;
+ int cur_bucket = 0;
+ struct sidtab_entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
+ entries++;
+ if (i == cur_bucket) {
+ chain_len++;
+ if (chain_len == 1)
+ slots_used++;
+ } else {
+ cur_bucket = i;
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ chain_len = 0;
+ }
+ }
+ rcu_read_unlock();
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+
+ return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+ "longest chain: %d\n", entries,
+ slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
+}
+
+static u32 sidtab_level_from_count(u32 count)
+{
+ u32 capacity = SIDTAB_LEAF_ENTRIES;
+ u32 level = 0;
+
+ while (count > capacity) {
+ capacity <<= SIDTAB_INNER_SHIFT;
+ ++level;
+ }
+ return level;
+}
+
+static int sidtab_alloc_roots(struct sidtab *s, u32 level)
+{
+ u32 l;
+
+ if (!s->roots[0].ptr_leaf) {
+ s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[0].ptr_leaf)
+ return -ENOMEM;
+ }
+ for (l = 1; l <= level; ++l)
+ if (!s->roots[l].ptr_inner) {
+ s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[l].ptr_inner)
+ return -ENOMEM;
+ s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
+ }
+ return 0;
+}
+
+static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
+ int alloc)
+{
+ union sidtab_entry_inner *entry;
+ u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
+
+ /* find the level of the subtree we need */
+ level = sidtab_level_from_count(index + 1);
+ capacity_shift = level * SIDTAB_INNER_SHIFT;
+
+ /* allocate roots if needed */
+ if (alloc && sidtab_alloc_roots(s, level) != 0)
+ return NULL;
+
+ /* lookup inside the subtree */
+ entry = &s->roots[level];
+ while (level != 0) {
+ capacity_shift -= SIDTAB_INNER_SHIFT;
+ --level;
+
+ entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
+ leaf_index &= ((u32)1 << capacity_shift) - 1;
+
+ if (!entry->ptr_inner) {
+ if (alloc)
+ entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_inner)
+ return NULL;
+ }
+ }
+ if (!entry->ptr_leaf) {
+ if (alloc)
+ entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_leaf)
+ return NULL;
+ }
+ return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
+}
+
+static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
+{
+ /* read entries only after reading count */
+ u32 count = smp_load_acquire(&s->count);
+
+ if (index >= count)
+ return NULL;
+
+ return sidtab_do_lookup(s, index, 0);
+}
+
+static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+{
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
+}
+
+static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
+ int force)
+{
+ if (sid != 0) {
+ struct sidtab_entry *entry;
+
+ if (sid > SECINITSID_NUM)
+ entry = sidtab_lookup(s, sid_to_index(sid));
+ else
+ entry = sidtab_lookup_initial(s, sid);
+ if (entry && (!entry->context.len || force))
+ return entry;
+ }
+
+ return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
+}
+
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
+{
+ return sidtab_search_core(s, sid, 0);
+}
+
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
+{
+ return sidtab_search_core(s, sid, 1);
+}
+
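+/*
+ * Look up the SID for @context, allocating a new entry if the context is
+ * not yet present.  While a policy reload is in progress, the new entry
+ * is also converted and inserted into the reload's target sidtab (the
+ * "live convert" handled under s->lock).
+ */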
+int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+ u32 *sid)
+{
+ unsigned long flags;
+ u32 count, hash = context_compute_hash(context);
+ struct sidtab_convert_params *convert;
+ struct sidtab_entry *dst, *dst_convert;
+ int rc;
+
+ *sid = context_to_sid(s, context, hash);
+ if (*sid)
+ return 0;
+
+ /* lock-free search failed: lock, re-search, and insert if not found */
+ spin_lock_irqsave(&s->lock, flags);
+
+ rc = 0;
+ *sid = context_to_sid(s, context, hash);
+ if (*sid)
+ goto out_unlock;
+
+ if (unlikely(s->frozen)) {
+ /*
+ * This sidtab is now frozen - tell the caller to abort and
+ * get the new one.
+ */
+ rc = -ESTALE;
+ goto out_unlock;
+ }
+
+ count = s->count;
+ convert = s->convert;
+
+ /* bail out if we already reached max entries */
+ rc = -EOVERFLOW;
+ if (count >= SIDTAB_MAX)
+ goto out_unlock;
+
+ /* insert context into new entry */
+ rc = -ENOMEM;
+ dst = sidtab_do_lookup(s, count, 1);
+ if (!dst)
+ goto out_unlock;
+
+ dst->sid = index_to_sid(count);
+ dst->hash = hash;
+
+ rc = context_cpy(&dst->context, context);
+ if (rc)
+ goto out_unlock;
+
+ /*
+ * if we are building a new sidtab, we need to convert the context
+ * and insert it there as well
+ */
+ if (convert) {
+ rc = -ENOMEM;
+ dst_convert = sidtab_do_lookup(convert->target, count, 1);
+ if (!dst_convert) {
+ context_destroy(&dst->context);
+ goto out_unlock;
+ }
+
+ rc = convert->func(context, &dst_convert->context,
+ convert->args, GFP_ATOMIC);
+ if (rc) {
+ context_destroy(&dst->context);
+ goto out_unlock;
+ }
+ dst_convert->sid = index_to_sid(count);
+ dst_convert->hash = context_compute_hash(&dst_convert->context);
+ convert->target->count = count + 1;
+
+ hash_add_rcu(convert->target->context_to_sid,
+ &dst_convert->list, dst_convert->hash);
+ }
+
+ if (context->len)
+ pr_info("SELinux: Context %s is not valid (left unmapped).\n",
+ context->str);
+
+ *sid = index_to_sid(count);
+
+ /* write entries before updating count */
+ smp_store_release(&s->count, count + 1);
+ hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
+
+ rc = 0;
+out_unlock:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+}
+
+static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
+{
+ struct sidtab_entry *entry;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ entry = sidtab_do_lookup(s, i, 0);
+ entry->sid = index_to_sid(i);
+ entry->hash = context_compute_hash(&entry->context);
+
+ hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
+ }
+}
+
+static int sidtab_convert_tree(union sidtab_entry_inner *edst,
+ union sidtab_entry_inner *esrc,
+ u32 *pos, u32 count, u32 level,
+ struct sidtab_convert_params *convert)
+{
+ int rc;
+ u32 i;
+
+ if (level != 0) {
+ if (!edst->ptr_inner) {
+ edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_inner)
+ return -ENOMEM;
+ }
+ i = 0;
+ while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
+ rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
+ &esrc->ptr_inner->entries[i],
+ pos, count, level - 1,
+ convert);
+ if (rc)
+ return rc;
+ i++;
+ }
+ } else {
+ if (!edst->ptr_leaf) {
+ edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_leaf)
+ return -ENOMEM;
+ }
+ i = 0;
+ while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
+ rc = convert->func(&esrc->ptr_leaf->entries[i].context,
+ &edst->ptr_leaf->entries[i].context,
+ convert->args, GFP_KERNEL);
+ if (rc)
+ return rc;
+ (*pos)++;
+ i++;
+ }
+ cond_resched();
+ }
+ return 0;
+}
+
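+/*
+ * Convert all existing entries to @params->target using @params->func.
+ * The bulk of the tree is converted outside the spinlock; entries added
+ * concurrently are handled by the "live convert" in
+ * sidtab_context_to_sid().
+ */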
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
+{
+ unsigned long flags;
+ u32 count, level, pos;
+ int rc;
+
+ spin_lock_irqsave(&s->lock, flags);
+
+ /* concurrent policy loads are not allowed */
+ if (s->convert) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return -EBUSY;
+ }
+
+ count = s->count;
+ level = sidtab_level_from_count(count);
+
+ /* allocate last leaf in the new sidtab (to avoid race with
+ * live convert)
+ */
+ rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
+ if (rc) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+ }
+
+ /* set count in case no new entries are added during conversion */
+ params->target->count = count;
+
+ /* enable live convert of new entries */
+ s->convert = params;
+
+ /* we can safely convert the tree outside the lock */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ pr_info("SELinux: Converting %u SID table entries...\n", count);
+
+ /* convert all entries not covered by live convert */
+ pos = 0;
+ rc = sidtab_convert_tree(&params->target->roots[level],
+ &s->roots[level], &pos, count, level, params);
+ if (rc) {
+ /* we need to keep the old table - disable live convert */
+ spin_lock_irqsave(&s->lock, flags);
+ s->convert = NULL;
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+ }
+ /*
+ * The hashtable can also be modified in sidtab_context_to_sid()
+ * so we must re-acquire the lock here.
+ */
+ spin_lock_irqsave(&s->lock, flags);
+ sidtab_convert_hashtable(params->target, count);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+void sidtab_cancel_convert(struct sidtab *s)
+{
+ unsigned long flags;
+
+ /* cancelling policy load - disable live convert of sidtab */
+ spin_lock_irqsave(&s->lock, flags);
+ s->convert = NULL;
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+ spin_lock_irqsave(&s->lock, *flags);
+ s->frozen = true;
+ s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+ spin_unlock_irqrestore(&s->lock, *flags);
+}
+
+static void sidtab_destroy_entry(struct sidtab_entry *entry)
+{
+ context_destroy(&entry->context);
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ kfree(rcu_dereference_raw(entry->cache));
+#endif
+}
+
+static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
+{
+ u32 i;
+
+ if (level != 0) {
+ struct sidtab_node_inner *node = entry.ptr_inner;
+
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
+ sidtab_destroy_tree(node->entries[i], level - 1);
+ kfree(node);
+ } else {
+ struct sidtab_node_leaf *node = entry.ptr_leaf;
+
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
+ sidtab_destroy_entry(&node->entries[i]);
+ kfree(node);
+ }
+}
+
+void sidtab_destroy(struct sidtab *s)
+{
+ u32 i, level;
+
+ for (i = 0; i < SECINITSID_NUM; i++)
+ if (s->isids[i].set)
+ sidtab_destroy_entry(&s->isids[i].entry);
+
+ level = SIDTAB_MAX_LEVEL;
+ while (level && !s->roots[level].ptr_inner)
+ --level;
+
+ sidtab_destroy_tree(s->roots[level], level);
+ /*
+ * The context_to_sid hashtable's objects are all shared
+ * with the isids array and context tree, and so don't need
+ * to be cleaned up here.
+ */
+}
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+
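+/*
+ * Per-entry cache of the string form of a context, bounded to
+ * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE entries and evicted in LRU
+ * order under cache_lock.
+ */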
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+ const char *str, u32 str_len)
+{
+ struct sidtab_str_cache *cache, *victim = NULL;
+ unsigned long flags;
+
+ /* do not cache invalid contexts */
+ if (entry->context.len)
+ return;
+
+ spin_lock_irqsave(&s->cache_lock, flags);
+
+ cache = rcu_dereference_protected(entry->cache,
+ lockdep_is_held(&s->cache_lock));
+ if (cache) {
+ /* entry in cache - just bump to the head of LRU list */
+ list_move(&cache->lru_member, &s->cache_lru_list);
+ goto out_unlock;
+ }
+
+ cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
+ if (!cache)
+ goto out_unlock;
+
+ if (s->cache_free_slots == 0) {
+ /* pop a cache entry from the tail and free it */
+ victim = container_of(s->cache_lru_list.prev,
+ struct sidtab_str_cache, lru_member);
+ list_del(&victim->lru_member);
+ rcu_assign_pointer(victim->parent->cache, NULL);
+ } else {
+ s->cache_free_slots--;
+ }
+ cache->parent = entry;
+ cache->len = str_len;
+ memcpy(cache->str, str, str_len);
+ list_add(&cache->lru_member, &s->cache_lru_list);
+
+ rcu_assign_pointer(entry->cache, cache);
+
+out_unlock:
+ spin_unlock_irqrestore(&s->cache_lock, flags);
+ kfree_rcu(victim, rcu_member);
+}
+
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+ char **out, u32 *out_len)
+{
+ struct sidtab_str_cache *cache;
+ int rc = 0;
+
+ if (entry->context.len)
+ return -ENOENT; /* do not cache invalid contexts */
+
+ rcu_read_lock();
+
+ cache = rcu_dereference(entry->cache);
+ if (!cache) {
+ rc = -ENOENT;
+ } else {
+ *out_len = cache->len;
+ if (out) {
+ *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
+ if (!*out)
+ rc = -ENOMEM;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!rc && out)
+ sidtab_sid2str_put(s, entry, *out, *out_len);
+ return rc;
+}
+
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
new file mode 100644
index 000000000..9fce0d553
--- /dev/null
+++ b/security/selinux/ss/sidtab.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A security identifier table (sidtab) is a lookup table
+ * of security context structures indexed by SID value.
+ *
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
+ */
+#ifndef _SS_SIDTAB_H_
+#define _SS_SIDTAB_H_
+
+#include <linux/spinlock_types.h>
+#include <linux/log2.h>
+#include <linux/hashtable.h>
+
+#include "context.h"
+
+struct sidtab_entry {
+ u32 sid;
+ u32 hash;
+ struct context context;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ struct sidtab_str_cache __rcu *cache;
+#endif
+ struct hlist_node list;
+};
+
+union sidtab_entry_inner {
+ struct sidtab_node_inner *ptr_inner;
+ struct sidtab_node_leaf *ptr_leaf;
+};
+
+/* align node size to page boundary */
+#define SIDTAB_NODE_ALLOC_SHIFT PAGE_SHIFT
+#define SIDTAB_NODE_ALLOC_SIZE PAGE_SIZE
+
+#define size_to_shift(size) ((size) == 1 ? 1 : (const_ilog2((size) - 1) + 1))
+
+#define SIDTAB_INNER_SHIFT \
+ (SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
+#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
+#define SIDTAB_LEAF_ENTRIES \
+ (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry))
+
+#define SIDTAB_MAX_BITS 32
+#define SIDTAB_MAX U32_MAX
+/* ensure enough tree levels for SIDTAB_MAX entries */
+#define SIDTAB_MAX_LEVEL \
+ DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
+ SIDTAB_INNER_SHIFT)
+
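+/*
+ * A worked example, assuming 4 KiB pages and 64-bit pointers:
+ * sizeof(union sidtab_entry_inner) == 8, so SIDTAB_INNER_SHIFT is
+ * 12 - 3 == 9 and each inner node holds 512 child pointers, while a leaf
+ * node holds PAGE_SIZE / sizeof(struct sidtab_entry) entries.
+ * SIDTAB_MAX_LEVEL then provides enough inner levels to address all
+ * SIDTAB_MAX possible entries.
+ */
+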
+struct sidtab_node_leaf {
+ struct sidtab_entry entries[SIDTAB_LEAF_ENTRIES];
+};
+
+struct sidtab_node_inner {
+ union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES];
+};
+
+struct sidtab_isid_entry {
+ int set;
+ struct sidtab_entry entry;
+};
+
+struct sidtab_convert_params {
+ int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags);
+ void *args;
+ struct sidtab *target;
+};
+
+#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
+#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
+
+struct sidtab {
+ /*
+ * lock-free read access only for as many items as a prior read of
+ * 'count'
+ */
+ union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
+ /*
+ * access atomically via {READ|WRITE}_ONCE(); only increment under
+ * spinlock
+ */
+ u32 count;
+ /* access only under spinlock */
+ struct sidtab_convert_params *convert;
+ bool frozen;
+ spinlock_t lock;
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ /* SID -> context string cache */
+ u32 cache_free_slots;
+ struct list_head cache_lru_list;
+ spinlock_t cache_lock;
+#endif
+
+ /* index == SID - 1 (no entry for SECSID_NULL) */
+ struct sidtab_isid_entry isids[SECINITSID_NUM];
+
+ /* Hash table for fast reverse context-to-sid lookups. */
+ DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
+};
+
+int sidtab_init(struct sidtab *s);
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid);
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid);
+
+static inline struct context *sidtab_search(struct sidtab *s, u32 sid)
+{
+ struct sidtab_entry *entry = sidtab_search_entry(s, sid);
+
+ return entry ? &entry->context : NULL;
+}
+
+static inline struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+{
+ struct sidtab_entry *entry = sidtab_search_entry_force(s, sid);
+
+ return entry ? &entry->context : NULL;
+}
+
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
+
+void sidtab_cancel_convert(struct sidtab *s);
+
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
+int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
+
+void sidtab_destroy(struct sidtab *s);
+
+int sidtab_hash_stats(struct sidtab *sidtab, char *page);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+ const char *str, u32 str_len);
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+ char **out, u32 *out_len);
+#else
+static inline void sidtab_sid2str_put(struct sidtab *s,
+ struct sidtab_entry *entry,
+ const char *str, u32 str_len)
+{
+}
+static inline int sidtab_sid2str_get(struct sidtab *s,
+ struct sidtab_entry *entry,
+ char **out, u32 *out_len)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
+
+#endif /* _SS_SIDTAB_H_ */
+
+
diff --git a/security/selinux/ss/symtab.c b/security/selinux/ss/symtab.c
new file mode 100644
index 000000000..c42a6648a
--- /dev/null
+++ b/security/selinux/ss/symtab.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the symbol table type.
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include "symtab.h"
+
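+/*
+ * Simple rotating hash: rotate val left by four bits, then XOR in each
+ * byte of the key.
+ */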
+static unsigned int symhash(const void *key)
+{
+ const char *p, *keyp;
+ unsigned int size;
+ unsigned int val;
+
+ val = 0;
+ keyp = key;
+ size = strlen(keyp);
+ for (p = keyp; (p - keyp) < size; p++)
+ val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p);
+ return val;
+}
+
+static int symcmp(const void *key1, const void *key2)
+{
+ const char *keyp1, *keyp2;
+
+ keyp1 = key1;
+ keyp2 = key2;
+ return strcmp(keyp1, keyp2);
+}
+
+static const struct hashtab_key_params symtab_key_params = {
+ .hash = symhash,
+ .cmp = symcmp,
+};
+
+int symtab_init(struct symtab *s, unsigned int size)
+{
+ s->nprim = 0;
+ return hashtab_init(&s->table, size);
+}
+
+int symtab_insert(struct symtab *s, char *name, void *datum)
+{
+ return hashtab_insert(&s->table, name, datum, symtab_key_params);
+}
+
+void *symtab_search(struct symtab *s, const char *name)
+{
+ return hashtab_search(&s->table, name, symtab_key_params);
+}
diff --git a/security/selinux/ss/symtab.h b/security/selinux/ss/symtab.h
new file mode 100644
index 000000000..f2614138d
--- /dev/null
+++ b/security/selinux/ss/symtab.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A symbol table (symtab) maintains associations between symbol
+ * strings and datum values. The type of the datum values
+ * is arbitrary. The symbol table type is implemented
+ * using the hash table type (hashtab).
+ *
+ * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ */
+#ifndef _SS_SYMTAB_H_
+#define _SS_SYMTAB_H_
+
+#include "hashtab.h"
+
+struct symtab {
+ struct hashtab table; /* hash table (keyed on a string) */
+ u32 nprim; /* number of primary names in table */
+};
+
+int symtab_init(struct symtab *s, unsigned int size);
+
+int symtab_insert(struct symtab *s, char *name, void *datum);
+void *symtab_search(struct symtab *s, const char *name);
+
+#endif /* _SS_SYMTAB_H_ */
+
+
diff --git a/security/selinux/status.c b/security/selinux/status.c
new file mode 100644
index 000000000..4bc8f8099
--- /dev/null
+++ b/security/selinux/status.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "security.h"
+
+/*
+ * The selinux_status_page is exposed to userspace applications through
+ * the mmap interface on /selinux/status.
+ * It lets the kernel notify applications of events that require a reset
+ * of their userspace access vector cache, without any context switch.
+ *
+ * The selinux_kernel_status structure at the head of the status page is
+ * protected against concurrent access using seqlock logic, so userspace
+ * applications must read the status page following the same seqlock
+ * discipline.
+ *
+ * Typically, an application checks status->sequence at the start of its
+ * access control routine. If the value is odd, the kernel is currently
+ * updating the status, so the application should retry shortly. If the
+ * value differs from the previously observed sequence number, something
+ * has changed and the application resets its userspace AVC if needed.
+ * In the common case, this lets the application confirm that the kernel
+ * status is unchanged without making any system calls.
+ */
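+/*
+ * A minimal userspace read loop, shown only as a sketch (it assumes the
+ * page was mmap()ed read-only from the selinuxfs "status" node and that
+ * "status" points to the mapped selinux_kernel_status structure; read
+ * memory barriers are omitted for brevity):
+ *
+ *   u32 seq, enforcing, policyload;
+ *   do {
+ *       while ((seq = status->sequence) & 1)
+ *           sched_yield();
+ *       enforcing  = status->enforcing;
+ *       policyload = status->policyload;
+ *   } while (status->sequence != seq);
+ */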
+
+/*
+ * selinux_kernel_status_page
+ *
+ * It returns a reference to selinux_status_page. If the status page is
+ * not allocated yet, it also tries to allocate it at the first time.
+ */
+struct page *selinux_kernel_status_page(struct selinux_state *state)
+{
+ struct selinux_kernel_status *status;
+ struct page *result = NULL;
+
+ mutex_lock(&state->status_lock);
+ if (!state->status_page) {
+ state->status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+ if (state->status_page) {
+ status = page_address(state->status_page);
+
+ status->version = SELINUX_KERNEL_STATUS_VERSION;
+ status->sequence = 0;
+ status->enforcing = enforcing_enabled(state);
+ /*
+ * NOTE: the next policyload event shall set
+ * a positive value on the status->policyload,
+ * although it may not be 1, but never zero.
+ * So, application can know it was updated.
+ */
+ status->policyload = 0;
+ status->deny_unknown =
+ !security_get_allow_unknown(state);
+ }
+ }
+ result = state->status_page;
+ mutex_unlock(&state->status_lock);
+
+ return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * Updates the status page with the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(struct selinux_state *state,
+ int enforcing)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&state->status_lock);
+ if (state->status_page) {
+ status = page_address(state->status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->enforcing = enforcing;
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&state->status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * Updates the status page with the number of times the policy has been
+ * reloaded (seqno) and the current deny_unknown setting.
+ */
+void selinux_status_update_policyload(struct selinux_state *state,
+ int seqno)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&state->status_lock);
+ if (state->status_page) {
+ status = page_address(state->status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->policyload = seqno;
+ status->deny_unknown = !security_get_allow_unknown(state);
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&state->status_lock);
+}
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
new file mode 100644
index 000000000..c576832fe
--- /dev/null
+++ b/security/selinux/xfrm.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NSA Security-Enhanced Linux (SELinux) security module
+ *
+ * This file contains the SELinux XFRM hook function implementations.
+ *
+ * Authors: Serge Hallyn <sergeh@us.ibm.com>
+ * Trent Jaeger <jaegert@us.ibm.com>
+ *
+ * Updated: Venkat Yekkirala <vyekkirala@TrustedCS.com>
+ *
+ * Granular IPSec Associations for use in MLS environments.
+ *
+ * Copyright (C) 2005 International Business Machines Corporation
+ * Copyright (C) 2006 Trusted Computer Solutions, Inc.
+ */
+
+/*
+ * USAGE:
+ * NOTES:
+ * 1. Make sure to enable the following options in your kernel config:
+ * CONFIG_SECURITY=y
+ * CONFIG_SECURITY_NETWORK=y
+ * CONFIG_SECURITY_NETWORK_XFRM=y
+ * CONFIG_SECURITY_SELINUX=m/y
+ * ISSUES:
+ * 1. Caching packets, so they are not dropped during negotiation
+ * 2. Emulating a reasonable SO_PEERSEC across machines
+ * 3. Testing addition of sk_policy's with security context via setsockopt
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <linux/atomic.h>
+
+#include "avc.h"
+#include "objsec.h"
+#include "xfrm.h"
+
+/* Labeled XFRM instance counter */
+atomic_t selinux_xfrm_refcount __read_mostly = ATOMIC_INIT(0);
+
+/*
+ * Returns true if the context is an LSM/SELinux context.
+ */
+static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
+{
+ return (ctx &&
+ (ctx->ctx_doi == XFRM_SC_DOI_LSM) &&
+ (ctx->ctx_alg == XFRM_SC_ALG_SELINUX));
+}
+
+/*
+ * Returns true if the xfrm contains a security blob for SELinux.
+ */
+static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
+{
+ return selinux_authorizable_ctx(x->security);
+}
+
+/*
+ * Allocates an xfrm_sec_ctx and populates it using the supplied
+ * xfrm_user_sec_ctx security context.
+ */
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
+{
+ int rc;
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ struct xfrm_sec_ctx *ctx = NULL;
+ u32 str_len;
+
+ if (ctxp == NULL || uctx == NULL ||
+ uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+ uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+ return -EINVAL;
+
+ str_len = uctx->ctx_len;
+ if (str_len >= PAGE_SIZE)
+ return -ENOMEM;
+
+ ctx = kmalloc(struct_size(ctx, ctx_str, str_len + 1), gfp);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, &uctx[1], str_len);
+ ctx->ctx_str[str_len] = '\0';
+ rc = security_context_to_sid(&selinux_state, ctx->ctx_str, str_len,
+ &ctx->ctx_sid, gfp);
+ if (rc)
+ goto err;
+
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+ if (rc)
+ goto err;
+
+ *ctxp = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
+
+err:
+ kfree(ctx);
+ return rc;
+}
+
+/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+
+ if (!ctx)
+ return 0;
+
+ return avc_has_perm(&selinux_state,
+ tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use a xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
+{
+ int rc;
+
+ /* All flows should be treated as polmatch'ing an otherwise applicable
+ * "non-labeled" policy. This would prevent inadvertent "leaks". */
+ if (!ctx)
+ return 0;
+
+ /* Context sid is either set to label or ANY_ASSOC */
+ if (!selinux_authorizable_ctx(ctx))
+ return -EINVAL;
+
+ rc = avc_has_perm(&selinux_state,
+ fl_secid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+ return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
+ * LSM hook implementation that authorizes that a state matches
+ * the given policy, flow combo.
+ */
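+/*
+ * The label combinations below reduce to:
+ *   unlabeled policy + unlabeled SA  -> match all flows
+ *   unlabeled policy + labeled SA    -> no match
+ *   labeled policy   + unlabeled SA  -> no match
+ *   labeled policy   + SELinux SA    -> match only when the flow and SA
+ *   carry the same SID and SENDTO is granted between them.
+ */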
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
+{
+ u32 state_sid;
+ u32 flic_sid;
+
+ if (!xp->security)
+ if (x->security)
+ /* unlabeled policy and labeled SA can't match */
+ return 0;
+ else
+ /* unlabeled policy and unlabeled SA match all flows */
+ return 1;
+ else
+ if (!x->security)
+ /* unlabeled SA and labeled policy can't match */
+ return 0;
+ else
+ if (!selinux_authorizable_xfrm(x))
+ /* Not a SELinux-labeled SA */
+ return 0;
+
+ state_sid = x->security->ctx_sid;
+ flic_sid = flic->flowic_secid;
+
+ if (flic_sid != state_sid)
+ return 0;
+
+ /* We don't need a separate SA Vs. policy polmatch check since the SA
+ * is now of the same label as the flow and a flow Vs. policy polmatch
+ * check had already happened in selinux_xfrm_policy_lookup() above. */
+ return (avc_has_perm(&selinux_state, flic_sid, state_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1);
+}
+
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
+
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
+
+ return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+ u32 *sid, int ckall)
+{
+ u32 sid_session = SECSID_NULL;
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (sp) {
+ int i;
+
+ for (i = sp->len - 1; i >= 0; i--) {
+ struct xfrm_state *x = sp->xvec[i];
+ if (selinux_authorizable_xfrm(x)) {
+ struct xfrm_sec_ctx *ctx = x->security;
+
+ if (sid_session == SECSID_NULL) {
+ sid_session = ctx->ctx_sid;
+ if (!ckall)
+ goto out;
+ } else if (sid_session != ctx->ctx_sid) {
+ *sid = SECSID_NULL;
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+out:
+ *sid = sid_session;
+ return 0;
+}
+
+/*
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
+ */
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+{
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
+ }
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
+
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
+
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
+
+ return rc;
+}
+
+/*
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
+ */
+int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
+{
+ return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
+}
+
+/*
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
+ */
+int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctxp)
+{
+ struct xfrm_sec_ctx *new_ctx;
+
+ if (!old_ctx)
+ return 0;
+
+ new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+ atomic_inc(&selinux_xfrm_refcount);
+ *new_ctxp = new_ctx;
+
+ return 0;
+}
+
+/*
+ * LSM hook implementation that frees xfrm_sec_ctx security information.
+ */
+void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
+{
+ selinux_xfrm_free(ctx);
+}
+
+/*
+ * LSM hook implementation that authorizes deletion of labeled policies.
+ */
+int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
+{
+ return selinux_xfrm_delete(ctx);
+}
+
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx, populates
+ * it using the supplied security context, and assigns it to the
+ * xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx)
+{
+ return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
+}
+
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx and
+ * populates it based on a secid.
+ */
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+{
+ int rc;
+ struct xfrm_sec_ctx *ctx;
+ char *ctx_str = NULL;
+ u32 str_len;
+
+ if (!polsec)
+ return 0;
+
+ if (secid == 0)
+ return -EINVAL;
+
+ rc = security_sid_to_context(&selinux_state, secid, &ctx_str,
+ &str_len);
+ if (rc)
+ return rc;
+
+ ctx = kmalloc(struct_size(ctx, ctx_str, str_len), GFP_ATOMIC);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_sid = secid;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, ctx_str, str_len);
+
+ x->security = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+out:
+ kfree(ctx_str);
+ return rc;
+}
+
+/*
+ * LSM hook implementation that frees xfrm_state security information.
+ */
+void selinux_xfrm_state_free(struct xfrm_state *x)
+{
+ selinux_xfrm_free(x->security);
+}
+
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
+int selinux_xfrm_state_delete(struct xfrm_state *x)
+{
+ return selinux_xfrm_delete(x->security);
+}
+
+/*
+ * LSM hook that controls access to unlabelled packets. If
+ * an xfrm_state is authorizable (defined by macro) then it was
+ * already authorized by the IPsec process. If not, then
+ * we need to check for unlabelled access since this may not have
+ * gone through the IPsec process.
+ */
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
+{
+ int i;
+ struct sec_path *sp = skb_sec_path(skb);
+ u32 peer_sid = SECINITSID_UNLABELED;
+
+ if (sp) {
+ for (i = 0; i < sp->len; i++) {
+ struct xfrm_state *x = sp->xvec[i];
+
+ if (x && selinux_authorizable_xfrm(x)) {
+ struct xfrm_sec_ctx *ctx = x->security;
+ peer_sid = ctx->ctx_sid;
+ break;
+ }
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(&selinux_state,
+ sk_sid, peer_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
+}
+
+/*
+ * POSTROUTE_LAST hook's XFRM processing:
+ * If we have no security association, then we need to determine
+ * whether the socket is allowed to send to an unlabelled destination.
+ * If we do have an authorizable security association, then it has already been
+ * checked in the selinux_xfrm_state_pol_flow_match hook above.
+ */
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto)
+{
+ struct dst_entry *dst;
+
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+ /* We should have already seen this packet once before it
+ * underwent xfrm(s). No need to subject it to the unlabeled
+ * check. */
+ return 0;
+ default:
+ break;
+ }
+
+ dst = skb_dst(skb);
+ if (dst) {
+ struct dst_entry *iter;
+
+ for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
+ struct xfrm_state *x = iter->xfrm;
+
+ if (x && selinux_authorizable_xfrm(x))
+ return 0;
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(&selinux_state, sk_sid, SECINITSID_UNLABELED,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
+}
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
new file mode 100644
index 000000000..5a8dfad46
--- /dev/null
+++ b/security/smack/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_SMACK
+ bool "Simplified Mandatory Access Control Kernel Support"
+ depends on NET
+ depends on INET
+ depends on SECURITY
+ select NETLABEL
+ select SECURITY_NETWORK
+ default n
+ help
+ This selects the Simplified Mandatory Access Control Kernel.
+ Smack is useful for sensitivity, integrity, and a variety
+ of other mandatory security schemes.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_BRINGUP
+ bool "Reporting on access granted by Smack rules"
+ depends on SECURITY_SMACK
+ default n
+ help
+ Enable the bring-up ("b") access mode in Smack rules.
+ When access is granted by a rule with the "b" mode, a
+ message about the access requested is generated. The
+ intention is that a process can be granted a wide set
+ of access initially with the bringup mode set on the
+ rules. The developer can use the information to
+ identify which rules are necessary and what accesses
+ may be inappropriate. The developer can reduce the
+ access rule set once the behavior is well understood.
+ This is a superior mechanism to the oft-abused
+ "permissive" mode of other systems.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_NETFILTER
+ bool "Packet marking using secmarks for netfilter"
+ depends on SECURITY_SMACK
+ depends on NETWORK_SECMARK
+ depends on NETFILTER
+ default n
+ help
+ This enables security marking of network packets using
+ Smack labels.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_APPEND_SIGNALS
+ bool "Treat delivering signals as an append operation"
+ depends on SECURITY_SMACK
+ default n
+ help
+ Sending a signal has been treated as a write operation to the
+ receiving process. If this option is selected, the delivery
+ will be an append operation instead. This makes it possible
+ to differentiate between delivering a network packet and
+ delivering a signal in the Smack rules.
+ If you are unsure how to answer this question, answer N.
diff --git a/security/smack/Makefile b/security/smack/Makefile
new file mode 100644
index 000000000..6dbf6e22a
--- /dev/null
+++ b/security/smack/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the SMACK LSM
+#
+
+obj-$(CONFIG_SECURITY_SMACK) := smack.o
+
+smack-y := smack_lsm.o smack_access.o smackfs.o
+smack-$(CONFIG_SECURITY_SMACK_NETFILTER) += smack_netfilter.o
diff --git a/security/smack/smack.h b/security/smack/smack.h
new file mode 100644
index 000000000..aa15ff56e
--- /dev/null
+++ b/security/smack/smack.h
@@ -0,0 +1,505 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#ifndef _SECURITY_SMACK_H
+#define _SECURITY_SMACK_H
+
+#include <linux/capability.h>
+#include <linux/spinlock.h>
+#include <linux/lsm_hooks.h>
+#include <linux/in.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#endif /* CONFIG_IPV6 */
+#include <net/netlabel.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/lsm_audit.h>
+#include <linux/msg.h>
+
+/*
+ * Use IPv6 port labeling if IPv6 is enabled and secmarks
+ * are not being used.
+ */
+#if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_PORT_LABELING 1
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6) && defined(CONFIG_SECURITY_SMACK_NETFILTER)
+#define SMACK_IPV6_SECMARK_LABELING 1
+#endif
+
+/*
+ * Smack labels were limited to 23 characters for a long time.
+ */
+#define SMK_LABELLEN 24
+#define SMK_LONGLABEL 256
+
+/*
+ * This is the repository for labels seen so that it is
+ * not necessary to keep allocating tiny chunks of memory
+ * and so that they can be shared.
+ *
+ * Labels are never modified in place. Anytime a label
+ * is imported (e.g. xattrset on a file) the list is checked
+ * for it and it is added if it doesn't exist. The address
+ * is passed out in either case. Entries are added, but
+ * never deleted.
+ *
+ * Since labels are hanging around anyway it doesn't
+ * hurt to maintain a secid for those awkward situations
+ * where kernel components that ought to use LSM independent
+ * interfaces don't. The secid should go away when all of
+ * these components have been repaired.
+ *
+ * The cipso value associated with the label gets stored here, too.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
+ */
+struct smack_known {
+ struct list_head list;
+ struct hlist_node smk_hashed;
+ char *smk_known;
+ u32 smk_secid;
+ struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for rules */
+};
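+
+/*
+ * Because labels are interned as described above, pointer equality is
+ * label equality. A minimal sketch (the label "System" is purely
+ * hypothetical, not code used in this file):
+ *
+ *   struct smack_known *a = smk_import_entry("System", 0);
+ *   struct smack_known *b = smk_import_entry("System", 0);
+ *
+ * Barring errors, a == b; both calls resolve to the same entry on
+ * smack_known_list.
+ */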
+
+/*
+ * Maximum number of bytes for the levels in a CIPSO IP option.
+ * Why 23? CIPSO is constrained to 30, so a 32 byte buffer is
+ * bigger than can be used, and 24 is the next lower multiple
+ * of 8, and there are too many issues if there isn't space set
+ * aside for the terminating null byte.
+ */
+#define SMK_CIPSOLEN 24
+
+struct superblock_smack {
+ struct smack_known *smk_root;
+ struct smack_known *smk_floor;
+ struct smack_known *smk_hat;
+ struct smack_known *smk_default;
+ int smk_flags;
+};
+
+/*
+ * Superblock flags
+ */
+#define SMK_SB_INITIALIZED 0x01
+#define SMK_SB_UNTRUSTED 0x02
+
+struct socket_smack {
+ struct smack_known *smk_out; /* outbound label */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_packet; /* TCP peer label */
+ int smk_state; /* netlabel socket states */
+};
+#define SMK_NETLBL_UNSET 0
+#define SMK_NETLBL_UNLABELED 1
+#define SMK_NETLBL_LABELED 2
+#define SMK_NETLBL_REQSKB 3
+
+/*
+ * Inode smack data
+ */
+struct inode_smack {
+ struct smack_known *smk_inode; /* label of the fso */
+ struct smack_known *smk_task; /* label of the task */
+ struct smack_known *smk_mmap; /* label of the mmap domain */
+ int smk_flags; /* smack inode flags */
+};
+
+struct task_smack {
+ struct smack_known *smk_task; /* label for access control */
+ struct smack_known *smk_forked; /* label when forked */
+ struct smack_known *smk_transmuted;/* label when transmuted */
+ struct list_head smk_rules; /* per task access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
+ struct list_head smk_relabel; /* transit allowed labels */
+};
+
+#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
+#define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */
+#define SMK_INODE_CHANGED 0x04 /* smack was transmuted */
+#define SMK_INODE_IMPURE 0x08 /* involved in an impure transaction */
+
+/*
+ * A label access rule.
+ */
+struct smack_rule {
+ struct list_head list;
+ struct smack_known *smk_subject;
+ struct smack_known *smk_object;
+ int smk_access;
+};
+
+/*
+ * An entry in the table identifying IPv4 hosts.
+ */
+struct smk_net4addr {
+ struct list_head list;
+ struct in_addr smk_host; /* network address */
+ struct in_addr smk_mask; /* network mask */
+ int smk_masks; /* mask size */
+ struct smack_known *smk_label; /* label */
+};
+
+/*
+ * An entry in the table identifying IPv6 hosts.
+ */
+struct smk_net6addr {
+ struct list_head list;
+ struct in6_addr smk_host; /* network address */
+ struct in6_addr smk_mask; /* network mask */
+ int smk_masks; /* mask size */
+ struct smack_known *smk_label; /* label */
+};
+
+/*
+ * An entry in the table identifying ports.
+ */
+struct smk_port_label {
+ struct list_head list;
+ struct sock *smk_sock; /* socket initialized on */
+ unsigned short smk_port; /* the port number */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_out; /* outgoing label */
+ short smk_sock_type; /* Socket type */
+ short smk_can_reuse;
+};
+
+struct smack_known_list_elem {
+ struct list_head list;
+ struct smack_known *smk_label;
+};
+
+enum {
+ Opt_error = -1,
+ Opt_fsdefault = 0,
+ Opt_fsfloor = 1,
+ Opt_fshat = 2,
+ Opt_fsroot = 3,
+ Opt_fstransmute = 4,
+};
+
+#define SMACK_DELETE_OPTION "-DELETE"
+#define SMACK_CIPSO_OPTION "-CIPSO"
+
+/*
+ * CIPSO defaults.
+ */
+#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
+#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */
+#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */
+#define SMACK_CIPSO_MAPPED_DEFAULT 251 /* Also arbitrary */
+#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */
+/*
+ * CIPSO 2.2 standard is 239, but Smack wants to use the
+ * categories in a structured way that limits the value to
+ * the bits in 23 bytes, hence the unusual number.
+ */
+#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
+
+/*
+ * Ptrace rules
+ */
+#define SMACK_PTRACE_DEFAULT 0
+#define SMACK_PTRACE_EXACT 1
+#define SMACK_PTRACE_DRACONIAN 2
+#define SMACK_PTRACE_MAX SMACK_PTRACE_DRACONIAN
+
+/*
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
+ */
+#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
+#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
+#define MAY_BRINGUP 0x00004000 /* Report use of this rule */
+
+/*
+ * The policy for delivering signals is configurable.
+ * It is usually "write", but can be "append".
+ */
+#ifdef CONFIG_SECURITY_SMACK_APPEND_SIGNALS
+#define MAY_DELIVER MAY_APPEND /* Signal delivery requires append */
+#else
+#define MAY_DELIVER MAY_WRITE /* Signal delivery requires write */
+#endif
+
+#define SMACK_BRINGUP_ALLOW 1 /* Allow bringup mode */
+#define SMACK_UNCONFINED_SUBJECT 2 /* Allow unconfined label */
+#define SMACK_UNCONFINED_OBJECT 3 /* Allow unconfined label */
+
+/*
+ * Just to make the common cases easier to deal with
+ */
+#define MAY_ANYREAD (MAY_READ | MAY_EXEC)
+#define MAY_READWRITE (MAY_READ | MAY_WRITE)
+#define MAY_NOT 0
+
+/*
+ * Number of access types used by Smack (rwxatlb)
+ */
+#define SMK_NUM_ACCESS_TYPE 7
+
+/* SMACK data */
+struct smack_audit_data {
+ const char *function;
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+};
+
+/*
+ * Smack audit data; is empty if CONFIG_AUDIT not set
+ * to save some stack
+ */
+struct smk_audit_info {
+#ifdef CONFIG_AUDIT
+ struct common_audit_data a;
+ struct smack_audit_data sad;
+#endif
+};
+
+/*
+ * These functions are in smack_access.c
+ */
+int smk_access_entry(char *, char *, struct list_head *);
+int smk_access(struct smack_known *, struct smack_known *,
+ int, struct smk_audit_info *);
+int smk_tskacc(struct task_smack *, struct smack_known *,
+ u32, struct smk_audit_info *);
+int smk_curacc(struct smack_known *, u32, struct smk_audit_info *);
+struct smack_known *smack_from_secid(const u32);
+char *smk_parse_smack(const char *string, int len);
+int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
+struct smack_known *smk_import_entry(const char *, int);
+void smk_insert_entry(struct smack_known *skp);
+struct smack_known *smk_find_entry(const char *);
+bool smack_privileged(int cap);
+bool smack_privileged_cred(int cap, const struct cred *cred);
+void smk_destroy_label_list(struct list_head *list);
+int smack_populate_secattr(struct smack_known *skp);
+
+/*
+ * Shared data.
+ */
+extern int smack_enabled __initdata;
+extern int smack_cipso_direct;
+extern int smack_cipso_mapped;
+extern struct smack_known *smack_net_ambient;
+extern struct smack_known *smack_syslog_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+extern struct smack_known *smack_unconfined;
+#endif
+extern int smack_ptrace_rule;
+extern struct lsm_blob_sizes smack_blob_sizes;
+
+extern struct smack_known smack_known_floor;
+extern struct smack_known smack_known_hat;
+extern struct smack_known smack_known_huh;
+extern struct smack_known smack_known_star;
+extern struct smack_known smack_known_web;
+
+extern struct mutex smack_known_lock;
+extern struct list_head smack_known_list;
+extern struct list_head smk_net4addr_list;
+extern struct list_head smk_net6addr_list;
+
+extern struct mutex smack_onlycap_lock;
+extern struct list_head smack_onlycap_list;
+
+#define SMACK_HASH_SLOTS 16
+extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+extern struct kmem_cache *smack_rule_cache;
+
+static inline struct task_smack *smack_cred(const struct cred *cred)
+{
+ return cred->security + smack_blob_sizes.lbs_cred;
+}
+
+static inline struct smack_known **smack_file(const struct file *file)
+{
+ return (struct smack_known **)(file->f_security +
+ smack_blob_sizes.lbs_file);
+}
+
+static inline struct inode_smack *smack_inode(const struct inode *inode)
+{
+ return inode->i_security + smack_blob_sizes.lbs_inode;
+}
+
+static inline struct smack_known **smack_msg_msg(const struct msg_msg *msg)
+{
+ return msg->security + smack_blob_sizes.lbs_msg_msg;
+}
+
+static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc)
+{
+ return ipc->security + smack_blob_sizes.lbs_ipc;
+}
+
+static inline struct superblock_smack *smack_superblock(
+ const struct super_block *superblock)
+{
+ return superblock->s_security + smack_blob_sizes.lbs_superblock;
+}
+
+/*
+ * Is the directory transmuting?
+ */
+static inline int smk_inode_transmutable(const struct inode *isp)
+{
+ struct inode_smack *sip = smack_inode(isp);
+ return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0;
+}
+
+/*
+ * Present a pointer to the smack label entry in an inode blob.
+ */
+static inline struct smack_known *smk_of_inode(const struct inode *isp)
+{
+ struct inode_smack *sip = smack_inode(isp);
+ return sip->smk_inode;
+}
+
+/*
+ * Present a pointer to the smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
+{
+ return tsp->smk_task;
+}
+
+static inline struct smack_known *smk_of_task_struct_obj(
+ const struct task_struct *t)
+{
+ struct smack_known *skp;
+ const struct cred *cred;
+
+ rcu_read_lock();
+
+ cred = __task_cred(t);
+ skp = smk_of_task(smack_cred(cred));
+
+ rcu_read_unlock();
+
+ return skp;
+}
+
+/*
+ * Present a pointer to the forked smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_forked(const struct task_smack *tsp)
+{
+ return tsp->smk_forked;
+}
+
+/*
+ * Present a pointer to the smack label in the current task blob.
+ */
+static inline struct smack_known *smk_of_current(void)
+{
+ return smk_of_task(smack_cred(current_cred()));
+}
+
+/*
+ * logging functions
+ */
+#define SMACK_AUDIT_DENIED 0x1
+#define SMACK_AUDIT_ACCEPT 0x2
+extern int log_policy;
+
+void smack_log(char *subject_label, char *object_label,
+ int request,
+ int result, struct smk_audit_info *auditdata);
+
+#ifdef CONFIG_AUDIT
+
+/*
+ * Some inline functions to set up audit data;
+ * they do nothing if CONFIG_AUDIT is not set.
+ */
+static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
+ char type)
+{
+ memset(&a->sad, 0, sizeof(a->sad));
+ a->a.type = type;
+ a->a.smack_audit_data = &a->sad;
+ a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+ char type, struct lsm_network_audit *net)
+{
+ smk_ad_init(a, func, type);
+ memset(net, 0, sizeof(*net));
+ a->a.u.net = net;
+}
+
+static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
+ struct task_struct *t)
+{
+ a->a.u.tsk = t;
+}
+static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
+ struct dentry *d)
+{
+ a->a.u.dentry = d;
+}
+static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
+ struct inode *i)
+{
+ a->a.u.inode = i;
+}
+static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
+ struct path p)
+{
+ a->a.u.path = p;
+}
+static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
+ struct sock *sk)
+{
+ a->a.u.net->sk = sk;
+}
+
+#else /* no AUDIT */
+
+static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
+ char type)
+{
+}
+static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
+ struct task_struct *t)
+{
+}
+static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
+ struct dentry *d)
+{
+}
+static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
+ struct inode *i)
+{
+}
+static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
+ struct path p)
+{
+}
+static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
+ struct sock *sk)
+{
+}
+#endif
+
+#endif /* _SECURITY_SMACK_H */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
new file mode 100644
index 000000000..585e5e357
--- /dev/null
+++ b/security/smack/smack_access.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "smack.h"
+
+struct smack_known smack_known_huh = {
+ .smk_known = "?",
+ .smk_secid = 2,
+};
+
+struct smack_known smack_known_hat = {
+ .smk_known = "^",
+ .smk_secid = 3,
+};
+
+struct smack_known smack_known_star = {
+ .smk_known = "*",
+ .smk_secid = 4,
+};
+
+struct smack_known smack_known_floor = {
+ .smk_known = "_",
+ .smk_secid = 5,
+};
+
+struct smack_known smack_known_web = {
+ .smk_known = "@",
+ .smk_secid = 7,
+};
+
+LIST_HEAD(smack_known_list);
+
+/*
+ * The initial value needs to be bigger than any of the
+ * known values above.
+ */
+static u32 smack_next_secid = 10;
+
+/*
+ * Which events we log; this can be overwritten
+ * at run-time via /smack/logging.
+ */
+int log_policy = SMACK_AUDIT_DENIED;
+
+/**
+ * smk_access_entry - look up matching access rule
+ * @subject_label: a pointer to the subject's Smack label
+ * @object_label: a pointer to the object's Smack label
+ * @rule_list: the list of rules to search
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns the access mode. If no
+ * entry is found returns -ENOENT.
+ *
+ * NOTE:
+ *
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
+ *
+ * Allowing write access implies allowing locking.
+ */
+int smk_access_entry(char *subject_label, char *object_label,
+ struct list_head *rule_list)
+{
+ struct smack_rule *srp;
+
+ list_for_each_entry_rcu(srp, rule_list, list) {
+ if (srp->smk_object->smk_known == object_label &&
+ srp->smk_subject->smk_known == subject_label) {
+ int may = srp->smk_access;
+ /*
+ * MAY_WRITE implies MAY_LOCK.
+ */
+ if ((may & MAY_WRITE) == MAY_WRITE)
+ may |= MAY_LOCK;
+ return may;
+ }
+ }
+
+ return -ENOENT;
+}
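+
+/*
+ * Typical use, as in smk_access() below (a sketch; skp and okp are
+ * hypothetical subject and object label pointers):
+ *
+ *   rcu_read_lock();
+ *   may = smk_access_entry(skp->smk_known, okp->smk_known,
+ *                          &skp->smk_rules);
+ *   rcu_read_unlock();
+ *
+ * A negative return means no rule exists for the pair.
+ */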
+
+/**
+ * smk_access - determine if a subject has a specific access to an object
+ * @subject: a pointer to the subject's Smack label entry
+ * @object: a pointer to the object's Smack label entry
+ * @request: the access requested, in "MAY" format
+ * @a: a pointer to the audit data
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns 0 if the access is permitted,
+ * non zero otherwise.
+ *
+ * Smack labels are shared on smack_list
+ */
+int smk_access(struct smack_known *subject, struct smack_known *object,
+ int request, struct smk_audit_info *a)
+{
+ int may = MAY_NOT;
+ int rc = 0;
+
+ /*
+ * Hardcoded comparisons.
+ */
+ /*
+ * A star subject can't access any object.
+ */
+ if (subject == &smack_known_star) {
+ rc = -EACCES;
+ goto out_audit;
+ }
+ /*
+ * An internet object can be accessed by any subject.
+ * Tasks cannot be assigned the internet label.
+ * An internet subject can access any object.
+ */
+ if (object == &smack_known_web || subject == &smack_known_web)
+ goto out_audit;
+ /*
+ * A star object can be accessed by any subject.
+ */
+ if (object == &smack_known_star)
+ goto out_audit;
+ /*
+ * An object can be accessed in any way by a subject
+ * with the same label.
+ */
+ if (subject->smk_known == object->smk_known)
+ goto out_audit;
+ /*
+ * A hat subject can read or lock any object.
+ * A floor object can be read or locked by any subject.
+ */
+ if ((request & MAY_ANYREAD) == request ||
+ (request & MAY_LOCK) == request) {
+ if (object == &smack_known_floor)
+ goto out_audit;
+ if (subject == &smack_known_hat)
+ goto out_audit;
+ }
+ /*
+ * Beyond here an explicit relationship is required.
+ * If the requested access is contained in the available
+ * access (e.g. read is included in readwrite) it's
+ * good. A negative response from smk_access_entry()
+ * indicates there is no entry for this pair.
+ */
+ rcu_read_lock();
+ may = smk_access_entry(subject->smk_known, object->smk_known,
+ &subject->smk_rules);
+ rcu_read_unlock();
+
+ if (may <= 0 || (request & may) != request) {
+ rc = -EACCES;
+ goto out_audit;
+ }
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ /*
+ * Return a positive value if using bringup mode.
+ * This allows the hooks to identify checks that
+ * succeed because of "b" rules.
+ */
+ if (may & MAY_BRINGUP)
+ rc = SMACK_BRINGUP_ALLOW;
+#endif
+
+out_audit:
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ if (rc < 0) {
+ if (object == smack_unconfined)
+ rc = SMACK_UNCONFINED_OBJECT;
+ if (subject == smack_unconfined)
+ rc = SMACK_UNCONFINED_SUBJECT;
+ }
+#endif
+
+#ifdef CONFIG_AUDIT
+ if (a)
+ smack_log(subject->smk_known, object->smk_known,
+ request, rc, a);
+#endif
+
+ return rc;
+}
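+
+/*
+ * Worked example of the hardcoded checks above (illustrative): a "*"
+ * subject is denied everything before the rule list is consulted; any
+ * subject may read a "_" (floor) object; a subject reading an object
+ * with its own label succeeds on the same-label check; everything
+ * else requires an explicit rule.
+ */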
+
+/**
+ * smk_tskacc - determine if a task has a specific access to an object
+ * @tsp: a pointer to the subject's task
+ * @obj_known: a pointer to the object's label entry
+ * @mode: the access requested, in "MAY" format
+ * @a: common audit data
+ *
+ * This function checks the subject task's label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that the task may have the capability
+ * to override the rules.
+ */
+int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known,
+ u32 mode, struct smk_audit_info *a)
+{
+ struct smack_known *sbj_known = smk_of_task(tsp);
+ int may;
+ int rc;
+
+ /*
+ * Check the global rule list
+ */
+ rc = smk_access(sbj_known, obj_known, mode, NULL);
+ if (rc >= 0) {
+ /*
+ * If there is an entry in the task's rule list
+ * it can further restrict access.
+ */
+ may = smk_access_entry(sbj_known->smk_known,
+ obj_known->smk_known,
+ &tsp->smk_rules);
+ if (may < 0)
+ goto out_audit;
+ if ((mode & may) == mode)
+ goto out_audit;
+ rc = -EACCES;
+ }
+
+ /*
+ * Allow privileged tasks to override policy.
+ */
+ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
+ rc = 0;
+
+out_audit:
+#ifdef CONFIG_AUDIT
+ if (a)
+ smack_log(sbj_known->smk_known, obj_known->smk_known,
+ mode, rc, a);
+#endif
+ return rc;
+}
+
+/**
+ * smk_curacc - determine if current has a specific access to an object
+ * @obj_known: a pointer to the object's Smack label entry
+ * @mode: the access requested, in "MAY" format
+ * @a: common audit data
+ *
+ * This function checks the current subject label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that current may have the capability
+ * to override the rules.
+ */
+int smk_curacc(struct smack_known *obj_known,
+ u32 mode, struct smk_audit_info *a)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_tskacc(tsp, obj_known, mode, a);
+}
+
+#ifdef CONFIG_AUDIT
+/**
+ * smack_str_from_perm - helper to translate an int to a
+ * readable string
+ * @string: the string to fill
+ * @access: the access mode to translate
+ *
+ */
+static inline void smack_str_from_perm(char *string, int access)
+{
+ int i = 0;
+
+ if (access & MAY_READ)
+ string[i++] = 'r';
+ if (access & MAY_WRITE)
+ string[i++] = 'w';
+ if (access & MAY_EXEC)
+ string[i++] = 'x';
+ if (access & MAY_APPEND)
+ string[i++] = 'a';
+ if (access & MAY_TRANSMUTE)
+ string[i++] = 't';
+ if (access & MAY_LOCK)
+ string[i++] = 'l';
+ string[i] = '\0';
+}
+/**
+ * smack_log_callback - SMACK specific information
+ * will be called by generic audit code
+ * @ab : the audit_buffer
+ * @a : audit_data
+ *
+ */
+static void smack_log_callback(struct audit_buffer *ab, void *a)
+{
+ struct common_audit_data *ad = a;
+ struct smack_audit_data *sad = ad->smack_audit_data;
+ audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
+ ad->smack_audit_data->function,
+ sad->result ? "denied" : "granted");
+ audit_log_format(ab, " subject=");
+ audit_log_untrustedstring(ab, sad->subject);
+ audit_log_format(ab, " object=");
+ audit_log_untrustedstring(ab, sad->object);
+ if (sad->request[0] == '\0')
+ audit_log_format(ab, " labels_differ");
+ else
+ audit_log_format(ab, " requested=%s", sad->request);
+}
+
+/**
+ * smack_log - Audit the granting or denial of permissions.
+ * @subject_label: smack label of the requester
+ * @object_label: smack label of the object being accessed
+ * @request: requested permissions
+ * @result: result from smk_access
+ * @ad: auxiliary audit data
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy.
+ */
+void smack_log(char *subject_label, char *object_label, int request,
+ int result, struct smk_audit_info *ad)
+{
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ char request_buffer[SMK_NUM_ACCESS_TYPE + 5];
+#else
+ char request_buffer[SMK_NUM_ACCESS_TYPE + 1];
+#endif
+ struct smack_audit_data *sad;
+ struct common_audit_data *a = &ad->a;
+
+ /* check if we have to log the current event */
+ if (result < 0 && (log_policy & SMACK_AUDIT_DENIED) == 0)
+ return;
+ if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
+ return;
+
+ sad = a->smack_audit_data;
+
+ if (sad->function == NULL)
+ sad->function = "unknown";
+
+ /* end preparing the audit data */
+ smack_str_from_perm(request_buffer, request);
+ sad->subject = subject_label;
+ sad->object = object_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ /*
+ * The result may be positive in bringup mode.
+ * A positive result is an allow, but not for normal reasons.
+ * Mark it as successful, but don't filter it out even if
+ * the logging policy says to do so.
+ */
+ if (result == SMACK_UNCONFINED_SUBJECT)
+ strcat(request_buffer, "(US)");
+ else if (result == SMACK_UNCONFINED_OBJECT)
+ strcat(request_buffer, "(UO)");
+
+ if (result > 0)
+ result = 0;
+#endif
+ sad->request = request_buffer;
+ sad->result = result;
+
+ common_lsm_audit(a, smack_log_callback, NULL);
+}
+#else /* #ifdef CONFIG_AUDIT */
+void smack_log(char *subject_label, char *object_label, int request,
+ int result, struct smk_audit_info *ad)
+{
+}
+#endif
+
+DEFINE_MUTEX(smack_known_lock);
+
+struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
+/**
+ * smk_insert_entry - insert a smack label into the hash map
+ * @skp: smack label
+ *
+ * This function must be called with smack_known_lock held.
+ */
+void smk_insert_entry(struct smack_known *skp)
+{
+ unsigned int hash;
+ struct hlist_head *head;
+
+ hash = full_name_hash(NULL, skp->smk_known, strlen(skp->smk_known));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_add_head_rcu(&skp->smk_hashed, head);
+ list_add_rcu(&skp->list, &smack_known_list);
+}
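+
+/*
+ * Illustrative: the label hashes into one of SMACK_HASH_SLOTS (16)
+ * buckets, so smk_find_entry() below only walks the labels that share
+ * the low four bits of the full_name_hash() value.
+ */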
+
+/**
+ * smk_find_entry - find a label on the list, return the list entry
+ * @string: a text string that might be a Smack label
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string or NULL if not found.
+ */
+struct smack_known *smk_find_entry(const char *string)
+{
+ unsigned int hash;
+ struct hlist_head *head;
+ struct smack_known *skp;
+
+ hash = full_name_hash(NULL, string, strlen(string));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_for_each_entry_rcu(skp, head, smk_hashed)
+ if (strcmp(skp->smk_known, string) == 0)
+ return skp;
+
+ return NULL;
+}
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if it is NUL terminated.
+ *
+ * Returns a pointer to the clean label or an error code.
+ */
+char *smk_parse_smack(const char *string, int len)
+{
+ char *smack;
+ int i;
+
+ if (len <= 0)
+ len = strlen(string) + 1;
+
+ /*
+ * Reserve a leading '-' as an indicator that
+ * this isn't a label, but an option to interfaces
+ * including /smack/cipso and /smack/cipso2
+ */
+ if (string[0] == '-')
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < len; i++)
+ if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' ||
+ string[i] == '"' || string[i] == '\\' || string[i] == '\'')
+ break;
+
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return ERR_PTR(-EINVAL);
+
+ smack = kstrndup(string, i, GFP_NOFS);
+ if (!smack)
+ return ERR_PTR(-ENOMEM);
+ return smack;
+}
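+
+/*
+ * Examples (illustrative): "System" parses to "System"; "foo/bar" is
+ * cut at the '/' and parses to "foo"; "-CIPSO" and the empty string
+ * both yield -EINVAL.
+ */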
+
+/**
+ * smk_netlbl_mls - convert a catset to netlabel mls categories
+ * @level: MLS sensitivity level
+ * @catset: the Smack categories
+ * @sap: where to put the netlabel categories
+ * @len: number of bytes for the levels in a CIPSO IP option
+ *
+ * Allocates and fills attr.mls
+ * Returns 0 on success, error code on failure.
+ */
+int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+ int len)
+{
+ unsigned char *cp;
+ unsigned char m;
+ int cat;
+ int rc;
+ int byte;
+
+ sap->flags |= NETLBL_SECATTR_MLS_CAT;
+ sap->attr.mls.lvl = level;
+ sap->attr.mls.cat = NULL;
+
+ for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
+ for (m = 0x80; m != 0; m >>= 1, cat++) {
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+ cat, GFP_NOFS);
+ if (rc < 0) {
+ netlbl_catmap_free(sap->attr.mls.cat);
+ return rc;
+ }
+ }
+
+ return 0;
+}
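+
+/*
+ * Worked example (illustrative): a catset byte of 0x81 sets netlabel
+ * categories 1 (bit 0x80) and 8 (bit 0x01); the second byte covers
+ * categories 9 through 16, and so on.
+ */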
+
+/**
+ * smack_populate_secattr - fill in the smack_known netlabel information
+ * @skp: pointer to the structure to fill
+ *
+ * Populate the netlabel secattr structure for a Smack label.
+ *
+ * Returns 0 unless creating the category mapping fails
+ */
+int smack_populate_secattr(struct smack_known *skp)
+{
+ int slen;
+
+ skp->smk_netlabel.attr.secid = skp->smk_secid;
+ skp->smk_netlabel.domain = skp->smk_known;
+ skp->smk_netlabel.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+ if (skp->smk_netlabel.cache != NULL) {
+ skp->smk_netlabel.flags |= NETLBL_SECATTR_CACHE;
+ skp->smk_netlabel.cache->free = NULL;
+ skp->smk_netlabel.cache->data = skp;
+ }
+ skp->smk_netlabel.flags |= NETLBL_SECATTR_SECID |
+ NETLBL_SECATTR_MLS_LVL |
+ NETLBL_SECATTR_DOMAIN;
+ /*
+ * If direct labeling works use it.
+ * Otherwise use mapped labeling.
+ */
+ slen = strlen(skp->smk_known);
+ if (slen < SMK_CIPSOLEN)
+ return smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+ &skp->smk_netlabel, slen);
+
+ return smk_netlbl_mls(smack_cipso_mapped, (char *)&skp->smk_secid,
+ &skp->smk_netlabel, sizeof(skp->smk_secid));
+}
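+
+/*
+ * Illustrative: a six character label such as "System" is shorter
+ * than SMK_CIPSOLEN and travels at the direct level with its bytes
+ * encoded as categories, while a 200 byte label travels at the
+ * mapped level with its four byte secid encoded instead.
+ */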
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NUL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary,
+ * or an error code.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+ struct smack_known *skp;
+ char *smack;
+ int rc;
+
+ smack = smk_parse_smack(string, len);
+ if (IS_ERR(smack))
+ return ERR_CAST(smack);
+
+ mutex_lock(&smack_known_lock);
+
+ skp = smk_find_entry(smack);
+ if (skp != NULL)
+ goto freeout;
+
+ skp = kzalloc(sizeof(*skp), GFP_NOFS);
+ if (skp == NULL) {
+ skp = ERR_PTR(-ENOMEM);
+ goto freeout;
+ }
+
+ skp->smk_known = smack;
+ skp->smk_secid = smack_next_secid++;
+
+ rc = smack_populate_secattr(skp);
+ if (rc >= 0) {
+ INIT_LIST_HEAD(&skp->smk_rules);
+ mutex_init(&skp->smk_rules_lock);
+ /*
+ * Make sure that the entry is actually
+ * filled before putting it on the list.
+ */
+ smk_insert_entry(skp);
+ goto unlockout;
+ }
+ kfree(skp);
+ skp = ERR_PTR(rc);
+freeout:
+ kfree(smack);
+unlockout:
+ mutex_unlock(&smack_known_lock);
+
+ return skp;
+}
+
+/**
+ * smack_from_secid - find the Smack label associated with a secid
+ * @secid: an integer that might be associated with a Smack label
+ *
+ * Returns a pointer to the appropriate Smack label entry if there is one,
+ * otherwise a pointer to the invalid Smack label.
+ */
+struct smack_known *smack_from_secid(const u32 secid)
+{
+ struct smack_known *skp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(skp, &smack_known_list, list) {
+ if (skp->smk_secid == secid) {
+ rcu_read_unlock();
+ return skp;
+ }
+ }
+
+ /*
+ * If we got this far someone asked for the translation
+ * of a secid that is not on the list.
+ */
+ rcu_read_unlock();
+ return &smack_known_huh;
+}
+
+/*
+ * Unless a process is running with one of these labels
+ * even having CAP_MAC_OVERRIDE isn't enough to grant
+ * privilege to violate MAC policy. If no labels are
+ * designated (the empty list case) capabilities apply to
+ * everyone.
+ */
+LIST_HEAD(smack_onlycap_list);
+DEFINE_MUTEX(smack_onlycap_lock);
+
+/**
+ * smack_privileged_cred - are all privilege requirements met by cred
+ * @cap: The requested capability
+ * @cred: the credential to use
+ *
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ *
+ * Returns true if the task is allowed to be privileged, false if it's not.
+ */
+bool smack_privileged_cred(int cap, const struct cred *cred)
+{
+ struct task_smack *tsp = smack_cred(cred);
+ struct smack_known *skp = tsp->smk_task;
+ struct smack_known_list_elem *sklep;
+ int rc;
+
+ rc = cap_capable(cred, &init_user_ns, cap, CAP_OPT_NONE);
+ if (rc)
+ return false;
+
+ rcu_read_lock();
+ if (list_empty(&smack_onlycap_list)) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ list_for_each_entry_rcu(sklep, &smack_onlycap_list, list) {
+ if (sklep->smk_label == skp) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+/**
+ * smack_privileged - are all privilege requirements met
+ * @cap: The requested capability
+ *
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ *
+ * Returns true if the task is allowed to be privileged, false if it's not.
+ */
+bool smack_privileged(int cap)
+{
+ /*
+ * All kernel tasks are privileged
+ */
+ if (unlikely(current->flags & PF_KTHREAD))
+ return true;
+
+ return smack_privileged_cred(cap, current_cred());
+}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
new file mode 100644
index 000000000..fbadc61fe
--- /dev/null
+++ b/security/smack/smack_lsm.c
@@ -0,0 +1,5105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simplified MAC Kernel (smack) security module
+ *
+ * This file contains the smack hook function implementations.
+ *
+ * Authors:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@intel.com>
+ *
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Paul Moore <paul@paul-moore.com>
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation.
+ */
+
+#include <linux/xattr.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/stat.h>
+#include <linux/kd.h>
+#include <asm/ioctls.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/icmpv6.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <net/cipso_ipv4.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/audit.h>
+#include <linux/magic.h>
+#include <linux/dcache.h>
+#include <linux/personality.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/binfmts.h>
+#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/watch_queue.h>
+#include <linux/io_uring.h>
+#include "smack.h"
+
+#define TRANS_TRUE "TRUE"
+#define TRANS_TRUE_SIZE 4
+
+#define SMK_CONNECTING 0
+#define SMK_RECEIVING 1
+#define SMK_SENDING 2
+
+#ifdef SMACK_IPV6_PORT_LABELING
+static DEFINE_MUTEX(smack_ipv6_lock);
+static LIST_HEAD(smk_ipv6_port_list);
+#endif
+struct kmem_cache *smack_rule_cache;
+int smack_enabled __initdata;
+
+#define A(s) {"smack"#s, sizeof("smack"#s) - 1, Opt_##s}
+static struct {
+ const char *name;
+ int len;
+ int opt;
+} smk_mount_opts[] = {
+ {"smackfsdef", sizeof("smackfsdef") - 1, Opt_fsdefault},
+ A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute)
+};
+#undef A
+
+static int match_opt_prefix(char *s, int l, char **arg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smk_mount_opts); i++) {
+ size_t len = smk_mount_opts[i].len;
+ if (len > l || memcmp(s, smk_mount_opts[i].name, len))
+ continue;
+ if (len == l || s[len] != '=')
+ continue;
+ *arg = s + len + 1;
+ return smk_mount_opts[i].opt;
+ }
+ return Opt_error;
+}
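+
+/*
+ * Example (illustrative): given "smackfsfloor=_", match_opt_prefix()
+ * returns Opt_fsfloor with *arg pointing at "_"; an unrecognized
+ * option such as "rw" yields Opt_error and is left for the VFS.
+ */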
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static char *smk_bu_mess[] = {
+ "Bringup Error", /* Unused */
+ "Bringup", /* SMACK_BRINGUP_ALLOW */
+ "Unconfined Subject", /* SMACK_UNCONFINED_SUBJECT */
+ "Unconfined Object", /* SMACK_UNCONFINED_OBJECT */
+};
+
+static void smk_bu_mode(int mode, char *s)
+{
+ int i = 0;
+
+ if (mode & MAY_READ)
+ s[i++] = 'r';
+ if (mode & MAY_WRITE)
+ s[i++] = 'w';
+ if (mode & MAY_EXEC)
+ s[i++] = 'x';
+ if (mode & MAY_APPEND)
+ s[i++] = 'a';
+ if (mode & MAY_TRANSMUTE)
+ s[i++] = 't';
+ if (mode & MAY_LOCK)
+ s[i++] = 'l';
+ if (i == 0)
+ s[i++] = '-';
+ s[i] = '\0';
+}
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_note(char *note, struct smack_known *sskp,
+ struct smack_known *oskp, int mode, int rc)
+{
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack %s: (%s %s %s) %s\n", smk_bu_mess[rc],
+ sskp->smk_known, oskp->smk_known, acc, note);
+ return 0;
+}
+#else
+#define smk_bu_note(note, sskp, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_current(char *note, struct smack_known *oskp,
+ int mode, int rc)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack %s: (%s %s %s) %s %s\n", smk_bu_mess[rc],
+ tsp->smk_task->smk_known, oskp->smk_known,
+ acc, current->comm, note);
+ return 0;
+}
+#else
+#define smk_bu_current(note, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct smack_known *smk_task = smk_of_task_struct_obj(otp);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack %s: (%s %s %s) %s to %s\n", smk_bu_mess[rc],
+ tsp->smk_task->smk_known, smk_task->smk_known, acc,
+ current->comm, otp->comm);
+ return 0;
+}
+#else
+#define smk_bu_task(otp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_inode(struct inode *inode, int mode, int rc)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct inode_smack *isp = smack_inode(inode);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+ if (rc == SMACK_UNCONFINED_SUBJECT &&
+ (mode & (MAY_WRITE | MAY_APPEND)))
+ isp->smk_flags |= SMK_INODE_IMPURE;
+
+ smk_bu_mode(mode, acc);
+
+ pr_info("Smack %s: (%s %s %s) inode=(%s %ld) %s\n", smk_bu_mess[rc],
+ tsp->smk_task->smk_known, isp->smk_inode->smk_known, acc,
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+ return 0;
+}
+#else
+#define smk_bu_inode(inode, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_file(struct file *file, int mode, int rc)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct smack_known *sskp = tsp->smk_task;
+ struct inode *inode = file_inode(file);
+ struct inode_smack *isp = smack_inode(inode);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
+ sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
+ inode->i_sb->s_id, inode->i_ino, file,
+ current->comm);
+ return 0;
+}
+#else
+#define smk_bu_file(file, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_credfile(const struct cred *cred, struct file *file,
+ int mode, int rc)
+{
+ struct task_smack *tsp = smack_cred(cred);
+ struct smack_known *sskp = tsp->smk_task;
+ struct inode *inode = file_inode(file);
+ struct inode_smack *isp = smack_inode(inode);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
+ if (rc <= 0)
+ return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
+ sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
+ inode->i_sb->s_id, inode->i_ino, file,
+ current->comm);
+ return 0;
+}
+#else
+#define smk_bu_credfile(cred, file, mode, RC) (RC)
+#endif
+
+/**
+ * smk_fetch - Fetch the smack label from a file.
+ * @name: type of the label (attribute)
+ * @ip: a pointer to the inode
+ * @dp: a pointer to the dentry
+ *
+ * Returns a pointer to the master list entry for the Smack label,
+ * NULL if there was no label to fetch, or an error code.
+ */
+static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+ struct dentry *dp)
+{
+ int rc;
+ char *buffer;
+ struct smack_known *skp = NULL;
+
+ if (!(ip->i_opflags & IOP_XATTR))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = __vfs_getxattr(dp, ip, name, buffer, SMK_LONGLABEL);
+ if (rc < 0)
+ skp = ERR_PTR(rc);
+ else if (rc == 0)
+ skp = NULL;
+ else
+ skp = smk_import_entry(buffer, rc);
+
+ kfree(buffer);
+
+ return skp;
+}
+
+/**
+ * init_inode_smack - initialize an inode security blob
+ * @inode: inode to extract the info from
+ * @skp: a pointer to the Smack label entry to use in the blob
+ *
+ */
+static void init_inode_smack(struct inode *inode, struct smack_known *skp)
+{
+ struct inode_smack *isp = smack_inode(inode);
+
+ isp->smk_inode = skp;
+ isp->smk_flags = 0;
+}
+
+/**
+ * init_task_smack - initialize a task security blob
+ * @tsp: blob to initialize
+ * @task: a pointer to the Smack label for the running task
+ * @forked: a pointer to the Smack label for the forked task
+ *
+ */
+static void init_task_smack(struct task_smack *tsp, struct smack_known *task,
+ struct smack_known *forked)
+{
+ tsp->smk_task = task;
+ tsp->smk_forked = forked;
+ INIT_LIST_HEAD(&tsp->smk_rules);
+ INIT_LIST_HEAD(&tsp->smk_relabel);
+ mutex_init(&tsp->smk_rules_lock);
+}
+
+/**
+ * smk_copy_rules - copy a rule set
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
+ gfp_t gfp)
+{
+ struct smack_rule *nrp;
+ struct smack_rule *orp;
+ int rc = 0;
+
+ list_for_each_entry_rcu(orp, ohead, list) {
+ nrp = kmem_cache_zalloc(smack_rule_cache, gfp);
+ if (nrp == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
+ *nrp = *orp;
+ list_add_rcu(&nrp->list, nhead);
+ }
+ return rc;
+}
+
+/**
+ * smk_copy_relabel - copy smk_relabel labels list
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
+ gfp_t gfp)
+{
+ struct smack_known_list_elem *nklep;
+ struct smack_known_list_elem *oklep;
+
+ list_for_each_entry(oklep, ohead, list) {
+ nklep = kzalloc(sizeof(struct smack_known_list_elem), gfp);
+ if (nklep == NULL) {
+ smk_destroy_label_list(nhead);
+ return -ENOMEM;
+ }
+ nklep->smk_label = oklep->smk_label;
+ list_add(&nklep->list, nhead);
+ }
+
+ return 0;
+}
+
+/**
+ * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_*
+ * @mode: input mode in form of PTRACE_MODE_*
+ *
+ * Returns a converted MAY_* mode usable by smack rules
+ */
+static inline unsigned int smk_ptrace_mode(unsigned int mode)
+{
+ if (mode & PTRACE_MODE_ATTACH)
+ return MAY_READWRITE;
+ if (mode & PTRACE_MODE_READ)
+ return MAY_READ;
+
+ return 0;
+}
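+
+/*
+ * Illustrative: PTRACE_MODE_ATTACH maps to MAY_READWRITE above, so
+ * under the default ptrace rule a tracer needs a Smack rule granting
+ * both read and write on the tracee's label just to attach.
+ */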
+
+/**
+ * smk_ptrace_rule_check - helper for ptrace access
+ * @tracer: tracer process
+ * @tracee_known: label entry of the process that's about to be traced
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ * @func: name of the function that called us, used for audit
+ *
+ * Returns 0 on access granted, -error on error
+ */
+static int smk_ptrace_rule_check(struct task_struct *tracer,
+ struct smack_known *tracee_known,
+ unsigned int mode, const char *func)
+{
+ int rc;
+ struct smk_audit_info ad, *saip = NULL;
+ struct task_smack *tsp;
+ struct smack_known *tracer_known;
+ const struct cred *tracercred;
+
+ if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
+ smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, tracer);
+ saip = &ad;
+ }
+
+ rcu_read_lock();
+ tracercred = __task_cred(tracer);
+ tsp = smack_cred(tracercred);
+ tracer_known = smk_of_task(tsp);
+
+ if ((mode & PTRACE_MODE_ATTACH) &&
+ (smack_ptrace_rule == SMACK_PTRACE_EXACT ||
+ smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) {
+ if (tracer_known->smk_known == tracee_known->smk_known)
+ rc = 0;
+ else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
+ rc = -EACCES;
+ else if (smack_privileged_cred(CAP_SYS_PTRACE, tracercred))
+ rc = 0;
+ else
+ rc = -EACCES;
+
+ if (saip)
+ smack_log(tracer_known->smk_known,
+ tracee_known->smk_known,
+ 0, rc, saip);
+
+ rcu_read_unlock();
+ return rc;
+ }
+
+ /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+ rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
+
+ rcu_read_unlock();
+ return rc;
+}
+
+/*
+ * LSM hooks.
+ * Whee, that is fun!
+ */
+
+/**
+ * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
+ * @ctp: child task pointer
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks.
+ */
+static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
+{
+ struct smack_known *skp;
+
+ skp = smk_of_task_struct_obj(ctp);
+
+ return smk_ptrace_rule_check(current, skp, mode, __func__);
+}
+
+/**
+ * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME
+ * @ptp: parent task pointer
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks, and require PTRACE_MODE_ATTACH.
+ */
+static int smack_ptrace_traceme(struct task_struct *ptp)
+{
+ struct smack_known *skp;
+
+ skp = smk_of_task(smack_cred(current_cred()));
+
+ return smk_ptrace_rule_check(ptp, skp, PTRACE_MODE_ATTACH, __func__);
+}
+
+/**
+ * smack_syslog - Smack approval on syslog
+ * @typefrom_file: unused
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+static int smack_syslog(int typefrom_file)
+{
+ int rc = 0;
+ struct smack_known *skp = smk_of_current();
+
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
+
+ if (smack_syslog_label != NULL && smack_syslog_label != skp)
+ rc = -EACCES;
+
+ return rc;
+}
+
+/*
+ * Superblock Hooks.
+ */
+
+/**
+ * smack_sb_alloc_security - allocate a superblock blob
+ * @sb: the superblock getting the blob
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_sb_alloc_security(struct super_block *sb)
+{
+ struct superblock_smack *sbsp = smack_superblock(sb);
+
+ sbsp->smk_root = &smack_known_floor;
+ sbsp->smk_default = &smack_known_floor;
+ sbsp->smk_floor = &smack_known_floor;
+ sbsp->smk_hat = &smack_known_hat;
+ /*
+ * SMK_SB_INITIALIZED will be zero from kzalloc.
+ */
+
+ return 0;
+}
+
+struct smack_mnt_opts {
+ const char *fsdefault, *fsfloor, *fshat, *fsroot, *fstransmute;
+};
+
+static void smack_free_mnt_opts(void *mnt_opts)
+{
+ struct smack_mnt_opts *opts = mnt_opts;
+ kfree(opts->fsdefault);
+ kfree(opts->fsfloor);
+ kfree(opts->fshat);
+ kfree(opts->fsroot);
+ kfree(opts->fstransmute);
+ kfree(opts);
+}
+
+static int smack_add_opt(int token, const char *s, void **mnt_opts)
+{
+ struct smack_mnt_opts *opts = *mnt_opts;
+
+ if (!opts) {
+ opts = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+ *mnt_opts = opts;
+ }
+ if (!s)
+ return -ENOMEM;
+
+ switch (token) {
+ case Opt_fsdefault:
+ if (opts->fsdefault)
+ goto out_opt_err;
+ opts->fsdefault = s;
+ break;
+ case Opt_fsfloor:
+ if (opts->fsfloor)
+ goto out_opt_err;
+ opts->fsfloor = s;
+ break;
+ case Opt_fshat:
+ if (opts->fshat)
+ goto out_opt_err;
+ opts->fshat = s;
+ break;
+ case Opt_fsroot:
+ if (opts->fsroot)
+ goto out_opt_err;
+ opts->fsroot = s;
+ break;
+ case Opt_fstransmute:
+ if (opts->fstransmute)
+ goto out_opt_err;
+ opts->fstransmute = s;
+ break;
+ }
+ return 0;
+
+out_opt_err:
+ pr_warn("Smack: duplicate mount options\n");
+ return -EINVAL;
+}
+
+/**
+ * smack_fs_context_submount - Initialise security data for a filesystem context
+ * @fc: The filesystem context.
+ * @reference: reference superblock
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ struct superblock_smack *sbsp;
+ struct smack_mnt_opts *ctx;
+ struct inode_smack *isp;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ fc->security = ctx;
+
+ sbsp = smack_superblock(reference);
+ isp = smack_inode(reference->s_root->d_inode);
+
+ if (sbsp->smk_default) {
+ ctx->fsdefault = kstrdup(sbsp->smk_default->smk_known, GFP_KERNEL);
+ if (!ctx->fsdefault)
+ return -ENOMEM;
+ }
+
+ if (sbsp->smk_floor) {
+ ctx->fsfloor = kstrdup(sbsp->smk_floor->smk_known, GFP_KERNEL);
+ if (!ctx->fsfloor)
+ return -ENOMEM;
+ }
+
+ if (sbsp->smk_hat) {
+ ctx->fshat = kstrdup(sbsp->smk_hat->smk_known, GFP_KERNEL);
+ if (!ctx->fshat)
+ return -ENOMEM;
+ }
+
+ if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
+ if (sbsp->smk_root) {
+ ctx->fstransmute = kstrdup(sbsp->smk_root->smk_known, GFP_KERNEL);
+ if (!ctx->fstransmute)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * smack_fs_context_dup - Duplicate the security data on fs_context duplication
+ * @fc: The new filesystem context.
+ * @src_fc: The source filesystem context being duplicated.
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ struct smack_mnt_opts *dst, *src = src_fc->security;
+
+ if (!src)
+ return 0;
+
+ fc->security = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL);
+ if (!fc->security)
+ return -ENOMEM;
+ dst = fc->security;
+
+ if (src->fsdefault) {
+ dst->fsdefault = kstrdup(src->fsdefault, GFP_KERNEL);
+ if (!dst->fsdefault)
+ return -ENOMEM;
+ }
+ if (src->fsfloor) {
+ dst->fsfloor = kstrdup(src->fsfloor, GFP_KERNEL);
+ if (!dst->fsfloor)
+ return -ENOMEM;
+ }
+ if (src->fshat) {
+ dst->fshat = kstrdup(src->fshat, GFP_KERNEL);
+ if (!dst->fshat)
+ return -ENOMEM;
+ }
+ if (src->fsroot) {
+ dst->fsroot = kstrdup(src->fsroot, GFP_KERNEL);
+ if (!dst->fsroot)
+ return -ENOMEM;
+ }
+ if (src->fstransmute) {
+ dst->fstransmute = kstrdup(src->fstransmute, GFP_KERNEL);
+ if (!dst->fstransmute)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static const struct fs_parameter_spec smack_fs_parameters[] = {
+ fsparam_string("smackfsdef", Opt_fsdefault),
+ fsparam_string("smackfsdefault", Opt_fsdefault),
+ fsparam_string("smackfsfloor", Opt_fsfloor),
+ fsparam_string("smackfshat", Opt_fshat),
+ fsparam_string("smackfsroot", Opt_fsroot),
+ fsparam_string("smackfstransmute", Opt_fstransmute),
+ {}
+};
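+
+/*
+ * Example (hypothetical label "Pop"): these parameters arrive as ordinary
+ * mount options, e.g.
+ *
+ *	mount -t tmpfs -o smackfsroot=Pop none /mnt
+ *
+ * and are parsed by smack_fs_context_parse_param() below.
+ */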
+
+/**
+ * smack_fs_context_parse_param - Parse a single mount parameter
+ * @fc: The new filesystem context being constructed.
+ * @param: The parameter.
+ *
+ * Returns 0 on success, -ENOPARAM to pass the parameter on or anything else on
+ * error.
+ */
+static int smack_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ int opt, rc;
+
+ opt = fs_parse(fc, smack_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ rc = smack_add_opt(opt, param->string, &fc->security);
+ if (!rc)
+ param->string = NULL;
+ return rc;
+}
+
+static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts)
+{
+ char *from = options, *to = options;
+ bool first = true;
+
+ while (1) {
+ char *next = strchr(from, ',');
+ int token, len, rc;
+ char *arg = NULL;
+
+ if (next)
+ len = next - from;
+ else
+ len = strlen(from);
+
+ token = match_opt_prefix(from, len, &arg);
+ if (token != Opt_error) {
+ arg = kmemdup_nul(arg, from + len - arg, GFP_KERNEL);
+ rc = smack_add_opt(token, arg, mnt_opts);
+ if (unlikely(rc)) {
+ kfree(arg);
+ if (*mnt_opts)
+ smack_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
+ return rc;
+ }
+ } else {
+ if (!first) { /* copy with preceding comma */
+ from--;
+ len++;
+ }
+ if (to != from)
+ memmove(to, from, len);
+ to += len;
+ first = false;
+ }
+ if (!from[len])
+ break;
+ from += len + 1;
+ }
+ *to = '\0';
+ return 0;
+}
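+
+/*
+ * Example (hypothetical label "Pop" and a hypothetical filesystem option
+ * "someopt"): given the legacy option string "smackfsdef=Pop,someopt",
+ * smack_sb_eat_lsm_opts() records "Pop" as the fsdefault entry in
+ * *mnt_opts and compacts the string in place to "someopt" for the
+ * filesystem to parse.
+ */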
+
+/**
+ * smack_set_mnt_opts - set Smack specific mount options
+ * @sb: the file system superblock
+ * @mnt_opts: Smack mount options
+ * @kern_flags: mount option from kernel space or user space
+ * @set_kern_flags: where to store converted mount opts
+ *
+ * Returns 0 on success, an error code on failure
+ *
+ * Allow filesystems with binary mount data to explicitly set Smack mount
+ * labels.
+ */
+static int smack_set_mnt_opts(struct super_block *sb,
+ void *mnt_opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ struct dentry *root = sb->s_root;
+ struct inode *inode = d_backing_inode(root);
+ struct superblock_smack *sp = smack_superblock(sb);
+ struct inode_smack *isp;
+ struct smack_known *skp;
+ struct smack_mnt_opts *opts = mnt_opts;
+ bool transmute = false;
+
+ if (sp->smk_flags & SMK_SB_INITIALIZED)
+ return 0;
+
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ /*
+ * Unprivileged mounts don't get to specify Smack values.
+ */
+ if (opts)
+ return -EPERM;
+ /*
+ * Unprivileged mounts get root and default from the caller.
+ */
+ skp = smk_of_current();
+ sp->smk_root = skp;
+ sp->smk_default = skp;
+ /*
+ * For a handful of fs types with no user-controlled
+ * backing store it's okay to trust security labels
+ * in the filesystem. The rest are untrusted.
+ */
+ if (sb->s_user_ns != &init_user_ns &&
+ sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
+ sb->s_magic != RAMFS_MAGIC) {
+ transmute = true;
+ sp->smk_flags |= SMK_SB_UNTRUSTED;
+ }
+ }
+
+ sp->smk_flags |= SMK_SB_INITIALIZED;
+
+ if (opts) {
+ if (opts->fsdefault) {
+ skp = smk_import_entry(opts->fsdefault, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+ sp->smk_default = skp;
+ }
+ if (opts->fsfloor) {
+ skp = smk_import_entry(opts->fsfloor, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+ sp->smk_floor = skp;
+ }
+ if (opts->fshat) {
+ skp = smk_import_entry(opts->fshat, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+ sp->smk_hat = skp;
+ }
+ if (opts->fsroot) {
+ skp = smk_import_entry(opts->fsroot, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+ sp->smk_root = skp;
+ }
+ if (opts->fstransmute) {
+ skp = smk_import_entry(opts->fstransmute, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+ sp->smk_root = skp;
+ transmute = true;
+ }
+ }
+
+ /*
+ * Initialize the root inode.
+ */
+ init_inode_smack(inode, sp->smk_root);
+
+ if (transmute) {
+ isp = smack_inode(inode);
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
+ }
+
+ return 0;
+}
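+
+/*
+ * Example (hypothetical label "Pop"): mounting with "smackfstransmute=Pop"
+ * sets smk_root to "Pop" and marks the root inode SMK_INODE_TRANSMUTE,
+ * so objects created under the root can inherit the root label through
+ * transmuting rules.
+ */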
+
+/**
+ * smack_sb_statfs - Smack check on statfs
+ * @dentry: identifies the file system in question
+ *
+ * Returns 0 if current can read the floor of the filesystem,
+ * an error code otherwise
+ */
+static int smack_sb_statfs(struct dentry *dentry)
+{
+ struct superblock_smack *sbp = smack_superblock(dentry->d_sb);
+ int rc;
+ struct smk_audit_info ad;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad);
+ rc = smk_bu_current("statfs", sbp->smk_floor, MAY_READ, rc);
+ return rc;
+}
+
+/*
+ * BPRM hooks
+ */
+
+/**
+ * smack_bprm_creds_for_exec - Update bprm->cred if needed for exec
+ * @bprm: the exec information
+ *
+ * Returns 0 on success, -EPERM if the exec is forbidden, or an error code
+ * from the ptrace rule check otherwise
+ */
+static int smack_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ struct inode *inode = file_inode(bprm->file);
+ struct task_smack *bsp = smack_cred(bprm->cred);
+ struct inode_smack *isp;
+ struct superblock_smack *sbsp;
+ int rc;
+
+ isp = smack_inode(inode);
+ if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
+ return 0;
+
+ sbsp = smack_superblock(inode->i_sb);
+ if ((sbsp->smk_flags & SMK_SB_UNTRUSTED) &&
+ isp->smk_task != sbsp->smk_root)
+ return 0;
+
+ if (bprm->unsafe & LSM_UNSAFE_PTRACE) {
+ struct task_struct *tracer;
+ rc = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (likely(tracer != NULL))
+ rc = smk_ptrace_rule_check(tracer,
+ isp->smk_task,
+ PTRACE_MODE_ATTACH,
+ __func__);
+ rcu_read_unlock();
+
+ if (rc != 0)
+ return rc;
+ }
+ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
+ return -EPERM;
+
+ bsp->smk_task = isp->smk_task;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+ /* Decide if this is a secure exec. */
+ if (bsp->smk_task != bsp->smk_forked)
+ bprm->secureexec = 1;
+
+ return 0;
+}
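+
+/*
+ * Example (hypothetical label "Pop"): a binary carrying SMACK64EXEC=Pop
+ * runs with subject label "Pop" after exec, much as a setuid binary runs
+ * with a different uid, and the exec is marked secure when the label
+ * actually changes.
+ */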
+
+/*
+ * Inode hooks
+ */
+
+/**
+ * smack_inode_alloc_security - allocate an inode blob
+ * @inode: the inode in need of a blob
+ *
+ * Returns 0
+ */
+static int smack_inode_alloc_security(struct inode *inode)
+{
+ struct smack_known *skp = smk_of_current();
+
+ init_inode_smack(inode, skp);
+ return 0;
+}
+
+/**
+ * smack_inode_init_security - copy out the smack from an inode
+ * @inode: the newly created inode
+ * @dir: containing directory object
+ * @qstr: unused
+ * @name: where to put the attribute name
+ * @value: where to put the attribute value
+ * @len: where to put the length of the attribute
+ *
+ * Returns 0 if it all works out, -ENOMEM if there's no memory
+ */
+static int smack_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct inode_smack *issp = smack_inode(inode);
+ struct smack_known *skp = smk_of_task(tsp);
+ struct smack_known *isp = smk_of_inode(inode);
+ struct smack_known *dsp = smk_of_inode(dir);
+ int may;
+
+ if (name)
+ *name = XATTR_SMACK_SUFFIX;
+
+ if (value && len) {
+ /*
+ * If equal, transmuting already occurred in
+ * smack_dentry_create_files_as(). No need to check again.
+ */
+ if (tsp->smk_task != tsp->smk_transmuted) {
+ rcu_read_lock();
+ may = smk_access_entry(skp->smk_known, dsp->smk_known,
+ &skp->smk_rules);
+ rcu_read_unlock();
+ }
+
+ /*
+ * In addition to having smk_task equal to smk_transmuted,
+ * if the access rule allows transmutation and the directory
+ * requests transmutation then by all means transmute.
+ * Mark the inode as changed.
+ */
+ if ((tsp->smk_task == tsp->smk_transmuted) ||
+ (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+ smk_inode_transmutable(dir))) {
+ /*
+ * The caller of smack_dentry_create_files_as()
+ * should have overridden the current cred, so the
+ * inode label was already set correctly in
+ * smack_inode_alloc_security().
+ */
+ if (tsp->smk_task != tsp->smk_transmuted)
+ isp = dsp;
+ issp->smk_flags |= SMK_INODE_CHANGED;
+ }
+
+ *value = kstrdup(isp->smk_known, GFP_NOFS);
+ if (*value == NULL)
+ return -ENOMEM;
+
+ *len = strlen(isp->smk_known);
+ }
+
+ return 0;
+}
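+
+/*
+ * Worked example (hypothetical labels): with an access rule
+ * "Pop Rubble rwxt" loaded and a directory labeled "Rubble" marked
+ * transmuting, a task labeled "Pop" that creates a file there gets the
+ * file labeled "Rubble" rather than "Pop", and the inode is flagged
+ * SMK_INODE_CHANGED so the xattr gets written back.
+ */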
+
+/**
+ * smack_inode_link - Smack check on link
+ * @old_dentry: the existing object
+ * @dir: unused
+ * @new_dentry: the new object
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+{
+ struct smack_known *isp;
+ struct smk_audit_info ad;
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
+
+ isp = smk_of_inode(d_backing_inode(old_dentry));
+ rc = smk_curacc(isp, MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_WRITE, rc);
+
+ if (rc == 0 && d_is_positive(new_dentry)) {
+ isp = smk_of_inode(d_backing_inode(new_dentry));
+ smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
+ rc = smk_curacc(isp, MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_WRITE, rc);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_inode_unlink - Smack check on inode deletion
+ * @dir: containing directory object
+ * @dentry: file to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the object, error code otherwise
+ */
+static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *ip = d_backing_inode(dentry);
+ struct smk_audit_info ad;
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ /*
+ * You need write access to the thing you're unlinking
+ */
+ rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad);
+ rc = smk_bu_inode(ip, MAY_WRITE, rc);
+ if (rc == 0) {
+ /*
+ * You also need write access to the containing directory
+ */
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+ smk_ad_setfield_u_fs_inode(&ad, dir);
+ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dir, MAY_WRITE, rc);
+ }
+ return rc;
+}
+
+/**
+ * smack_inode_rmdir - Smack check on directory deletion
+ * @dir: containing directory object
+ * @dentry: directory to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the directory, error code otherwise
+ */
+static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct smk_audit_info ad;
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ /*
+ * You need write access to the thing you're removing
+ */
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+ if (rc == 0) {
+ /*
+ * You also need write access to the containing directory
+ */
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+ smk_ad_setfield_u_fs_inode(&ad, dir);
+ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dir, MAY_WRITE, rc);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_inode_rename - Smack check on rename
+ * @old_inode: unused
+ * @old_dentry: the old object
+ * @new_inode: unused
+ * @new_dentry: the new object
+ *
+ * Read and write access is required on both the old and
+ * new directories.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_rename(struct inode *old_inode,
+ struct dentry *old_dentry,
+ struct inode *new_inode,
+ struct dentry *new_dentry)
+{
+ int rc;
+ struct smack_known *isp;
+ struct smk_audit_info ad;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
+
+ isp = smk_of_inode(d_backing_inode(old_dentry));
+ rc = smk_curacc(isp, MAY_READWRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_READWRITE, rc);
+
+ if (rc == 0 && d_is_positive(new_dentry)) {
+ isp = smk_of_inode(d_backing_inode(new_dentry));
+ smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
+ rc = smk_curacc(isp, MAY_READWRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_READWRITE, rc);
+ }
+ return rc;
+}
+
+/**
+ * smack_inode_permission - Smack version of permission()
+ * @inode: the inode in question
+ * @mask: the access requested
+ *
+ * This is the important Smack hook.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_permission(struct inode *inode, int mask)
+{
+ struct superblock_smack *sbsp = smack_superblock(inode->i_sb);
+ struct smk_audit_info ad;
+ int no_block = mask & MAY_NOT_BLOCK;
+ int rc;
+
+ mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+ /*
+ * No permission to check. Existence test. Yup, it's there.
+ */
+ if (mask == 0)
+ return 0;
+
+ if (sbsp->smk_flags & SMK_SB_UNTRUSTED) {
+ if (smk_of_inode(inode) != sbsp->smk_root)
+ return -EACCES;
+ }
+
+ /* May be droppable after audit */
+ if (no_block)
+ return -ECHILD;
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
+ smk_ad_setfield_u_fs_inode(&ad, inode);
+ rc = smk_curacc(smk_of_inode(inode), mask, &ad);
+ rc = smk_bu_inode(inode, mask, rc);
+ return rc;
+}
+
+/**
+ * smack_inode_setattr - Smack check for setting attributes
+ * @dentry: the object
+ * @iattr: for the force flag
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct smk_audit_info ad;
+ int rc;
+
+ /*
+ * Need to allow for clearing the setuid bit.
+ */
+ if (iattr->ia_valid & ATTR_FORCE)
+ return 0;
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+ return rc;
+}
+
+/**
+ * smack_inode_getattr - Smack check for getting attributes
+ * @path: path to extract the info from
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getattr(const struct path *path)
+{
+ struct smk_audit_info ad;
+ struct inode *inode = d_backing_inode(path->dentry);
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, *path);
+ rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_inode(inode, MAY_READ, rc);
+ return rc;
+}
+
+/**
+ * smack_inode_setxattr - Smack check for setting xattrs
+ * @mnt_userns: active user namespace
+ * @dentry: the object
+ * @name: name of the attribute
+ * @value: value of the attribute
+ * @size: size of the value
+ * @flags: unused
+ *
+ * This protects the Smack attribute explicitly.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct smk_audit_info ad;
+ struct smack_known *skp;
+ int check_priv = 0;
+ int check_import = 0;
+ int check_star = 0;
+ int rc = 0;
+
+ /*
+ * Check label validity here so import won't fail in post_setxattr
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+ check_priv = 1;
+ check_import = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ check_priv = 1;
+ check_import = 1;
+ check_star = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ check_priv = 1;
+ if (size != TRANS_TRUE_SIZE ||
+ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
+ } else
+ rc = cap_inode_setxattr(dentry, name, value, size, flags);
+
+ if (check_priv && !smack_privileged(CAP_MAC_ADMIN))
+ rc = -EPERM;
+
+ if (rc == 0 && check_import) {
+ skp = size ? smk_import_entry(value, size) : NULL;
+ if (IS_ERR(skp))
+ rc = PTR_ERR(skp);
+ else if (skp == NULL || (check_star &&
+ (skp == &smack_known_star || skp == &smack_known_web)))
+ rc = -EINVAL;
+ }
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ if (rc == 0) {
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_inode_post_setxattr - Apply the Smack update approved above
+ * @dentry: object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: attribute size
+ * @flags: unused
+ *
+ * Set the pointer in the inode blob to the entry found
+ * in the master label list.
+ */
+static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct smack_known *skp;
+ struct inode_smack *isp = smack_inode(d_backing_inode(dentry));
+
+ if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
+ return;
+ }
+
+ if (strcmp(name, XATTR_NAME_SMACK) == 0) {
+ skp = smk_import_entry(value, size);
+ if (!IS_ERR(skp))
+ isp->smk_inode = skp;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
+ skp = smk_import_entry(value, size);
+ if (!IS_ERR(skp))
+ isp->smk_task = skp;
+ } else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ skp = smk_import_entry(value, size);
+ if (!IS_ERR(skp))
+ isp->smk_mmap = skp;
+ }
+
+ return;
+}
+
+/**
+ * smack_inode_getxattr - Smack check on getxattr
+ * @dentry: the object
+ * @name: unused
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getxattr(struct dentry *dentry, const char *name)
+{
+ struct smk_audit_info ad;
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_READ, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_READ, rc);
+ return rc;
+}
+
+/**
+ * smack_inode_removexattr - Smack check on removexattr
+ * @mnt_userns: active user namespace
+ * @dentry: the object
+ * @name: name of the attribute
+ *
+ * Removing the Smack attribute requires CAP_MAC_ADMIN
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
+{
+ struct inode_smack *isp;
+ struct smk_audit_info ad;
+ int rc = 0;
+
+ if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ rc = -EPERM;
+ } else
+ rc = cap_inode_removexattr(mnt_userns, dentry, name);
+
+ if (rc != 0)
+ return rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
+ smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
+
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
+ if (rc != 0)
+ return rc;
+
+ isp = smack_inode(d_backing_inode(dentry));
+ /*
+ * Don't do anything special for these.
+ * XATTR_NAME_SMACKIPIN
+ * XATTR_NAME_SMACKIPOUT
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0) {
+ struct super_block *sbp = dentry->d_sb;
+ struct superblock_smack *sbsp = smack_superblock(sbp);
+
+ isp->smk_inode = sbsp->smk_default;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0)
+ isp->smk_task = NULL;
+ else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0)
+ isp->smk_mmap = NULL;
+ else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+ isp->smk_flags &= ~SMK_INODE_TRANSMUTE;
+
+ return 0;
+}
+
+/**
+ * smack_inode_getsecurity - get smack xattrs
+ * @mnt_userns: active user namespace
+ * @inode: the object
+ * @name: attribute name
+ * @buffer: where to put the result
+ * @alloc: whether to allocate a duplicate of the label in @buffer
+ *
+ * Returns the size of the attribute or an error code
+ */
+static int smack_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc)
+{
+ struct socket_smack *ssp;
+ struct socket *sock;
+ struct super_block *sbp;
+ struct inode *ip = (struct inode *)inode;
+ struct smack_known *isp;
+ struct inode_smack *ispp;
+ size_t label_len;
+ char *label = NULL;
+
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ isp = smk_of_inode(inode);
+ } else if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
+ ispp = smack_inode(inode);
+ if (ispp->smk_flags & SMK_INODE_TRANSMUTE)
+ label = TRANS_TRUE;
+ else
+ label = "";
+ } else {
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ sbp = ip->i_sb;
+ if (sbp->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
+
+ sock = SOCKET_I(ip);
+ if (sock == NULL || sock->sk == NULL)
+ return -EOPNOTSUPP;
+
+ ssp = sock->sk->sk_security;
+
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ isp = ssp->smk_in;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+ isp = ssp->smk_out;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ if (!label)
+ label = isp->smk_known;
+
+ label_len = strlen(label);
+
+ if (alloc) {
+ *buffer = kstrdup(label, GFP_KERNEL);
+ if (*buffer == NULL)
+ return -ENOMEM;
+ }
+
+ return label_len;
+}
+
+
+/**
+ * smack_inode_listsecurity - list the Smack attributes
+ * @inode: the object
+ * @buffer: where they go
+ * @buffer_size: size of buffer
+ */
+static int smack_inode_listsecurity(struct inode *inode, char *buffer,
+ size_t buffer_size)
+{
+ int len = sizeof(XATTR_NAME_SMACK);
+
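+ /*
+ * sizeof() intentionally includes the trailing NUL, which listxattr
+ * uses as the name separator.
+ */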
+ if (buffer != NULL && len <= buffer_size)
+ memcpy(buffer, XATTR_NAME_SMACK, len);
+
+ return len;
+}
+
+/**
+ * smack_inode_getsecid - Extract inode's security id
+ * @inode: inode to extract the info from
+ * @secid: where result will be saved
+ */
+static void smack_inode_getsecid(struct inode *inode, u32 *secid)
+{
+ struct smack_known *skp = smk_of_inode(inode);
+
+ *secid = skp->smk_secid;
+}
+
+/*
+ * File Hooks
+ */
+
+/*
+ * There is no smack_file_permission hook
+ *
+ * Should access checks be done on each read or write?
+ * UNICOS and SELinux say yes.
+ * Trusted Solaris, Trusted Irix, and just about everyone else says no.
+ *
+ * I'll say no for now. Smack does not do the frequent
+ * label changing that SELinux does.
+ */
+
+/**
+ * smack_file_alloc_security - assign a file security blob
+ * @file: the object
+ *
+ * The security blob for a file is a pointer to the master
+ * label list, so no allocation is done.
+ *
+ * f_security is the owner security information. It
+ * isn't used on file access checks; it's for send_sigio.
+ *
+ * Returns 0
+ */
+static int smack_file_alloc_security(struct file *file)
+{
+ struct smack_known **blob = smack_file(file);
+
+ *blob = smk_of_current();
+ return 0;
+}
+
+/**
+ * smack_file_ioctl - Smack check on ioctls
+ * @file: the object
+ * @cmd: what to do
+ * @arg: unused
+ *
+ * Relies heavily on the correct use of the ioctl command conventions.
+ *
+ * Returns 0 if allowed, error code otherwise
+ */
+static int smack_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc = 0;
+ struct smk_audit_info ad;
+ struct inode *inode = file_inode(file);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
+ rc = smk_bu_file(file, MAY_WRITE, rc);
+ }
+
+ if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) {
+ rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_file(file, MAY_READ, rc);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_file_lock - Smack check on file locking
+ * @file: the object
+ * @cmd: unused
+ *
+ * Returns 0 if current has lock access, error code otherwise
+ */
+static int smack_file_lock(struct file *file, unsigned int cmd)
+{
+ struct smk_audit_info ad;
+ int rc;
+ struct inode *inode = file_inode(file);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
+ rc = smk_bu_file(file, MAY_LOCK, rc);
+ return rc;
+}
+
+/**
+ * smack_file_fcntl - Smack check on fcntl
+ * @file: the object
+ * @cmd: what action to check
+ * @arg: unused
+ *
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_fcntl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct smk_audit_info ad;
+ int rc = 0;
+ struct inode *inode = file_inode(file);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ switch (cmd) {
+ case F_GETLK:
+ break;
+ case F_SETLK:
+ case F_SETLKW:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad);
+ rc = smk_bu_file(file, MAY_LOCK, rc);
+ break;
+ case F_SETOWN:
+ case F_SETSIG:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad);
+ rc = smk_bu_file(file, MAY_WRITE, rc);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * smack_mmap_file - Check permissions for a mmap operation.
+ * @file: contains the file structure for file to map (may be NULL).
+ * @reqprot: contains the protection requested by the application.
+ * @prot: contains the protection that will be applied by the kernel.
+ * @flags: contains the operational flags.
+ *
+ * The @file may be NULL, e.g. if mapping anonymous memory.
+ *
+ * Return 0 if permission is granted.
+ */
+static int smack_mmap_file(struct file *file,
+ unsigned long reqprot, unsigned long prot,
+ unsigned long flags)
+{
+ struct smack_known *skp;
+ struct smack_known *mkp;
+ struct smack_rule *srp;
+ struct task_smack *tsp;
+ struct smack_known *okp;
+ struct inode_smack *isp;
+ struct superblock_smack *sbsp;
+ int may;
+ int mmay;
+ int tmay;
+ int rc;
+
+ if (file == NULL)
+ return 0;
+
+ if (unlikely(IS_PRIVATE(file_inode(file))))
+ return 0;
+
+ isp = smack_inode(file_inode(file));
+ if (isp->smk_mmap == NULL)
+ return 0;
+ sbsp = smack_superblock(file_inode(file)->i_sb);
+ if (sbsp->smk_flags & SMK_SB_UNTRUSTED &&
+ isp->smk_mmap != sbsp->smk_root)
+ return -EACCES;
+ mkp = isp->smk_mmap;
+
+ tsp = smack_cred(current_cred());
+ skp = smk_of_current();
+ rc = 0;
+
+ rcu_read_lock();
+ /*
+ * For each Smack rule associated with the subject
+ * label verify that the SMACK64MMAP also has access
+ * to that rule's object label.
+ */
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
+ okp = srp->smk_object;
+ /*
+ * Matching labels always allows access.
+ */
+ if (mkp->smk_known == okp->smk_known)
+ continue;
+ /*
+ * If there is a matching local rule take
+ * that into account as well.
+ */
+ may = smk_access_entry(srp->smk_subject->smk_known,
+ okp->smk_known,
+ &tsp->smk_rules);
+ if (may == -ENOENT)
+ may = srp->smk_access;
+ else
+ may &= srp->smk_access;
+ /*
+ * If may is zero the SMACK64MMAP subject can't
+ * possibly have less access.
+ */
+ if (may == 0)
+ continue;
+
+ /*
+ * Fetch the global list entry.
+ * If there isn't one a SMACK64MMAP subject
+ * can't have as much access as current.
+ */
+ mmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+ &mkp->smk_rules);
+ if (mmay == -ENOENT) {
+ rc = -EACCES;
+ break;
+ }
+ /*
+ * If there is a local entry it modifies the
+ * potential access, too.
+ */
+ tmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+ &tsp->smk_rules);
+ if (tmay != -ENOENT)
+ mmay &= tmay;
+
+ /*
+ * If there is any access available to current that is
+ * not available to a SMACK64MMAP subject
+ * deny access.
+ */
+ if ((may | mmay) != mmay) {
+ rc = -EACCES;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return rc;
+}
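+
+/*
+ * Worked example (hypothetical labels): if current runs as "Snoop" with
+ * a rule "Snoop Secret r" loaded, and the file carries SMACK64MMAP=Guard
+ * but no rule grants "Guard" read access to "Secret", the loop above
+ * finds access available to current that "Guard" lacks and refuses the
+ * mmap with -EACCES.
+ */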
+
+/**
+ * smack_file_set_fowner - set the file security blob value
+ * @file: object in question
+ *
+ */
+static void smack_file_set_fowner(struct file *file)
+{
+ struct smack_known **blob = smack_file(file);
+
+ *blob = smk_of_current();
+}
+
+/**
+ * smack_file_send_sigiotask - Smack on sigio
+ * @tsk: The target task
+ * @fown: the object the signal comes from
+ * @signum: unused
+ *
+ * Allow a privileged task to get signals even if it shouldn't
+ *
+ * Returns 0 if a subject with the object's smack could
+ * write to the task, an error code otherwise.
+ */
+static int smack_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int signum)
+{
+ struct smack_known **blob;
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task(smack_cred(tsk->cred));
+ const struct cred *tcred;
+ struct file *file;
+ int rc;
+ struct smk_audit_info ad;
+
+ /*
+ * struct fown_struct is never outside the context of a struct file
+ */
+ file = container_of(fown, struct file, f_owner);
+
+ /* we don't log here as rc can be overridden */
+ blob = smack_file(file);
+ skp = *blob;
+ rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
+ rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
+
+ rcu_read_lock();
+ tcred = __task_cred(tsk);
+ if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred))
+ rc = 0;
+ rcu_read_unlock();
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, tsk);
+ smack_log(skp->smk_known, tkp->smk_known, MAY_DELIVER, rc, &ad);
+ return rc;
+}
+
+/**
+ * smack_file_receive - Smack file receive check
+ * @file: the object
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_receive(struct file *file)
+{
+ int rc;
+ int may = 0;
+ struct smk_audit_info ad;
+ struct inode *inode = file_inode(file);
+ struct socket *sock;
+ struct task_smack *tsp;
+ struct socket_smack *ssp;
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+
+ if (inode->i_sb->s_magic == SOCKFS_MAGIC) {
+ sock = SOCKET_I(inode);
+ ssp = sock->sk->sk_security;
+ tsp = smack_cred(current_cred());
+ /*
+ * If the receiving process can't write to the
+ * passed socket or if the passed socket can't
+ * write to the receiving process don't accept
+ * the passed socket.
+ */
+ rc = smk_access(tsp->smk_task, ssp->smk_out, MAY_WRITE, &ad);
+ rc = smk_bu_file(file, may, rc);
+ if (rc < 0)
+ return rc;
+ rc = smk_access(ssp->smk_in, tsp->smk_task, MAY_WRITE, &ad);
+ rc = smk_bu_file(file, may, rc);
+ return rc;
+ }
+ /*
+ * This code relies on bitmasks.
+ */
+ if (file->f_mode & FMODE_READ)
+ may = MAY_READ;
+ if (file->f_mode & FMODE_WRITE)
+ may |= MAY_WRITE;
+
+ rc = smk_curacc(smk_of_inode(inode), may, &ad);
+ rc = smk_bu_file(file, may, rc);
+ return rc;
+}
+
+/**
+ * smack_file_open - Smack dentry open processing
+ * @file: the object
+ *
+ * Set the security blob in the file structure.
+ * Allow the open only if the task has read access. There are
+ * many read operations (e.g. fstat) that you can do with an
+ * fd even if you have the file open write-only.
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_open(struct file *file)
+{
+ struct task_smack *tsp = smack_cred(file->f_cred);
+ struct inode *inode = file_inode(file);
+ struct smk_audit_info ad;
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
+
+ return rc;
+}
+
+/*
+ * Task hooks
+ */
+
+/**
+ * smack_cred_alloc_blank - "allocate" blank task-level security credentials
+ * @cred: the new credentials
+ * @gfp: the atomicity of any memory allocations
+ *
+ * Prepare a blank set of credentials for modification. This must allocate all
+ * the memory the LSM module might require such that cred_transfer() can
+ * complete without error.
+ */
+static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ init_task_smack(smack_cred(cred), NULL, NULL);
+ return 0;
+}
+
+
+/**
+ * smack_cred_free - "free" task-level security credentials
+ * @cred: the credentials in question
+ *
+ */
+static void smack_cred_free(struct cred *cred)
+{
+ struct task_smack *tsp = smack_cred(cred);
+ struct smack_rule *rp;
+ struct list_head *l;
+ struct list_head *n;
+
+ smk_destroy_label_list(&tsp->smk_relabel);
+
+ list_for_each_safe(l, n, &tsp->smk_rules) {
+ rp = list_entry(l, struct smack_rule, list);
+ list_del(&rp->list);
+ kmem_cache_free(smack_rule_cache, rp);
+ }
+}
+
+/**
+ * smack_cred_prepare - prepare new set of credentials for modification
+ * @new: the new credentials
+ * @old: the original credentials
+ * @gfp: the atomicity of any memory allocations
+ *
+ * Prepare a new set of credentials for modification.
+ */
+static int smack_cred_prepare(struct cred *new, const struct cred *old,
+ gfp_t gfp)
+{
+ struct task_smack *old_tsp = smack_cred(old);
+ struct task_smack *new_tsp = smack_cred(new);
+ int rc;
+
+ init_task_smack(new_tsp, old_tsp->smk_task, old_tsp->smk_task);
+
+ rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
+ if (rc != 0)
+ return rc;
+
+ rc = smk_copy_relabel(&new_tsp->smk_relabel, &old_tsp->smk_relabel,
+ gfp);
+ return rc;
+}
+
+/**
+ * smack_cred_transfer - Transfer the old credentials to the new credentials
+ * @new: the new credentials
+ * @old: the original credentials
+ *
+ * Fill in a set of blank credentials from another set of credentials.
+ */
+static void smack_cred_transfer(struct cred *new, const struct cred *old)
+{
+ struct task_smack *old_tsp = smack_cred(old);
+ struct task_smack *new_tsp = smack_cred(new);
+
+ new_tsp->smk_task = old_tsp->smk_task;
+ new_tsp->smk_forked = old_tsp->smk_task;
+ mutex_init(&new_tsp->smk_rules_lock);
+ INIT_LIST_HEAD(&new_tsp->smk_rules);
+
+ /* cbs copy rule list */
+}
+
+/**
+ * smack_cred_getsecid - get the secid corresponding to a creds structure
+ * @cred: the object creds
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the smack label.
+ */
+static void smack_cred_getsecid(const struct cred *cred, u32 *secid)
+{
+ struct smack_known *skp;
+
+ rcu_read_lock();
+ skp = smk_of_task(smack_cred(cred));
+ *secid = skp->smk_secid;
+ rcu_read_unlock();
+}
+
+/**
+ * smack_kernel_act_as - Set the subjective context in a set of credentials
+ * @new: points to the set of credentials to be modified.
+ * @secid: specifies the security ID to be set
+ *
+ * Set the security data for a kernel service.
+ */
+static int smack_kernel_act_as(struct cred *new, u32 secid)
+{
+ struct task_smack *new_tsp = smack_cred(new);
+
+ new_tsp->smk_task = smack_from_secid(secid);
+ return 0;
+}
+
+/**
+ * smack_kernel_create_files_as - Set the file creation label in a set of creds
+ * @new: points to the set of credentials to be modified
+ * @inode: points to the inode to use as a reference
+ *
+ * Set the file creation context in a set of credentials to the same
+ * as the objective context of the specified inode
+ */
+static int smack_kernel_create_files_as(struct cred *new,
+ struct inode *inode)
+{
+ struct inode_smack *isp = smack_inode(inode);
+ struct task_smack *tsp = smack_cred(new);
+
+ tsp->smk_forked = isp->smk_inode;
+ tsp->smk_task = tsp->smk_forked;
+ return 0;
+}
+
+/**
+ * smk_curacc_on_task - helper to log task related access
+ * @p: the task object
+ * @access: the access requested
+ * @caller: name of the calling function for audit
+ *
+ * Return 0 if access is permitted
+ */
+static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
+{
+ struct smk_audit_info ad;
+ struct smack_known *skp = smk_of_task_struct_obj(p);
+ int rc;
+
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, p);
+ rc = smk_curacc(skp, access, &ad);
+ rc = smk_bu_task(p, access, rc);
+ return rc;
+}
+
+/**
+ * smack_task_setpgid - Smack check on setting pgid
+ * @p: the task object
+ * @pgid: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getpgid - Smack access check for getpgid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getpgid(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_getsid - Smack access check for getsid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getsid(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_current_getsecid_subj - get the subjective secid of the current task
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the task's subjective smack label.
+ */
+static void smack_current_getsecid_subj(u32 *secid)
+{
+ struct smack_known *skp = smk_of_current();
+
+ *secid = skp->smk_secid;
+}
+
+/**
+ * smack_task_getsecid_obj - get the objective secid of the task
+ * @p: the task
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the task's objective smack label.
+ */
+static void smack_task_getsecid_obj(struct task_struct *p, u32 *secid)
+{
+ struct smack_known *skp = smk_of_task_struct_obj(p);
+
+ *secid = skp->smk_secid;
+}
+
+/**
+ * smack_task_setnice - Smack check on setting nice
+ * @p: the task object
+ * @nice: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setnice(struct task_struct *p, int nice)
+{
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_setioprio - Smack check on setting ioprio
+ * @p: the task object
+ * @ioprio: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getioprio - Smack check on reading ioprio
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getioprio(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_setscheduler - Smack check on setting scheduler
+ * @p: the task object
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setscheduler(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_getscheduler - Smack check on reading scheduler
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getscheduler(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_READ, __func__);
+}
+
+/**
+ * smack_task_movememory - Smack check on moving memory
+ * @p: the task object
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_movememory(struct task_struct *p)
+{
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
+}
+
+/**
+ * smack_task_kill - Smack check on signal delivery
+ * @p: the task object
+ * @info: unused
+ * @sig: unused
+ * @cred: identifies the cred to use in lieu of current's
+ *
+ * Return 0 if write access is permitted
+ *
+ */
+static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+{
+ struct smk_audit_info ad;
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task_struct_obj(p);
+ int rc;
+
+ if (!sig)
+ return 0; /* null signal; existence test */
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, p);
+ /*
+ * Sending a signal requires that the sender
+ * can write the receiver.
+ */
+ if (cred == NULL) {
+ rc = smk_curacc(tkp, MAY_DELIVER, &ad);
+ rc = smk_bu_task(p, MAY_DELIVER, rc);
+ return rc;
+ }
+ /*
+ * If the cred isn't NULL we're dealing with some USB IO
+ * specific behavior. This is not clean. For one thing
+ * we can't take privilege into account.
+ */
+ skp = smk_of_task(smack_cred(cred));
+ rc = smk_access(skp, tkp, MAY_DELIVER, &ad);
+ rc = smk_bu_note("USB signal", skp, tkp, MAY_DELIVER, rc);
+ return rc;
+}
+
+/**
+ * smack_task_to_inode - copy task smack into the inode blob
+ * @p: task to copy from
+ * @inode: inode to copy to
+ *
+ * Sets the smack pointer in the inode security blob
+ */
+static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+{
+ struct inode_smack *isp = smack_inode(inode);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
+
+ isp->smk_inode = skp;
+ isp->smk_flags |= SMK_INODE_INSTANT;
+}
+
+/*
+ * Socket hooks.
+ */
+
+/**
+ * smack_sk_alloc_security - Allocate a socket blob
+ * @sk: the socket
+ * @family: unused
+ * @gfp_flags: memory allocation flags
+ *
+ * Assign the socket's Smack pointers from the current task
+ *
+ * Returns 0 on success, -ENOMEM if there's no memory
+ */
+static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
+{
+ struct smack_known *skp = smk_of_current();
+ struct socket_smack *ssp;
+
+ ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
+ if (ssp == NULL)
+ return -ENOMEM;
+
+ /*
+ * Sockets created by kernel threads receive the web label.
+ */
+ if (unlikely(current->flags & PF_KTHREAD)) {
+ ssp->smk_in = &smack_known_web;
+ ssp->smk_out = &smack_known_web;
+ } else {
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
+ }
+ ssp->smk_packet = NULL;
+
+ sk->sk_security = ssp;
+
+ return 0;
+}
+
+/**
+ * smack_sk_free_security - Free a socket blob
+ * @sk: the socket
+ *
+ * Frees the socket security blob
+ */
+static void smack_sk_free_security(struct sock *sk)
+{
+#ifdef SMACK_IPV6_PORT_LABELING
+ struct smk_port_label *spp;
+
+ if (sk->sk_family == PF_INET6) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_sock != sk)
+ continue;
+ spp->smk_can_reuse = 1;
+ break;
+ }
+ rcu_read_unlock();
+ }
+#endif
+ kfree(sk->sk_security);
+}
+
+/**
+ * smack_sk_clone_security - Copy security context
+ * @sk: the old socket
+ * @newsk: the new socket
+ *
+ * Copy the security context of the old socket to the newly cloned socket
+ */
+static void smack_sk_clone_security(const struct sock *sk, struct sock *newsk)
+{
+ struct socket_smack *ssp_old = sk->sk_security;
+ struct socket_smack *ssp_new = newsk->sk_security;
+
+ *ssp_new = *ssp_old;
+}
+
+/**
+ * smack_ipv4host_label - check host based restrictions
+ * @sip: the object end
+ *
+ * looks for host based access restrictions
+ *
+ * This version will only be appropriate for really small sets of single label
+ * hosts. The caller is responsible for ensuring that the RCU read lock is
+ * taken before calling this function.
+ *
+ * Returns the label of the far end or NULL if it's not special.
+ */
+static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
+{
+ struct smk_net4addr *snp;
+ struct in_addr *siap = &sip->sin_addr;
+
+ if (siap->s_addr == 0)
+ return NULL;
+
+ list_for_each_entry_rcu(snp, &smk_net4addr_list, list)
+ /*
+ * we break after finding the first match because
+ * the list is sorted from longest to shortest mask
+ * so we have found the most specific match
+ */
+ if (snp->smk_host.s_addr ==
+ (siap->s_addr & snp->smk_mask.s_addr))
+ return snp->smk_label;
+
+ return NULL;
+}
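+
+/*
+ * Example (hypothetical label "Pop"): a host list entry covering
+ * 192.168.1.0/24 with label "Pop" makes this function return "Pop" for
+ * any address in that subnet; the longest-mask-first ordering means the
+ * most specific entry wins.
+ */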
+
+/**
+ * smk_ipv6_localhost - Check for local ipv6 host address
+ * @sip: the address
+ *
+ * Returns boolean true if this is the localhost address
+ */
+static bool smk_ipv6_localhost(struct sockaddr_in6 *sip)
+{
+ __be16 *be16p = (__be16 *)&sip->sin6_addr;
+ __be32 *be32p = (__be32 *)&sip->sin6_addr;
+
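+ /* True only for the IPv6 loopback address ::1. */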
+ if (be32p[0] == 0 && be32p[1] == 0 && be32p[2] == 0 && be16p[6] == 0 &&
+ ntohs(be16p[7]) == 1)
+ return true;
+ return false;
+}
+
+/**
+ * smack_ipv6host_label - check host based restrictions
+ * @sip: the object end
+ *
+ * looks for host based access restrictions
+ *
+ * This version will only be appropriate for really small sets of single label
+ * hosts. The caller is responsible for ensuring that the RCU read lock is
+ * taken before calling this function.
+ *
+ * Returns the label of the far end or NULL if it's not special.
+ */
+static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+{
+ struct smk_net6addr *snp;
+ struct in6_addr *sap = &sip->sin6_addr;
+ int i;
+ int found = 0;
+
+ /*
+ * It's local. Don't look for a host label.
+ */
+ if (smk_ipv6_localhost(sip))
+ return NULL;
+
+ list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
+ /*
+ * If the label is NULL the entry has
+ * been renounced. Ignore it.
+ */
+ if (snp->smk_label == NULL)
+ continue;
+ /*
+ * we break after finding the first match because
+ * the list is sorted from longest to shortest mask
+ * so we have found the most specific match
+ */
+ for (found = 1, i = 0; i < 8; i++) {
+ if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=
+ snp->smk_host.s6_addr16[i]) {
+ found = 0;
+ break;
+ }
+ }
+ if (found)
+ return snp->smk_label;
+ }
+
+ return NULL;
+}
+
+/**
+ * smack_netlbl_add - Set the secattr on a socket
+ * @sk: the socket
+ *
+ * Attach the outbound smack value (smk_out) to the socket.
+ *
+ * Returns 0 on success or an error code
+ */
+static int smack_netlbl_add(struct sock *sk)
+{
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp = ssp->smk_out;
+ int rc;
+
+ local_bh_disable();
+ bh_lock_sock_nested(sk);
+
+ rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel);
+ switch (rc) {
+ case 0:
+ ssp->smk_state = SMK_NETLBL_LABELED;
+ break;
+ case -EDESTADDRREQ:
+ ssp->smk_state = SMK_NETLBL_REQSKB;
+ rc = 0;
+ break;
+ }
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+
+ return rc;
+}
+
+/**
+ * smack_netlbl_delete - Remove the secattr from a socket
+ * @sk: the socket
+ *
+ * Remove the outbound smack value from a socket
+ */
+static void smack_netlbl_delete(struct sock *sk)
+{
+ struct socket_smack *ssp = sk->sk_security;
+
+ /*
+ * Take the label off the socket if one is set.
+ */
+ if (ssp->smk_state != SMK_NETLBL_LABELED)
+ return;
+
+ local_bh_disable();
+ bh_lock_sock_nested(sk);
+ netlbl_sock_delattr(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ ssp->smk_state = SMK_NETLBL_UNLABELED;
+}
+
+/**
+ * smk_ipv4_check - Perform IPv4 host access checks
+ * @sk: the socket
+ * @sap: the destination address
+ *
+ * Set the correct secattr for the given socket based on the destination
+ * address and perform any outbound access checks needed.
+ *
+ * Returns 0 on success or an error code.
+ *
+ */
+static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap)
+{
+ struct smack_known *skp;
+ int rc = 0;
+ struct smack_known *hkp;
+ struct socket_smack *ssp = sk->sk_security;
+ struct smk_audit_info ad;
+
+ rcu_read_lock();
+ hkp = smack_ipv4host_label(sap);
+ if (hkp != NULL) {
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sap->sin_family;
+ ad.a.u.net->dport = sap->sin_port;
+ ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
+#endif
+ skp = ssp->smk_out;
+ rc = smk_access(skp, hkp, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 host check", skp, hkp, MAY_WRITE, rc);
+ /*
+ * Clear the socket netlabel if it's set.
+ */
+ if (!rc)
+ smack_netlbl_delete(sk);
+ }
+ rcu_read_unlock();
+
+ return rc;
+}
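+
+/*
+ * Note: when the far end is a recorded single-label host the write check
+ * above stands in for packet labeling, so the netlabel (CIPSO) option is
+ * removed and traffic to that host goes out unlabeled.
+ */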
+
+/**
+ * smk_ipv6_check - check Smack access
+ * @subject: subject Smack label
+ * @object: object Smack label
+ * @address: address
+ * @act: the action being taken
+ *
+ * Check an IPv6 access
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smk_ipv6_check(struct smack_known *subject,
+ struct smack_known *object,
+ struct sockaddr_in6 *address, int act)
+{
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = PF_INET6;
+ ad.a.u.net->dport = address->sin6_port;
+ if (act == SMK_RECEIVING)
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
+ else
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
+#endif
+ rc = smk_access(subject, object, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
+ return rc;
+}
+
+#ifdef SMACK_IPV6_PORT_LABELING
+/**
+ * smk_ipv6_port_label - Smack port access table management
+ * @sock: socket
+ * @address: address
+ *
+ * Create or update the port list entry
+ */
+static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_in6 *addr6;
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct smk_port_label *spp;
+ unsigned short port = 0;
+
+ if (address == NULL) {
+ /*
+ * This operation is changing the Smack information
+ * on the bound socket. Take the changes to the port
+ * as well.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) {
+ if (sk != spp->smk_sock)
+ continue;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ rcu_read_unlock();
+ return;
+ }
+ /*
+ * A NULL address is only used for updating existing
+ * bound entries. If there isn't one, it's OK.
+ */
+ rcu_read_unlock();
+ return;
+ }
+
+ addr6 = (struct sockaddr_in6 *)address;
+ port = ntohs(addr6->sin6_port);
+ /*
+ * This is a special case that is safely ignored.
+ */
+ if (port == 0)
+ return;
+
+ /*
+ * Look for an existing port list entry.
+ * This is an indication that a port is getting reused.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port || spp->smk_sock_type != sock->type)
+ continue;
+ if (spp->smk_can_reuse != 1) {
+ rcu_read_unlock();
+ return;
+ }
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ spp->smk_can_reuse = 0;
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+ /*
+ * A new port entry is required.
+ */
+ spp = kzalloc(sizeof(*spp), GFP_KERNEL);
+ if (spp == NULL)
+ return;
+
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ spp->smk_sock_type = sock->type;
+ spp->smk_can_reuse = 0;
+
+ mutex_lock(&smack_ipv6_lock);
+ list_add_rcu(&spp->list, &smk_ipv6_port_list);
+ mutex_unlock(&smack_ipv6_lock);
+ return;
+}
+
+/**
+ * smk_ipv6_port_check - check Smack port access
+ * @sk: socket
+ * @address: address
+ * @act: the action being taken
+ *
+ * Checks Smack access for the port, consulting the port list when the
+ * address is local.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+ int act)
+{
+ struct smk_port_label *spp;
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp = NULL;
+ unsigned short port;
+ struct smack_known *object;
+
+ if (act == SMK_RECEIVING) {
+ skp = smack_ipv6host_label(address);
+ object = ssp->smk_in;
+ } else {
+ skp = ssp->smk_out;
+ object = smack_ipv6host_label(address);
+ }
+
+ /*
+ * The other end is a single label host.
+ */
+ if (skp != NULL && object != NULL)
+ return smk_ipv6_check(skp, object, address, act);
+ if (skp == NULL)
+ skp = smack_net_ambient;
+ if (object == NULL)
+ object = smack_net_ambient;
+
+ /*
+ * It's remote, so port lookup does no good.
+ */
+ if (!smk_ipv6_localhost(address))
+ return smk_ipv6_check(skp, object, address, act);
+
+ /*
+ * It's local so the send check has to have passed.
+ */
+ if (act == SMK_RECEIVING)
+ return 0;
+
+ port = ntohs(address->sin6_port);
+ rcu_read_lock();
+ list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port || spp->smk_sock_type != sk->sk_type)
+ continue;
+ object = spp->smk_in;
+ if (act == SMK_CONNECTING)
+ ssp->smk_packet = spp->smk_out;
+ break;
+ }
+ rcu_read_unlock();
+
+ return smk_ipv6_check(skp, object, address, act);
+}
+#endif
+
+/**
+ * smack_inode_setsecurity - set smack xattrs
+ * @inode: the object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: size of the attribute
+ * @flags: unused
+ *
+ * Sets the named attribute in the appropriate blob
+ *
+ * Returns 0 on success, or an error code
+ */
+static int smack_inode_setsecurity(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct smack_known *skp;
+ struct inode_smack *nsp = smack_inode(inode);
+ struct socket_smack *ssp;
+ struct socket *sock;
+ int rc = 0;
+
+ if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ return -EINVAL;
+
+ skp = smk_import_entry(value, size);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ nsp->smk_inode = skp;
+ nsp->smk_flags |= SMK_INODE_INSTANT;
+ return 0;
+ }
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ if (inode->i_sb->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
+
+ sock = SOCKET_I(inode);
+ if (sock == NULL || sock->sk == NULL)
+ return -EOPNOTSUPP;
+
+ ssp = sock->sk->sk_security;
+
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ ssp->smk_in = skp;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
+ ssp->smk_out = skp;
+ if (sock->sk->sk_family == PF_INET) {
+ rc = smack_netlbl_add(sock->sk);
+ if (rc != 0)
+ printk(KERN_WARNING
+ "Smack: \"%s\" netlbl error %d.\n",
+ __func__, -rc);
+ }
+ } else
+ return -EOPNOTSUPP;
+
+#ifdef SMACK_IPV6_PORT_LABELING
+ if (sock->sk->sk_family == PF_INET6)
+ smk_ipv6_port_label(sock, NULL);
+#endif
+
+ return 0;
+}
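+
+/*
+ * Userspace example (hypothetical label "Pop"): socket labels reach this
+ * path by setting the xattr directly on the socket descriptor:
+ *
+ *	int sk = socket(AF_INET, SOCK_STREAM, 0);
+ *	fsetxattr(sk, "security.SMACK64IPOUT", "Pop", 3, 0);
+ */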
+
+/**
+ * smack_socket_post_create - finish socket setup
+ * @sock: the socket
+ * @family: protocol family
+ * @type: unused
+ * @protocol: unused
+ * @kern: unused
+ *
+ * Sets the netlabel information on the socket
+ *
+ * Returns 0 on success, an error code otherwise
+ */
+static int smack_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ struct socket_smack *ssp;
+
+ if (sock->sk == NULL)
+ return 0;
+
+ /*
+ * Sockets created by kernel threads receive the web label.
+ */
+ if (unlikely(current->flags & PF_KTHREAD)) {
+ ssp = sock->sk->sk_security;
+ ssp->smk_in = &smack_known_web;
+ ssp->smk_out = &smack_known_web;
+ }
+
+ if (family != PF_INET)
+ return 0;
+ /*
+ * Set the outbound netlbl.
+ */
+ return smack_netlbl_add(sock->sk);
+}
+
+/**
+ * smack_socket_socketpair - create socket pair
+ * @socka: one socket
+ * @sockb: another socket
+ *
+ * Cross reference the peer labels for SO_PEERSEC
+ *
+ * Returns 0
+ */
+static int smack_socket_socketpair(struct socket *socka,
+ struct socket *sockb)
+{
+ struct socket_smack *asp = socka->sk->sk_security;
+ struct socket_smack *bsp = sockb->sk->sk_security;
+
+ asp->smk_packet = bsp->smk_out;
+ bsp->smk_packet = asp->smk_out;
+
+ return 0;
+}
+
+#ifdef SMACK_IPV6_PORT_LABELING
+/**
+ * smack_socket_bind - record port binding information.
+ * @sock: the socket
+ * @address: the port address
+ * @addrlen: size of the address
+ *
+ * Records the label bound to a port.
+ *
+ * Returns 0 on success, an error code otherwise
+ */
+static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ if (sock->sk != NULL && sock->sk->sk_family == PF_INET6) {
+ if (addrlen < SIN6_LEN_RFC2133 ||
+ address->sa_family != AF_INET6)
+ return -EINVAL;
+ smk_ipv6_port_label(sock, address);
+ }
+ return 0;
+}
+#endif /* SMACK_IPV6_PORT_LABELING */
+
+/**
+ * smack_socket_connect - connect access check
+ * @sock: the socket
+ * @sap: the other end
+ * @addrlen: size of sap
+ *
+ * Verifies that a connection may be possible
+ *
+ * Returns 0 on success, an error code otherwise
+ */
+static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+ int addrlen)
+{
+ int rc = 0;
+
+ if (sock->sk == NULL)
+ return 0;
+ if (sock->sk->sk_family != PF_INET &&
+ (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
+ return 0;
+ if (addrlen < offsetofend(struct sockaddr, sa_family))
+ return 0;
+ if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+ struct smack_known *rsp = NULL;
+
+ if (addrlen < SIN6_LEN_RFC2133)
+ return 0;
+ if (__is_defined(SMACK_IPV6_SECMARK_LABELING))
+ rsp = smack_ipv6host_label(sip);
+ if (rsp != NULL) {
+ struct socket_smack *ssp = sock->sk->sk_security;
+
+ rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
+ SMK_CONNECTING);
+ }
+#ifdef SMACK_IPV6_PORT_LABELING
+ rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+#endif
+
+ return rc;
+ }
+ if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
+ return 0;
+ rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap);
+ return rc;
+}
+
+/**
+ * smack_flags_to_may - convert S_ to MAY_ values
+ * @flags: the S_ value
+ *
+ * Returns the equivalent MAY_ value
+ */
+static int smack_flags_to_may(int flags)
+{
+ int may = 0;
+
+ if (flags & S_IRUGO)
+ may |= MAY_READ;
+ if (flags & S_IWUGO)
+ may |= MAY_WRITE;
+ if (flags & S_IXUGO)
+ may |= MAY_EXEC;
+
+ return may;
+}
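+
+/*
+ * For example, smack_flags_to_may(S_IRUGO | S_IWUGO) yields
+ * MAY_READ | MAY_WRITE, the MAY_READWRITE access mode used by
+ * the IPC checks below.
+ */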
+
+/**
+ * smack_msg_msg_alloc_security - Set the security blob for msg_msg
+ * @msg: the object
+ *
+ * Returns 0
+ */
+static int smack_msg_msg_alloc_security(struct msg_msg *msg)
+{
+ struct smack_known **blob = smack_msg_msg(msg);
+
+ *blob = smk_of_current();
+ return 0;
+}
+
+/**
+ * smack_of_ipc - the smack pointer for the ipc
+ * @isp: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static struct smack_known *smack_of_ipc(struct kern_ipc_perm *isp)
+{
+ struct smack_known **blob = smack_ipc(isp);
+
+ return *blob;
+}
+
+/**
+ * smack_ipc_alloc_security - Set the security blob for ipc
+ * @isp: the object
+ *
+ * Returns 0
+ */
+static int smack_ipc_alloc_security(struct kern_ipc_perm *isp)
+{
+ struct smack_known **blob = smack_ipc(isp);
+
+ *blob = smk_of_current();
+ return 0;
+}
+
+/**
+ * smk_curacc_shm - check if current has access on shm
+ * @isp: the object
+ * @access: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smk_curacc_shm(struct kern_ipc_perm *isp, int access)
+{
+ struct smack_known *ssp = smack_of_ipc(isp);
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+ ad.a.u.ipc_id = isp->id;
+#endif
+ rc = smk_curacc(ssp, access, &ad);
+ rc = smk_bu_current("shm", ssp, access, rc);
+ return rc;
+}
+
+/**
+ * smack_shm_associate - Smack access check for shm
+ * @isp: the object
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_associate(struct kern_ipc_perm *isp, int shmflg)
+{
+ int may;
+
+ may = smack_flags_to_may(shmflg);
+ return smk_curacc_shm(isp, may);
+}
+
+/**
+ * smack_shm_shmctl - Smack access check for shm
+ * @isp: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd)
+{
+ int may;
+
+ switch (cmd) {
+ case IPC_STAT:
+ case SHM_STAT:
+ case SHM_STAT_ANY:
+ may = MAY_READ;
+ break;
+ case IPC_SET:
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ case IPC_RMID:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case SHM_INFO:
+ /*
+ * System level information.
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return smk_curacc_shm(isp, may);
+}
+
+/**
+ * smack_shm_shmat - Smack access for shmat
+ * @isp: the object
+ * @shmaddr: unused
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmat(struct kern_ipc_perm *isp, char __user *shmaddr,
+ int shmflg)
+{
+ int may;
+
+ may = smack_flags_to_may(shmflg);
+ return smk_curacc_shm(isp, may);
+}
+
+/**
+ * smk_curacc_sem - check if current has access on sem
+ * @isp: the object
+ * @access: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smk_curacc_sem(struct kern_ipc_perm *isp, int access)
+{
+ struct smack_known *ssp = smack_of_ipc(isp);
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+ ad.a.u.ipc_id = isp->id;
+#endif
+ rc = smk_curacc(ssp, access, &ad);
+ rc = smk_bu_current("sem", ssp, access, rc);
+ return rc;
+}
+
+/**
+ * smack_sem_associate - Smack access check for sem
+ * @isp: the object
+ * @semflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_associate(struct kern_ipc_perm *isp, int semflg)
+{
+ int may;
+
+ may = smack_flags_to_may(semflg);
+ return smk_curacc_sem(isp, may);
+}
+
+/**
+ * smack_sem_semctl - Smack access check for sem
+ * @isp: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_semctl(struct kern_ipc_perm *isp, int cmd)
+{
+ int may;
+
+ switch (cmd) {
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETVAL:
+ case GETALL:
+ case IPC_STAT:
+ case SEM_STAT:
+ case SEM_STAT_ANY:
+ may = MAY_READ;
+ break;
+ case SETVAL:
+ case SETALL:
+ case IPC_RMID:
+ case IPC_SET:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case SEM_INFO:
+ /*
+ * System level information
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return smk_curacc_sem(isp, may);
+}
+
+/**
+ * smack_sem_semop - Smack checks of semaphore operations
+ * @isp: the object
+ * @sops: unused
+ * @nsops: unused
+ * @alter: unused
+ *
+ * Treated as read and write in all cases.
+ *
+ * Returns 0 if access is allowed, error code otherwise
+ */
+static int smack_sem_semop(struct kern_ipc_perm *isp, struct sembuf *sops,
+ unsigned nsops, int alter)
+{
+ return smk_curacc_sem(isp, MAY_READWRITE);
+}
+
+/**
+ * smk_curacc_msq - helper to check if current has access on msq
+ * @isp: the msq
+ * @access: access requested
+ *
+ * Returns 0 if current has access, an error code otherwise
+ */
+static int smk_curacc_msq(struct kern_ipc_perm *isp, int access)
+{
+ struct smack_known *msp = smack_of_ipc(isp);
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+ ad.a.u.ipc_id = isp->id;
+#endif
+ rc = smk_curacc(msp, access, &ad);
+ rc = smk_bu_current("msq", msp, access, rc);
+ return rc;
+}
+
+/**
+ * smack_msg_queue_associate - Smack access check for msg_queue
+ * @isp: the object
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_associate(struct kern_ipc_perm *isp, int msqflg)
+{
+ int may;
+
+ may = smack_flags_to_may(msqflg);
+ return smk_curacc_msq(isp, may);
+}
+
+/**
+ * smack_msg_queue_msgctl - Smack access check for msg_queue
+ * @isp: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgctl(struct kern_ipc_perm *isp, int cmd)
+{
+ int may;
+
+ switch (cmd) {
+ case IPC_STAT:
+ case MSG_STAT:
+ case MSG_STAT_ANY:
+ may = MAY_READ;
+ break;
+ case IPC_SET:
+ case IPC_RMID:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case MSG_INFO:
+ /*
+ * System level information
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return smk_curacc_msq(isp, may);
+}
+
+/**
+ * smack_msg_queue_msgsnd - Smack access check for msg_queue
+ * @isp: the object
+ * @msg: unused
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgsnd(struct kern_ipc_perm *isp, struct msg_msg *msg,
+ int msqflg)
+{
+ int may;
+
+ may = smack_flags_to_may(msqflg);
+ return smk_curacc_msq(isp, may);
+}
+
+/**
+ * smack_msg_queue_msgrcv - Smack access check for msg_queue
+ * @isp: the object
+ * @msg: unused
+ * @target: unused
+ * @type: unused
+ * @mode: unused
+ *
+ * Returns 0 if current has read and write access, error code otherwise
+ */
+static int smack_msg_queue_msgrcv(struct kern_ipc_perm *isp,
+ struct msg_msg *msg,
+ struct task_struct *target, long type,
+ int mode)
+{
+ return smk_curacc_msq(isp, MAY_READWRITE);
+}
+
+/**
+ * smack_ipc_permission - Smack access for ipc_permission()
+ * @ipp: the object permissions
+ * @flag: access requested
+ *
+ * Returns 0 if current has read and write access, error code otherwise
+ */
+static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
+{
+ struct smack_known **blob = smack_ipc(ipp);
+ struct smack_known *iskp = *blob;
+ int may = smack_flags_to_may(flag);
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
+ ad.a.u.ipc_id = ipp->id;
+#endif
+ rc = smk_curacc(iskp, may, &ad);
+ rc = smk_bu_current("svipc", iskp, may, rc);
+ return rc;
+}
+
+/**
+ * smack_ipc_getsecid - Extract smack security id
+ * @ipp: the object permissions
+ * @secid: where result will be saved
+ */
+static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
+{
+ struct smack_known **blob = smack_ipc(ipp);
+ struct smack_known *iskp = *blob;
+
+ *secid = iskp->smk_secid;
+}
+
+/**
+ * smack_d_instantiate - Make sure the blob is correct on an inode
+ * @opt_dentry: dentry where inode will be attached
+ * @inode: the object
+ *
+ * Set the inode's security blob if it hasn't been done already.
+ */
+static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
+{
+ struct super_block *sbp;
+ struct superblock_smack *sbsp;
+ struct inode_smack *isp;
+ struct smack_known *skp;
+ struct smack_known *ckp = smk_of_current();
+ struct smack_known *final;
+ char trattr[TRANS_TRUE_SIZE];
+ int transflag = 0;
+ int rc;
+ struct dentry *dp;
+
+ if (inode == NULL)
+ return;
+
+ isp = smack_inode(inode);
+
+ /*
+ * If the inode is already instantiated
+ * take the quick way out
+ */
+ if (isp->smk_flags & SMK_INODE_INSTANT)
+ return;
+
+ sbp = inode->i_sb;
+ sbsp = smack_superblock(sbp);
+ /*
+ * We're going to use the superblock default label
+ * if there's no label on the file.
+ */
+ final = sbsp->smk_default;
+
+ /*
+ * If this is the root inode the superblock
+ * may be in the process of initialization.
+ * If that is the case use the root value out
+ * of the superblock.
+ */
+ if (opt_dentry->d_parent == opt_dentry) {
+ switch (sbp->s_magic) {
+ case CGROUP_SUPER_MAGIC:
+ case CGROUP2_SUPER_MAGIC:
+ /*
+ * The cgroup filesystem is never mounted,
+ * so there's no opportunity to set the mount
+ * options.
+ */
+ sbsp->smk_root = &smack_known_star;
+ sbsp->smk_default = &smack_known_star;
+ isp->smk_inode = sbsp->smk_root;
+ break;
+ case TMPFS_MAGIC:
+ /*
+ * What about shmem/tmpfs anonymous files with dentry
+ * obtained from d_alloc_pseudo()?
+ */
+ isp->smk_inode = smk_of_current();
+ break;
+ case PIPEFS_MAGIC:
+ isp->smk_inode = smk_of_current();
+ break;
+ case SOCKFS_MAGIC:
+ /*
+ * Socket access is controlled by the socket
+ * structures associated with the task involved.
+ */
+ isp->smk_inode = &smack_known_star;
+ break;
+ default:
+ isp->smk_inode = sbsp->smk_root;
+ break;
+ }
+ isp->smk_flags |= SMK_INODE_INSTANT;
+ return;
+ }
+
+ /*
+ * This is pretty hackish.
+ * Casey says that we shouldn't have to do
+ * file system specific code, but it does help
+ * with keeping it simple.
+ */
+ switch (sbp->s_magic) {
+ case SMACK_MAGIC:
+ case CGROUP_SUPER_MAGIC:
+ case CGROUP2_SUPER_MAGIC:
+ /*
+ * Casey says that it's a little embarrassing
+ * that the smack file system doesn't do
+ * extended attributes.
+ *
+ * Cgroupfs is special
+ */
+ final = &smack_known_star;
+ break;
+ case DEVPTS_SUPER_MAGIC:
+ /*
+ * devpts seems content with the label of the task.
+ * Programs that change smack have to treat the
+ * pty with respect.
+ */
+ final = ckp;
+ break;
+ case PROC_SUPER_MAGIC:
+ /*
+ * Casey says procfs appears not to care.
+ * The superblock default suffices.
+ */
+ break;
+ case TMPFS_MAGIC:
+ /*
+ * Device labels should come from the filesystem,
+		 * but watch out, because they're volatile,
+ * getting recreated on every reboot.
+ */
+ final = &smack_known_star;
+ /*
+ * If a smack value has been set we want to use it,
+ * but since tmpfs isn't giving us the opportunity
+ * to set mount options simulate setting the
+ * superblock default.
+ */
+ fallthrough;
+ default:
+ /*
+ * This isn't an understood special case.
+ * Get the value from the xattr.
+ */
+
+ /*
+ * UNIX domain sockets use lower level socket data.
+ */
+ if (S_ISSOCK(inode->i_mode)) {
+ final = &smack_known_star;
+ break;
+ }
+ /*
+		 * No xattr support means, alas, no Smack label.
+		 * Use the default applied earlier.
+		 * It would be curious if the label of the task
+		 * does not match the one assigned.
+ */
+ if (!(inode->i_opflags & IOP_XATTR))
+ break;
+ /*
+ * Get the dentry for xattr.
+ */
+ dp = dget(opt_dentry);
+ skp = smk_fetch(XATTR_NAME_SMACK, inode, dp);
+ if (!IS_ERR_OR_NULL(skp))
+ final = skp;
+
+ /*
+ * Transmuting directory
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ /*
+ * If this is a new directory and the label was
+ * transmuted when the inode was initialized
+ * set the transmute attribute on the directory
+ * and mark the inode.
+ *
+ * If there is a transmute attribute on the
+ * directory mark the inode.
+ */
+ if (isp->smk_flags & SMK_INODE_CHANGED) {
+ isp->smk_flags &= ~SMK_INODE_CHANGED;
+ rc = __vfs_setxattr(&init_user_ns, dp, inode,
+ XATTR_NAME_SMACKTRANSMUTE,
+ TRANS_TRUE, TRANS_TRUE_SIZE,
+ 0);
+ } else {
+ rc = __vfs_getxattr(dp, inode,
+ XATTR_NAME_SMACKTRANSMUTE, trattr,
+ TRANS_TRUE_SIZE);
+ if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
+ TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
+ }
+ if (rc >= 0)
+ transflag = SMK_INODE_TRANSMUTE;
+ }
+ /*
+ * Don't let the exec or mmap label be "*" or "@".
+ */
+ skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+ if (IS_ERR(skp) || skp == &smack_known_star ||
+ skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_task = skp;
+
+ skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
+ if (IS_ERR(skp) || skp == &smack_known_star ||
+ skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_mmap = skp;
+
+ dput(dp);
+ break;
+ }
+
+ if (final == NULL)
+ isp->smk_inode = ckp;
+ else
+ isp->smk_inode = final;
+
+ isp->smk_flags |= (SMK_INODE_INSTANT | transflag);
+
+ return;
+}
+
+/**
+ * smack_getprocattr - Smack process attribute access
+ * @p: the object task
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: where to put the result
+ *
+ * Places a copy of the task Smack into value
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_getprocattr(struct task_struct *p, const char *name, char **value)
+{
+ struct smack_known *skp = smk_of_task_struct_obj(p);
+ char *cp;
+ int slen;
+
+ if (strcmp(name, "current") != 0)
+ return -EINVAL;
+
+ cp = kstrdup(skp->smk_known, GFP_KERNEL);
+ if (cp == NULL)
+ return -ENOMEM;
+
+ slen = strlen(cp);
+ *value = cp;
+ return slen;
+}
+
+/**
+ * smack_setprocattr - Smack process attribute setting
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: the value to set
+ * @size: the size of the value
+ *
+ * Sets the Smack value of the task. Only setting self
+ * is permitted and only with privilege
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_setprocattr(const char *name, void *value, size_t size)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct cred *new;
+ struct smack_known *skp;
+ struct smack_known_list_elem *sklep;
+ int rc;
+
+ if (!smack_privileged(CAP_MAC_ADMIN) && list_empty(&tsp->smk_relabel))
+ return -EPERM;
+
+ if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
+ return -EINVAL;
+
+ if (strcmp(name, "current") != 0)
+ return -EINVAL;
+
+ skp = smk_import_entry(value, size);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+
+ /*
+ * No process is ever allowed the web ("@") label
+	 * or the star ("*") label.
+ */
+ if (skp == &smack_known_web || skp == &smack_known_star)
+ return -EINVAL;
+
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ rc = -EPERM;
+ list_for_each_entry(sklep, &tsp->smk_relabel, list)
+ if (sklep->smk_label == skp) {
+ rc = 0;
+ break;
+ }
+ if (rc)
+ return rc;
+ }
+
+ new = prepare_creds();
+ if (new == NULL)
+ return -ENOMEM;
+
+ tsp = smack_cred(new);
+ tsp->smk_task = skp;
+ /*
+	 * A process can change its label only once.
+ */
+ smk_destroy_label_list(&tsp->smk_relabel);
+
+ commit_creds(new);
+ return size;
+}
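+
+/*
+ * For example, "echo -n NewLabel > /proc/self/attr/current" ends up
+ * here ("NewLabel" being an arbitrary label). The write succeeds only
+ * with CAP_MAC_ADMIN or when the label is on the task's relabel-self
+ * list.
+ */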
+
+/**
+ * smack_unix_stream_connect - Smack access on UDS
+ * @sock: one sock
+ * @other: the other sock
+ * @newsk: unused
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_stream_connect(struct sock *sock,
+ struct sock *other, struct sock *newsk)
+{
+ struct smack_known *skp;
+ struct smack_known *okp;
+ struct socket_smack *ssp = sock->sk_security;
+ struct socket_smack *osp = other->sk_security;
+ struct socket_smack *nsp = newsk->sk_security;
+ struct smk_audit_info ad;
+ int rc = 0;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+
+ if (!smack_privileged(CAP_MAC_OVERRIDE)) {
+ skp = ssp->smk_out;
+ okp = osp->smk_in;
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ smk_ad_setfield_u_net_sk(&ad, other);
+#endif
+ rc = smk_access(skp, okp, MAY_WRITE, &ad);
+ rc = smk_bu_note("UDS connect", skp, okp, MAY_WRITE, rc);
+ if (rc == 0) {
+ okp = osp->smk_out;
+ skp = ssp->smk_in;
+ rc = smk_access(okp, skp, MAY_WRITE, &ad);
+ rc = smk_bu_note("UDS connect", okp, skp,
+ MAY_WRITE, rc);
+ }
+ }
+
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
+ if (rc == 0) {
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
+ }
+
+ return rc;
+}
+
+/**
+ * smack_unix_may_send - Smack access on UDS
+ * @sock: one socket
+ * @other: the other socket
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_may_send(struct socket *sock, struct socket *other)
+{
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct socket_smack *osp = other->sk->sk_security;
+ struct smk_audit_info ad;
+ int rc;
+
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ smk_ad_setfield_u_net_sk(&ad, other->sk);
+#endif
+
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
+
+ rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("UDS send", ssp->smk_out, osp->smk_in, MAY_WRITE, rc);
+ return rc;
+}
+
+/**
+ * smack_socket_sendmsg - Smack check based on destination host
+ * @sock: the socket
+ * @msg: the message
+ * @size: the size of the message
+ *
+ * Return 0 if the current subject can write to the destination host.
+ * For IPv4 this is only a question if the destination is a single label host.
+ * For IPv6 this is a check against the label of the port.
+ */
+static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct smack_known *rsp;
+#endif
+ int rc = 0;
+
+ /*
+ * Perfectly reasonable for this to be NULL
+ */
+ if (sip == NULL)
+ return 0;
+
+ switch (sock->sk->sk_family) {
+ case AF_INET:
+ if (msg->msg_namelen < sizeof(struct sockaddr_in) ||
+ sip->sin_family != AF_INET)
+ return -EINVAL;
+ rc = smk_ipv4_check(sock->sk, sip);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (msg->msg_namelen < SIN6_LEN_RFC2133 ||
+ sap->sin6_family != AF_INET6)
+ return -EINVAL;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ rsp = smack_ipv6host_label(sap);
+ if (rsp != NULL)
+ rc = smk_ipv6_check(ssp->smk_out, rsp, sap,
+ SMK_CONNECTING);
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+ rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
+#endif
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+ break;
+ }
+ return rc;
+}
+
+/**
+ * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
+ * @sap: netlabel secattr
+ * @ssp: socket security information
+ *
+ * Returns a pointer to a Smack label entry found on the label list.
+ */
+static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
+{
+ struct smack_known *skp;
+ int found = 0;
+ int acat;
+ int kcat;
+
+ /*
+ * Netlabel found it in the cache.
+ */
+ if ((sap->flags & NETLBL_SECATTR_CACHE) != 0)
+ return (struct smack_known *)sap->cache->data;
+
+ if ((sap->flags & NETLBL_SECATTR_SECID) != 0)
+ /*
+ * Looks like a fallback, which gives us a secid.
+ */
+ return smack_from_secid(sap->attr.secid);
+
+ if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
+ /*
+ * Looks like a CIPSO packet.
+		 * If there are flags but no level, netlabel isn't
+		 * behaving the way we expect it to.
+		 *
+		 * Look the label up in the label table. If there
+		 * is no match fall back on the star label, or on
+		 * the web label when the receiving socket's label
+		 * is star.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(skp, &smack_known_list, list) {
+ if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl)
+ continue;
+ /*
+ * Compare the catsets. Use the netlbl APIs.
+ */
+ if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) {
+ if ((skp->smk_netlabel.flags &
+ NETLBL_SECATTR_MLS_CAT) == 0)
+ found = 1;
+ break;
+ }
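+			/*
+			 * Walk both category maps in lockstep.  The walk
+			 * ends when the two diverge or when both are
+			 * exhausted and return the same negative value,
+			 * so acat == kcat on exit means the category
+			 * sets are identical.
+			 */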
+ for (acat = -1, kcat = -1; acat == kcat; ) {
+ acat = netlbl_catmap_walk(sap->attr.mls.cat,
+ acat + 1);
+ kcat = netlbl_catmap_walk(
+ skp->smk_netlabel.attr.mls.cat,
+ kcat + 1);
+ if (acat < 0 || kcat < 0)
+ break;
+ }
+ if (acat == kcat) {
+ found = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (found)
+ return skp;
+
+ if (ssp != NULL && ssp->smk_in == &smack_known_star)
+ return &smack_known_web;
+ return &smack_known_star;
+ }
+ /*
+ * Without guidance regarding the smack value
+ * for the packet fall back on the network
+ * ambient value.
+ */
+ return smack_net_ambient;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
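+/**
+ * smk_skb_to_addr_ipv6 - extract the IPv6 source address and port
+ * @skb: the packet
+ * @sip: where to put the source address and port
+ *
+ * Fills in @sip from the IPv6 header and, for TCP, UDP, UDP-Lite
+ * and DCCP, from the transport header.
+ *
+ * Returns the transport protocol number, or -EINVAL if the headers
+ * cannot be read.
+ */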
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
+{
+ u8 nexthdr;
+ int offset;
+ int proto = -EINVAL;
+ struct ipv6hdr _ipv6h;
+ struct ipv6hdr *ip6;
+ __be16 frag_off;
+ struct tcphdr _tcph, *th;
+ struct udphdr _udph, *uh;
+ struct dccp_hdr _dccph, *dh;
+
+ sip->sin6_port = 0;
+
+ offset = skb_network_offset(skb);
+ ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+ if (ip6 == NULL)
+ return -EINVAL;
+ sip->sin6_addr = ip6->saddr;
+
+ nexthdr = ip6->nexthdr;
+ offset += sizeof(_ipv6h);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+ if (offset < 0)
+ return -EINVAL;
+
+ proto = nexthdr;
+ switch (proto) {
+ case IPPROTO_TCP:
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th != NULL)
+ sip->sin6_port = th->source;
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh != NULL)
+ sip->sin6_port = uh->source;
+ break;
+ case IPPROTO_DCCP:
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh != NULL)
+ sip->sin6_port = dh->dccph_sport;
+ break;
+ }
+ return proto;
+}
+#endif /* CONFIG_IPV6 */
+
+/**
+ * smack_from_skb - Smack data from the secmark in an skb
+ * @skb: packet
+ *
+ * Returns smack_known of the secmark or NULL if that won't work.
+ */
+#ifdef CONFIG_NETWORK_SECMARK
+static struct smack_known *smack_from_skb(struct sk_buff *skb)
+{
+ if (skb == NULL || skb->secmark == 0)
+ return NULL;
+
+ return smack_from_secid(skb->secmark);
+}
+#else
+static inline struct smack_known *smack_from_skb(struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
+/**
+ * smack_from_netlbl - Smack data from the IP options in an skb
+ * @sk: socket data came in on
+ * @family: address family
+ * @skb: packet
+ *
+ * Find the Smack label in the IP options. If it hasn't been
+ * added to the netlabel cache, add it here.
+ *
+ * Returns smack_known of the IP options or NULL if that won't work.
+ */
+static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family,
+ struct sk_buff *skb)
+{
+ struct netlbl_lsm_secattr secattr;
+ struct socket_smack *ssp = NULL;
+ struct smack_known *skp = NULL;
+
+ netlbl_secattr_init(&secattr);
+
+ if (sk)
+ ssp = sk->sk_security;
+
+ if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) {
+ skp = smack_from_secattr(&secattr, ssp);
+ if (secattr.flags & NETLBL_SECATTR_CACHEABLE)
+ netlbl_cache_add(skb, family, &skp->smk_netlabel);
+ }
+
+ netlbl_secattr_destroy(&secattr);
+
+ return skp;
+}
+
+/**
+ * smack_socket_sock_rcv_skb - Smack packet delivery access check
+ * @sk: socket
+ * @skb: packet
+ *
+ * Returns 0 if the packet should be delivered, an error code otherwise
+ */
+static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp = NULL;
+ int rc = 0;
+ struct smk_audit_info ad;
+ u16 family = sk->sk_family;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 sadd;
+ int proto;
+
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+#endif /* CONFIG_IPV6 */
+
+ switch (family) {
+ case PF_INET:
+ /*
+ * If there is a secmark use it rather than the CIPSO label.
+ * If there is no secmark fall back to CIPSO.
+ * The secmark is assumed to reflect policy better.
+ */
+ skp = smack_from_skb(skb);
+ if (skp == NULL) {
+ skp = smack_from_netlbl(sk, family, skb);
+ if (skp == NULL)
+ skp = smack_net_ambient;
+ }
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+#endif
+ /*
+ * Receiving a packet requires that the other end
+ * be able to write here. Read access is not required.
+		 * This is the simplest possible security model
+ * for networking.
+ */
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in,
+ MAY_WRITE, rc);
+ if (rc != 0)
+ netlbl_skbuff_err(skb, family, rc, 0);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+ proto = smk_skb_to_addr_ipv6(skb, &sadd);
+ if (proto != IPPROTO_UDP && proto != IPPROTO_UDPLITE &&
+ proto != IPPROTO_TCP && proto != IPPROTO_DCCP)
+ break;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ skp = smack_from_skb(skb);
+ if (skp == NULL) {
+ if (smk_ipv6_localhost(&sadd))
+ break;
+ skp = smack_ipv6host_label(&sadd);
+ if (skp == NULL)
+ skp = smack_net_ambient;
+ }
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv6_skb_to_auditdata(skb, &ad.a, NULL);
+#endif /* CONFIG_AUDIT */
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv6 delivery", skp, ssp->smk_in,
+ MAY_WRITE, rc);
+#endif /* SMACK_IPV6_SECMARK_LABELING */
+#ifdef SMACK_IPV6_PORT_LABELING
+ rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+#endif /* SMACK_IPV6_PORT_LABELING */
+ if (rc != 0)
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH,
+ ICMPV6_ADM_PROHIBITED, 0);
+ break;
+#endif /* CONFIG_IPV6 */
+ }
+
+ return rc;
+}
+
+/**
+ * smack_socket_getpeersec_stream - pull in packet label
+ * @sock: the socket
+ * @optval: user's destination
+ * @optlen: where to put the size of the result
+ * @len: maximum space available in @optval
+ *
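+ * Reached from getsockopt(fd, SOL_SOCKET, SO_PEERSEC, ...).
+ *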
+ * Returns zero on success, an error code otherwise
+ */
+static int smack_socket_getpeersec_stream(struct socket *sock,
+ char __user *optval,
+ int __user *optlen, unsigned len)
+{
+ struct socket_smack *ssp;
+ char *rcp = "";
+ int slen = 1;
+ int rc = 0;
+
+ ssp = sock->sk->sk_security;
+ if (ssp->smk_packet != NULL) {
+ rcp = ssp->smk_packet->smk_known;
+ slen = strlen(rcp) + 1;
+ }
+
+ if (slen > len)
+ rc = -ERANGE;
+ else if (copy_to_user(optval, rcp, slen) != 0)
+ rc = -EFAULT;
+
+ if (put_user(slen, optlen) != 0)
+ rc = -EFAULT;
+
+ return rc;
+}
+
+
+/**
+ * smack_socket_getpeersec_dgram - pull in packet label
+ * @sock: the peer socket
+ * @skb: packet data
+ * @secid: pointer to where to put the secid of the packet
+ *
+ * Sets @secid to the Smack secid of the peer socket or packet.
+ *
+ * Returns 0 on success, -EINVAL if no secid can be determined
+ */
+static int smack_socket_getpeersec_dgram(struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+
+{
+ struct socket_smack *ssp = NULL;
+ struct smack_known *skp;
+ struct sock *sk = NULL;
+ int family = PF_UNSPEC;
+ u32 s = 0; /* 0 is the invalid secid */
+
+ if (skb != NULL) {
+ if (skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ family = PF_INET6;
+#endif /* CONFIG_IPV6 */
+ }
+ if (family == PF_UNSPEC && sock != NULL)
+ family = sock->sk->sk_family;
+
+ switch (family) {
+ case PF_UNIX:
+ ssp = sock->sk->sk_security;
+ s = ssp->smk_out->smk_secid;
+ break;
+ case PF_INET:
+ skp = smack_from_skb(skb);
+ if (skp) {
+ s = skp->smk_secid;
+ break;
+ }
+ /*
+ * Translate what netlabel gave us.
+ */
+ if (sock != NULL)
+ sk = sock->sk;
+ skp = smack_from_netlbl(sk, family, skb);
+ if (skp != NULL)
+ s = skp->smk_secid;
+ break;
+ case PF_INET6:
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ skp = smack_from_skb(skb);
+ if (skp)
+ s = skp->smk_secid;
+#endif
+ break;
+ }
+ *secid = s;
+ if (s == 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * smack_sock_graft - Initialize a newly created socket with an existing sock
+ * @sk: child sock
+ * @parent: parent socket
+ *
+ * Set the smk_{in,out} state of an existing sock based on the process that
+ * is creating the new socket.
+ */
+static void smack_sock_graft(struct sock *sk, struct socket *parent)
+{
+ struct socket_smack *ssp;
+ struct smack_known *skp = smk_of_current();
+
+ if (sk == NULL ||
+ (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
+ return;
+
+ ssp = sk->sk_security;
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
+	/* ssp->smk_packet is already set in smack_inet_csk_clone() */
+}
+
+/**
+ * smack_inet_conn_request - Smack access check on connect
+ * @sk: socket involved
+ * @skb: packet
+ * @req: unused
+ *
+ * Returns 0 if a task with the packet label could write to
+ * the socket, otherwise an error code
+ */
+static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+{
+ u16 family = sk->sk_family;
+ struct smack_known *skp;
+ struct socket_smack *ssp = sk->sk_security;
+ struct sockaddr_in addr;
+ struct iphdr *hdr;
+ struct smack_known *hskp;
+ int rc;
+ struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == PF_INET6) {
+ /*
+ * Handle mapped IPv4 packets arriving
+ * via IPv6 sockets. Don't set up netlabel
+ * processing on IPv6.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+ else
+ return 0;
+ }
+#endif /* CONFIG_IPV6 */
+
+ /*
+ * If there is a secmark use it rather than the CIPSO label.
+ * If there is no secmark fall back to CIPSO.
+ * The secmark is assumed to reflect policy better.
+ */
+ skp = smack_from_skb(skb);
+ if (skp == NULL) {
+ skp = smack_from_netlbl(sk, family, skb);
+ if (skp == NULL)
+ skp = &smack_known_huh;
+ }
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+#endif
+ /*
+ * Receiving a packet requires that the other end be able to write
+ * here. Read access is not required.
+ */
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 connect", skp, ssp->smk_in, MAY_WRITE, rc);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Save the peer's label in the request_sock so we can later setup
+	 * smk_packet in the child socket so that SO_PEERSEC can report it.
+ */
+ req->peer_secid = skp->smk_secid;
+
+ /*
+	 * We need to decide if we want to label the incoming connection
+	 * here. If we do, we only need to label the request_sock and the
+	 * stack will propagate the wire-label to the sock when it is
+	 * created.
+ */
+ hdr = ip_hdr(skb);
+ addr.sin_addr.s_addr = hdr->saddr;
+ rcu_read_lock();
+ hskp = smack_ipv4host_label(&addr);
+ rcu_read_unlock();
+
+ if (hskp == NULL)
+ rc = netlbl_req_setattr(req, &skp->smk_netlabel);
+ else
+ netlbl_req_delattr(req);
+
+ return rc;
+}
+
+/**
+ * smack_inet_csk_clone - Copy the connection information to the new socket
+ * @sk: the new socket
+ * @req: the connection's request_sock
+ *
+ * Transfer the connection's peer label to the newly created socket.
+ */
+static void smack_inet_csk_clone(struct sock *sk,
+ const struct request_sock *req)
+{
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp;
+
+ if (req->peer_secid != 0) {
+ skp = smack_from_secid(req->peer_secid);
+ ssp->smk_packet = skp;
+ } else
+ ssp->smk_packet = NULL;
+}
+
+/*
+ * Key management security hooks
+ *
+ * Casey has not tested key support very heavily.
+ * The permission check is most likely too restrictive.
+ * If you care about keys please have a look.
+ */
+#ifdef CONFIG_KEYS
+
+/**
+ * smack_key_alloc - Set the key security blob
+ * @key: object
+ * @cred: the credentials to use
+ * @flags: unused
+ *
+ * No allocation required
+ *
+ * Returns 0
+ */
+static int smack_key_alloc(struct key *key, const struct cred *cred,
+ unsigned long flags)
+{
+ struct smack_known *skp = smk_of_task(smack_cred(cred));
+
+ key->security = skp;
+ return 0;
+}
+
+/**
+ * smack_key_free - Clear the key security blob
+ * @key: the object
+ *
+ * Clear the blob pointer
+ */
+static void smack_key_free(struct key *key)
+{
+ key->security = NULL;
+}
+
+/**
+ * smack_key_permission - Smack access on a key
+ * @key_ref: gets to the object
+ * @cred: the credentials to use
+ * @need_perm: requested key permission
+ *
+ * Return 0 if the task has read and write access to the object,
+ * an error code otherwise
+ */
+static int smack_key_permission(key_ref_t key_ref,
+ const struct cred *cred,
+ enum key_need_perm need_perm)
+{
+ struct key *keyp;
+ struct smk_audit_info ad;
+ struct smack_known *tkp = smk_of_task(smack_cred(cred));
+ int request = 0;
+ int rc;
+
+ /*
+ * Validate requested permissions
+ */
+ switch (need_perm) {
+ case KEY_NEED_READ:
+ case KEY_NEED_SEARCH:
+ case KEY_NEED_VIEW:
+ request |= MAY_READ;
+ break;
+ case KEY_NEED_WRITE:
+ case KEY_NEED_LINK:
+ case KEY_NEED_SETATTR:
+ request |= MAY_WRITE;
+ break;
+ case KEY_NEED_UNSPECIFIED:
+ case KEY_NEED_UNLINK:
+ case KEY_SYSADMIN_OVERRIDE:
+ case KEY_AUTHTOKEN_OVERRIDE:
+ case KEY_DEFER_PERM_CHECK:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ keyp = key_ref_to_ptr(key_ref);
+ if (keyp == NULL)
+ return -EINVAL;
+ /*
+ * If the key hasn't been initialized give it access so that
+ * it may do so.
+ */
+ if (keyp->security == NULL)
+ return 0;
+ /*
+ * This should not occur
+ */
+ if (tkp == NULL)
+ return -EACCES;
+
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
+ ad.a.u.key_struct.key = keyp->serial;
+ ad.a.u.key_struct.key_desc = keyp->description;
+#endif
+ rc = smk_access(tkp, keyp->security, request, &ad);
+ rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
+ return rc;
+}
+
+/**
+ * smack_key_getsecurity - Smack label tagging the key
+ * @key: points to the key to be queried
+ * @_buffer: points to a pointer that should be set to point to the
+ *           resulting string
+ *
+ * Return the length of the string (including the terminating NUL) or
+ * -ve if an error. May also return 0 (and a NULL buffer pointer) if
+ * there is no label.
+ */
+static int smack_key_getsecurity(struct key *key, char **_buffer)
+{
+ struct smack_known *skp = key->security;
+ size_t length;
+ char *copy;
+
+ if (key->security == NULL) {
+ *_buffer = NULL;
+ return 0;
+ }
+
+ copy = kstrdup(skp->smk_known, GFP_KERNEL);
+ if (copy == NULL)
+ return -ENOMEM;
+ length = strlen(copy) + 1;
+
+ *_buffer = copy;
+ return length;
+}
+
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+/**
+ * smack_watch_key - Smack access to watch a key for notifications.
+ * @key: The key to be watched
+ *
+ * Return 0 if the watching task has permission to read from the key
+ * object and an error otherwise.
+ */
+static int smack_watch_key(struct key *key)
+{
+ struct smk_audit_info ad;
+ struct smack_known *tkp = smk_of_current();
+ int rc;
+
+ if (key == NULL)
+ return -EINVAL;
+ /*
+ * If the key hasn't been initialized give it access so that
+ * it may do so.
+ */
+ if (key->security == NULL)
+ return 0;
+ /*
+ * This should not occur
+ */
+ if (tkp == NULL)
+ return -EACCES;
+
+ if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred()))
+ return 0;
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
+ ad.a.u.key_struct.key = key->serial;
+ ad.a.u.key_struct.key_desc = key->description;
+#endif
+ rc = smk_access(tkp, key->security, MAY_READ, &ad);
+ rc = smk_bu_note("key watch", tkp, key->security, MAY_READ, rc);
+ return rc;
+}
+#endif /* CONFIG_KEY_NOTIFICATIONS */
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_WATCH_QUEUE
+/**
+ * smack_post_notification - Smack access to post a notification to a queue
+ * @w_cred: The credentials of the watcher.
+ * @cred: The credentials of the event source (may be NULL).
+ * @n: The notification message to be posted.
+ */
+static int smack_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ struct smk_audit_info ad;
+ struct smack_known *subj, *obj;
+ int rc;
+
+ /* Always let maintenance notifications through. */
+ if (n->type == WATCH_TYPE_META)
+ return 0;
+
+ if (!cred)
+ return 0;
+ subj = smk_of_task(smack_cred(cred));
+ obj = smk_of_task(smack_cred(w_cred));
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NOTIFICATION);
+ rc = smk_access(subj, obj, MAY_WRITE, &ad);
+ rc = smk_bu_note("notification", subj, obj, MAY_WRITE, rc);
+ return rc;
+}
+#endif /* CONFIG_WATCH_QUEUE */
+
+/*
+ * Smack Audit hooks
+ *
+ * Audit requires a unique representation of each Smack specific
+ * rule. This unique representation is used to distinguish the
+ * object to be audited from remaining kernel objects and also
+ * works as a glue between the audit hooks.
+ *
+ * Since repository entries are added but never deleted, we'll use
+ * the smack_known label address related to the given audit rule as
+ * the needed unique representation. This also better fits the smack
+ * model where nearly everything is a label.
+ */
+#ifdef CONFIG_AUDIT
+
+/**
+ * smack_audit_rule_init - Initialize a smack audit rule
+ * @field: audit rule fields given from user-space (audit.h)
+ * @op: required testing operator (=, !=, >, <, ...)
+ * @rulestr: smack label to be audited
+ * @vrule: pointer to save our own audit rule representation
+ *
+ * Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
+ */
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+ struct smack_known *skp;
+ char **rule = (char **)vrule;
+ *rule = NULL;
+
+ if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+ return -EINVAL;
+
+ if (op != Audit_equal && op != Audit_not_equal)
+ return -EINVAL;
+
+ skp = smk_import_entry(rulestr, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+
+ *rule = skp->smk_known;
+
+ return 0;
+}
+
+/**
+ * smack_audit_rule_known - Distinguish Smack audit rules
+ * @krule: rule of interest, in Audit kernel representation format
+ *
+ * This is used to filter Smack rules from remaining Audit ones.
+ * If it's proved that this rule belongs to us, the
+ * audit_rule_match hook will be called to do the final judgement.
+ */
+static int smack_audit_rule_known(struct audit_krule *krule)
+{
+ struct audit_field *f;
+ int i;
+
+ for (i = 0; i < krule->field_count; i++) {
+ f = &krule->fields[i];
+
+ if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * smack_audit_rule_match - Audit given object?
+ * @secid: security id for identifying the object to test
+ * @field: audit rule flags given from user-space
+ * @op: required testing operator
+ * @vrule: smack internal rule representation
+ *
+ * The core Audit hook. It's used to decide whether
+ * or not to audit a given object.
+ */
+static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule)
+{
+ struct smack_known *skp;
+ char *rule = vrule;
+
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "Smack: missing rule\n");
+ return -ENOENT;
+ }
+
+ if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+ return 0;
+
+ skp = smack_from_secid(secid);
+
+ /*
+ * No need to do string comparisons. If a match occurs,
+ * both pointers will point to the same smack_known
+ * label.
+ */
+ if (op == Audit_equal)
+ return (rule == skp->smk_known);
+ if (op == Audit_not_equal)
+ return (rule != skp->smk_known);
+
+ return 0;
+}
+
+/*
+ * There is no need for a smack_audit_rule_free hook.
+ * No memory was allocated.
+ */
+
+#endif /* CONFIG_AUDIT */
+
+/**
+ * smack_ismaclabel - check if xattr @name references a smack MAC label
+ * @name: Full xattr name to check.
+ */
+static int smack_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SMACK_SUFFIX) == 0);
+}
+
+
+/**
+ * smack_secid_to_secctx - return the smack label for a secid
+ * @secid: incoming integer
+ * @secdata: destination
+ * @seclen: how long it is
+ *
+ * Exists for networking code.
+ */
+static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ struct smack_known *skp = smack_from_secid(secid);
+
+ if (secdata)
+ *secdata = skp->smk_known;
+ *seclen = strlen(skp->smk_known);
+ return 0;
+}
+
+/**
+ * smack_secctx_to_secid - return the secid for a smack label
+ * @secdata: smack label
+ * @seclen: how long result is
+ * @secid: outgoing integer
+ *
+ * Exists for audit and networking code.
+ */
+static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+ struct smack_known *skp = smk_find_entry(secdata);
+
+ if (skp)
+ *secid = skp->smk_secid;
+ else
+ *secid = 0;
+ return 0;
+}
+
+/*
+ * There used to be a smack_release_secctx hook
+ * that did nothing back when hooks were in a vector.
+ * Now that there's a list such a hook adds cost.
+ */
+
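+/*
+ * The inode_*secctx hooks below pass the Smack label through the
+ * generic security context interface used, for example, by kernfs
+ * and labeled NFS.
+ */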
+static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx,
+ ctxlen, 0);
+}
+
+static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return __vfs_setxattr_noperm(&init_user_ns, dentry, XATTR_NAME_SMACK,
+ ctx, ctxlen, 0);
+}
+
+static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ struct smack_known *skp = smk_of_inode(inode);
+
+ *ctx = skp->smk_known;
+ *ctxlen = strlen(skp->smk_known);
+ return 0;
+}
+
+static int smack_inode_copy_up(struct dentry *dentry, struct cred **new)
+{
+
+ struct task_smack *tsp;
+ struct smack_known *skp;
+ struct inode_smack *isp;
+ struct cred *new_creds = *new;
+
+ if (new_creds == NULL) {
+ new_creds = prepare_creds();
+ if (new_creds == NULL)
+ return -ENOMEM;
+ }
+
+ tsp = smack_cred(new_creds);
+
+ /*
+	 * Get the label from the overlay inode and make it the
+	 * task label in the new credentials.
+ */
+ isp = smack_inode(d_inode(dentry));
+ skp = isp->smk_inode;
+ tsp->smk_task = skp;
+ *new = new_creds;
+ return 0;
+}
+
+static int smack_inode_copy_up_xattr(const char *name)
+{
+ /*
+	 * Return 1 if this is the Smack access attribute.
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0)
+ return 1;
+
+ return -EOPNOTSUPP;
+}
+
+static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ struct task_smack *otsp = smack_cred(old);
+ struct task_smack *ntsp = smack_cred(new);
+ struct inode_smack *isp;
+ int may;
+
+ /*
+ * Use the process credential unless all of
+ * the transmuting criteria are met
+ */
+ ntsp->smk_task = otsp->smk_task;
+
+ /*
+	 * Fetch the smack blob of the containing directory.
+ */
+ isp = smack_inode(d_inode(dentry->d_parent));
+
+ if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
+ rcu_read_lock();
+ may = smk_access_entry(otsp->smk_task->smk_known,
+ isp->smk_inode->smk_known,
+ &otsp->smk_task->smk_rules);
+ rcu_read_unlock();
+
+ /*
+ * If the directory is transmuting and the rule
+ * providing access is transmuting use the containing
+ * directory label instead of the process label.
+ */
+ if (may > 0 && (may & MAY_TRANSMUTE)) {
+ ntsp->smk_task = isp->smk_inode;
+ ntsp->smk_transmuted = ntsp->smk_task;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_IO_URING
+/**
+ * smack_uring_override_creds - Is io_uring cred override allowed?
+ * @new: the target creds
+ *
+ * Check to see if the current task is allowed to override its credentials
+ * to service an io_uring operation.
+ */
+static int smack_uring_override_creds(const struct cred *new)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct task_smack *nsp = smack_cred(new);
+
+ /*
+ * Allow the degenerate case where the new Smack value is
+ * the same as the current Smack value.
+ */
+ if (tsp->smk_task == nsp->smk_task)
+ return 0;
+
+ if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred()))
+ return 0;
+
+ return -EPERM;
+}
+
+/**
+ * smack_uring_sqpoll - check if an io_uring polling thread can be created
+ *
+ * Check to see if the current task is allowed to create a new io_uring
+ * kernel polling thread.
+ */
+static int smack_uring_sqpoll(void)
+{
+ if (smack_privileged_cred(CAP_MAC_ADMIN, current_cred()))
+ return 0;
+
+ return -EPERM;
+}
+
+/**
+ * smack_uring_cmd - check on file operations for io_uring
+ * @ioucmd: the command in question
+ *
+ * Make a best guess about whether an io_uring "command" should
+ * be allowed. In the absence of better criteria use the same
+ * logic used to determine whether the file could be opened for read.
+ */
+static int smack_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct smk_audit_info ad;
+ struct task_smack *tsp;
+ struct inode *inode;
+ int rc;
+
+ if (!file)
+ return -EINVAL;
+
+ tsp = smack_cred(file->f_cred);
+ inode = file_inode(file);
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
+
+ return rc;
+}
+
+#endif /* CONFIG_IO_URING */
+
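+/*
+ * Sizes of the Smack pieces of the shared LSM security blobs.  The
+ * accessors such as smack_cred(), smack_inode() and smack_ipc()
+ * return pointers into the space reserved here.
+ */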
+struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct task_smack),
+ .lbs_file = sizeof(struct smack_known *),
+ .lbs_inode = sizeof(struct inode_smack),
+ .lbs_ipc = sizeof(struct smack_known *),
+ .lbs_msg_msg = sizeof(struct smack_known *),
+ .lbs_superblock = sizeof(struct superblock_smack),
+};
+
+static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
+ LSM_HOOK_INIT(syslog, smack_syslog),
+
+ LSM_HOOK_INIT(fs_context_submount, smack_fs_context_submount),
+ LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup),
+ LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
+
+ LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security),
+ LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts),
+ LSM_HOOK_INIT(sb_eat_lsm_opts, smack_sb_eat_lsm_opts),
+ LSM_HOOK_INIT(sb_statfs, smack_sb_statfs),
+ LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts),
+
+ LSM_HOOK_INIT(bprm_creds_for_exec, smack_bprm_creds_for_exec),
+
+ LSM_HOOK_INIT(inode_alloc_security, smack_inode_alloc_security),
+ LSM_HOOK_INIT(inode_init_security, smack_inode_init_security),
+ LSM_HOOK_INIT(inode_link, smack_inode_link),
+ LSM_HOOK_INIT(inode_unlink, smack_inode_unlink),
+ LSM_HOOK_INIT(inode_rmdir, smack_inode_rmdir),
+ LSM_HOOK_INIT(inode_rename, smack_inode_rename),
+ LSM_HOOK_INIT(inode_permission, smack_inode_permission),
+ LSM_HOOK_INIT(inode_setattr, smack_inode_setattr),
+ LSM_HOOK_INIT(inode_getattr, smack_inode_getattr),
+ LSM_HOOK_INIT(inode_setxattr, smack_inode_setxattr),
+ LSM_HOOK_INIT(inode_post_setxattr, smack_inode_post_setxattr),
+ LSM_HOOK_INIT(inode_getxattr, smack_inode_getxattr),
+ LSM_HOOK_INIT(inode_removexattr, smack_inode_removexattr),
+ LSM_HOOK_INIT(inode_getsecurity, smack_inode_getsecurity),
+ LSM_HOOK_INIT(inode_setsecurity, smack_inode_setsecurity),
+ LSM_HOOK_INIT(inode_listsecurity, smack_inode_listsecurity),
+ LSM_HOOK_INIT(inode_getsecid, smack_inode_getsecid),
+
+ LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
+ LSM_HOOK_INIT(file_lock, smack_file_lock),
+ LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
+ LSM_HOOK_INIT(mmap_file, smack_mmap_file),
+ LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
+ LSM_HOOK_INIT(file_set_fowner, smack_file_set_fowner),
+ LSM_HOOK_INIT(file_send_sigiotask, smack_file_send_sigiotask),
+ LSM_HOOK_INIT(file_receive, smack_file_receive),
+
+ LSM_HOOK_INIT(file_open, smack_file_open),
+
+ LSM_HOOK_INIT(cred_alloc_blank, smack_cred_alloc_blank),
+ LSM_HOOK_INIT(cred_free, smack_cred_free),
+ LSM_HOOK_INIT(cred_prepare, smack_cred_prepare),
+ LSM_HOOK_INIT(cred_transfer, smack_cred_transfer),
+ LSM_HOOK_INIT(cred_getsecid, smack_cred_getsecid),
+ LSM_HOOK_INIT(kernel_act_as, smack_kernel_act_as),
+ LSM_HOOK_INIT(kernel_create_files_as, smack_kernel_create_files_as),
+ LSM_HOOK_INIT(task_setpgid, smack_task_setpgid),
+ LSM_HOOK_INIT(task_getpgid, smack_task_getpgid),
+ LSM_HOOK_INIT(task_getsid, smack_task_getsid),
+ LSM_HOOK_INIT(current_getsecid_subj, smack_current_getsecid_subj),
+ LSM_HOOK_INIT(task_getsecid_obj, smack_task_getsecid_obj),
+ LSM_HOOK_INIT(task_setnice, smack_task_setnice),
+ LSM_HOOK_INIT(task_setioprio, smack_task_setioprio),
+ LSM_HOOK_INIT(task_getioprio, smack_task_getioprio),
+ LSM_HOOK_INIT(task_setscheduler, smack_task_setscheduler),
+ LSM_HOOK_INIT(task_getscheduler, smack_task_getscheduler),
+ LSM_HOOK_INIT(task_movememory, smack_task_movememory),
+ LSM_HOOK_INIT(task_kill, smack_task_kill),
+ LSM_HOOK_INIT(task_to_inode, smack_task_to_inode),
+
+ LSM_HOOK_INIT(ipc_permission, smack_ipc_permission),
+ LSM_HOOK_INIT(ipc_getsecid, smack_ipc_getsecid),
+
+ LSM_HOOK_INIT(msg_msg_alloc_security, smack_msg_msg_alloc_security),
+
+ LSM_HOOK_INIT(msg_queue_alloc_security, smack_ipc_alloc_security),
+ LSM_HOOK_INIT(msg_queue_associate, smack_msg_queue_associate),
+ LSM_HOOK_INIT(msg_queue_msgctl, smack_msg_queue_msgctl),
+ LSM_HOOK_INIT(msg_queue_msgsnd, smack_msg_queue_msgsnd),
+ LSM_HOOK_INIT(msg_queue_msgrcv, smack_msg_queue_msgrcv),
+
+ LSM_HOOK_INIT(shm_alloc_security, smack_ipc_alloc_security),
+ LSM_HOOK_INIT(shm_associate, smack_shm_associate),
+ LSM_HOOK_INIT(shm_shmctl, smack_shm_shmctl),
+ LSM_HOOK_INIT(shm_shmat, smack_shm_shmat),
+
+ LSM_HOOK_INIT(sem_alloc_security, smack_ipc_alloc_security),
+ LSM_HOOK_INIT(sem_associate, smack_sem_associate),
+ LSM_HOOK_INIT(sem_semctl, smack_sem_semctl),
+ LSM_HOOK_INIT(sem_semop, smack_sem_semop),
+
+ LSM_HOOK_INIT(d_instantiate, smack_d_instantiate),
+
+ LSM_HOOK_INIT(getprocattr, smack_getprocattr),
+ LSM_HOOK_INIT(setprocattr, smack_setprocattr),
+
+ LSM_HOOK_INIT(unix_stream_connect, smack_unix_stream_connect),
+ LSM_HOOK_INIT(unix_may_send, smack_unix_may_send),
+
+ LSM_HOOK_INIT(socket_post_create, smack_socket_post_create),
+ LSM_HOOK_INIT(socket_socketpair, smack_socket_socketpair),
+#ifdef SMACK_IPV6_PORT_LABELING
+ LSM_HOOK_INIT(socket_bind, smack_socket_bind),
+#endif
+ LSM_HOOK_INIT(socket_connect, smack_socket_connect),
+ LSM_HOOK_INIT(socket_sendmsg, smack_socket_sendmsg),
+ LSM_HOOK_INIT(socket_sock_rcv_skb, smack_socket_sock_rcv_skb),
+ LSM_HOOK_INIT(socket_getpeersec_stream, smack_socket_getpeersec_stream),
+ LSM_HOOK_INIT(socket_getpeersec_dgram, smack_socket_getpeersec_dgram),
+ LSM_HOOK_INIT(sk_alloc_security, smack_sk_alloc_security),
+ LSM_HOOK_INIT(sk_free_security, smack_sk_free_security),
+ LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security),
+ LSM_HOOK_INIT(sock_graft, smack_sock_graft),
+ LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request),
+ LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone),
+
+ /* key management security hooks */
+#ifdef CONFIG_KEYS
+ LSM_HOOK_INIT(key_alloc, smack_key_alloc),
+ LSM_HOOK_INIT(key_free, smack_key_free),
+ LSM_HOOK_INIT(key_permission, smack_key_permission),
+ LSM_HOOK_INIT(key_getsecurity, smack_key_getsecurity),
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ LSM_HOOK_INIT(watch_key, smack_watch_key),
+#endif
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_WATCH_QUEUE
+ LSM_HOOK_INIT(post_notification, smack_post_notification),
+#endif
+
+ /* Audit hooks */
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(audit_rule_init, smack_audit_rule_init),
+ LSM_HOOK_INIT(audit_rule_known, smack_audit_rule_known),
+ LSM_HOOK_INIT(audit_rule_match, smack_audit_rule_match),
+#endif /* CONFIG_AUDIT */
+
+ LSM_HOOK_INIT(ismaclabel, smack_ismaclabel),
+ LSM_HOOK_INIT(secid_to_secctx, smack_secid_to_secctx),
+ LSM_HOOK_INIT(secctx_to_secid, smack_secctx_to_secid),
+ LSM_HOOK_INIT(inode_notifysecctx, smack_inode_notifysecctx),
+ LSM_HOOK_INIT(inode_setsecctx, smack_inode_setsecctx),
+ LSM_HOOK_INIT(inode_getsecctx, smack_inode_getsecctx),
+ LSM_HOOK_INIT(inode_copy_up, smack_inode_copy_up),
+ LSM_HOOK_INIT(inode_copy_up_xattr, smack_inode_copy_up_xattr),
+ LSM_HOOK_INIT(dentry_create_files_as, smack_dentry_create_files_as),
+#ifdef CONFIG_IO_URING
+ LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds),
+ LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, smack_uring_cmd),
+#endif
+};
+
+
+static __init void init_smack_known_list(void)
+{
+ /*
+ * Initialize rule list locks
+ */
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+ /*
+ * Initialize rule lists
+ */
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+ /*
+ * Create the known labels list
+ */
+ smk_insert_entry(&smack_known_huh);
+ smk_insert_entry(&smack_known_hat);
+ smk_insert_entry(&smack_known_star);
+ smk_insert_entry(&smack_known_floor);
+ smk_insert_entry(&smack_known_web);
+}
+
+/**
+ * smack_init - initialize the smack system
+ *
+ * Returns 0 on success, -ENOMEM if there's no memory
+ */
+static __init int smack_init(void)
+{
+ struct cred *cred = (struct cred *) current->cred;
+ struct task_smack *tsp;
+
+ smack_rule_cache = KMEM_CACHE(smack_rule, 0);
+ if (!smack_rule_cache)
+ return -ENOMEM;
+
+ /*
+ * Set the security state for the initial task.
+ */
+ tsp = smack_cred(cred);
+ init_task_smack(tsp, &smack_known_floor, &smack_known_floor);
+
+ /*
+ * Register with LSM
+ */
+ security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), "smack");
+ smack_enabled = 1;
+
+ pr_info("Smack: Initializing.\n");
+#ifdef CONFIG_SECURITY_SMACK_NETFILTER
+ pr_info("Smack: Netfilter enabled.\n");
+#endif
+#ifdef SMACK_IPV6_PORT_LABELING
+ pr_info("Smack: IPv6 port labeling enabled.\n");
+#endif
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ pr_info("Smack: IPv6 Netfilter enabled.\n");
+#endif
+
+ /* initialize the smack_known_list */
+ init_smack_known_list();
+
+ return 0;
+}
+
+/*
+ * Smack requires early initialization in order to label
+ * all processes and objects when they are created.
+ */
+DEFINE_LSM(smack) = {
+ .name = "smack",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .blobs = &smack_blob_sizes,
+ .init = smack_init,
+};
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
new file mode 100644
index 000000000..b945c1d3a
--- /dev/null
+++ b/security/smack/smack_netfilter.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simplified MAC Kernel (smack) security module
+ *
+ * This file contains the Smack netfilter implementation
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Copyright (C) 2014 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2014 Intel Corporation.
+ */
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netdevice.h>
+#include <net/inet_sock.h>
+#include <net/net_namespace.h>
+#include "smack.h"
+
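+/**
+ * smack_ip_output - set the secmark on a locally generated packet
+ * @priv: unused
+ * @skb: the packet
+ * @state: unused
+ *
+ * Tags the packet with the secid of the sending socket's outbound
+ * Smack label so that secmark based checks can recover the label.
+ *
+ * Returns NF_ACCEPT in all cases
+ */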
+static unsigned int smack_ip_output(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct sock *sk = skb_to_full_sk(skb);
+ struct socket_smack *ssp;
+ struct smack_known *skp;
+
+ if (sk && sk->sk_security) {
+ ssp = sk->sk_security;
+ skp = ssp->smk_out;
+ skb->secmark = skp->smk_secid;
+ }
+
+ return NF_ACCEPT;
+}
+
+static const struct nf_hook_ops smack_nf_ops[] = {
+ {
+ .hook = smack_ip_output,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .hook = smack_ip_output,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+#endif /* IPV6 */
+};
+
+static int __net_init smack_nf_register(struct net *net)
+{
+ return nf_register_net_hooks(net, smack_nf_ops,
+ ARRAY_SIZE(smack_nf_ops));
+}
+
+static void __net_exit smack_nf_unregister(struct net *net)
+{
+ nf_unregister_net_hooks(net, smack_nf_ops, ARRAY_SIZE(smack_nf_ops));
+}
+
+static struct pernet_operations smack_net_ops = {
+ .init = smack_nf_register,
+ .exit = smack_nf_unregister,
+};
+
+static int __init smack_nf_ip_init(void)
+{
+ if (smack_enabled == 0)
+ return 0;
+
+ printk(KERN_DEBUG "Smack: Registering netfilter hooks\n");
+ return register_pernet_subsys(&smack_net_ops);
+}
+
+__initcall(smack_nf_ip_init);
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
new file mode 100644
index 000000000..da7db9e22
--- /dev/null
+++ b/security/smack/smackfs.c
@@ -0,0 +1,3035 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Authors:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ * Ahmed S. Darwish <darwish.07@gmail.com>
+ *
+ * Special thanks to the authors of selinuxfs.
+ *
+ * Karl MacMillan <kmacmillan@tresys.com>
+ * James Morris <jmorris@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include <net/cipso_ipv4.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/audit.h>
+#include <linux/magic.h>
+#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include "smack.h"
+
+#define BEBITS (sizeof(__be32) * 8)
+/*
+ * smackfs pseudo filesystem.
+ */
+
+enum smk_inos {
+ SMK_ROOT_INO = 2,
+ SMK_LOAD = 3, /* load policy */
+ SMK_CIPSO = 4, /* load label -> CIPSO mapping */
+ SMK_DOI = 5, /* CIPSO DOI */
+ SMK_DIRECT = 6, /* CIPSO level indicating direct label */
+ SMK_AMBIENT = 7, /* internet ambient label */
+ SMK_NET4ADDR = 8, /* single label hosts */
+ SMK_ONLYCAP = 9, /* the only "capable" label */
+ SMK_LOGGING = 10, /* logging */
+ SMK_LOAD_SELF = 11, /* task specific rules */
+ SMK_ACCESSES = 12, /* access policy */
+ SMK_MAPPED = 13, /* CIPSO level indicating mapped label */
+ SMK_LOAD2 = 14, /* load policy with long labels */
+ SMK_LOAD_SELF2 = 15, /* load task specific rules with long labels */
+ SMK_ACCESS2 = 16, /* make an access check with long labels */
+ SMK_CIPSO2 = 17, /* load long label -> CIPSO mapping */
+ SMK_REVOKE_SUBJ = 18, /* set rules with subject label to '-' */
+ SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */
+	SMK_SYSLOG	= 20,	/* change syslog label */
+ SMK_PTRACE = 21, /* set ptrace rule */
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ SMK_UNCONFINED = 22, /* define an unconfined label */
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ SMK_NET6ADDR = 23, /* single label IPv6 hosts */
+#endif /* CONFIG_IPV6 */
+ SMK_RELABEL_SELF = 24, /* relabel possible without CAP_MAC_ADMIN */
+};
+
+/*
+ * List locks
+ */
+static DEFINE_MUTEX(smack_cipso_lock);
+static DEFINE_MUTEX(smack_ambient_lock);
+static DEFINE_MUTEX(smk_net4addr_lock);
+#if IS_ENABLED(CONFIG_IPV6)
+static DEFINE_MUTEX(smk_net6addr_lock);
+#endif /* CONFIG_IPV6 */
+
+/*
+ * This is the "ambient" label for network traffic.
+ * If it isn't somehow marked, use this.
+ * It can be reset via smackfs/ambient
+ */
+struct smack_known *smack_net_ambient;
+
+/*
+ * This is the level in a CIPSO header that indicates a
+ * smack label is contained directly in the category set.
+ * It can be reset via smackfs/direct
+ */
+int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
+
+/*
+ * This is the level in a CIPSO header that indicates a
+ * secid is contained directly in the category set.
+ * It can be reset via smackfs/mapped
+ */
+int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/*
+ * Allow one label to be unconfined. This is for
+ * debugging and application bring-up purposes only.
+ * It is bad and wrong, but everyone seems to expect
+ * to have it.
+ */
+struct smack_known *smack_unconfined;
+#endif
+
+/*
+ * If this value is set restrict syslog use to the label specified.
+ * It can be reset via smackfs/syslog
+ */
+struct smack_known *smack_syslog_label;
+
+/*
+ * Ptrace current rule
+ * SMACK_PTRACE_DEFAULT regular smack ptrace rules (/proc based)
+ * SMACK_PTRACE_EXACT    labels must match, but can be overridden with
+ *			  CAP_SYS_PTRACE
+ * SMACK_PTRACE_DRACONIAN labels must match, CAP_SYS_PTRACE has no effect
+ */
+int smack_ptrace_rule = SMACK_PTRACE_DEFAULT;
+
+/*
+ * Certain IP addresses may be designated as single label hosts.
+ * Packets are sent there unlabeled, but only from tasks that
+ * can write to the specified label.
+ */
+
+LIST_HEAD(smk_net4addr_list);
+#if IS_ENABLED(CONFIG_IPV6)
+LIST_HEAD(smk_net6addr_list);
+#endif /* CONFIG_IPV6 */
+
+/*
+ * Rule lists are maintained for each label.
+ */
+struct smack_parsed_rule {
+ struct smack_known *smk_subject;
+ struct smack_known *smk_object;
+ int smk_access1;
+ int smk_access2;
+};
+
+static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
+
+/*
+ * Values for parsing cipso rules
+ * SMK_DIGITLEN: Length of a digit field in a rule.
+ * SMK_CIPSOMIN: Minimum possible cipso rule length.
+ * SMK_CIPSOMAX: Maximum possible cipso rule length.
+ */
+#define SMK_DIGITLEN 4
+#define SMK_CIPSOMIN (SMK_LABELLEN + 2 * SMK_DIGITLEN)
+#define SMK_CIPSOMAX (SMK_CIPSOMIN + SMACK_CIPSO_MAXCATNUM * SMK_DIGITLEN)
+
+/*
+ * Values for parsing MAC rules
+ * SMK_ACCESS: Maximum possible combination of access permissions
+ * SMK_ACCESSLEN: Maximum length for a rule access field
+ * SMK_LOADLEN: Smack rule length
+ */
+#define SMK_OACCESS "rwxa"
+#define SMK_ACCESS "rwxatl"
+#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
+#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
+#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
+#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+
+/*
+ * Strictly for CIPSO level manipulation.
+ * Set the category bit number in a smack label sized buffer.
+ */
+static inline void smack_catset_bit(unsigned int cat, char *catsetp)
+{
+ if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
+ return;
+
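+	/* Categories are 1-based: category 1 sets the most significant bit of byte 0. */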
+ catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+}
+
+/**
+ * smk_netlabel_audit_set - fill a netlbl_audit struct
+ * @nap: structure to fill
+ */
+static void smk_netlabel_audit_set(struct netlbl_audit *nap)
+{
+ struct smack_known *skp = smk_of_current();
+
+ nap->loginuid = audit_get_loginuid(current);
+ nap->sessionid = audit_get_sessionid(current);
+ nap->secid = skp->smk_secid;
+}
+
+/*
+ * Value for parsing single label host rules
+ * "1.2.3.4 X"
+ */
+#define SMK_NETLBLADDRMIN 9
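+/* strlen("1.2.3.4 X") == 9, the shortest possible host rule */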
+
+/**
+ * smk_set_access - add a rule to the rule list or replace an old rule
+ * @srp: the rule to add or replace
+ * @rule_list: the list of rules
+ * @rule_lock: the rule list lock
+ *
+ * Looks through the current subject/object/access list for
+ * the subject/object pair and replaces the access that was
+ * there. If the pair isn't found add it with the specified
+ * access.
+ *
+ * Returns 0 if nothing goes wrong or -ENOMEM if it fails
+ * during the allocation of the new pair to add.
+ */
+static int smk_set_access(struct smack_parsed_rule *srp,
+ struct list_head *rule_list,
+ struct mutex *rule_lock)
+{
+ struct smack_rule *sp;
+ int found = 0;
+ int rc = 0;
+
+ mutex_lock(rule_lock);
+
+ /*
+ * Because the object label is less likely to match
+	 * than the subject label, check it first
+ */
+ list_for_each_entry_rcu(sp, rule_list, list) {
+ if (sp->smk_object == srp->smk_object &&
+ sp->smk_subject == srp->smk_subject) {
+ found = 1;
+ sp->smk_access |= srp->smk_access1;
+ sp->smk_access &= ~srp->smk_access2;
+ break;
+ }
+ }
+
+ if (found == 0) {
+ sp = kmem_cache_zalloc(smack_rule_cache, GFP_KERNEL);
+ if (sp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sp->smk_subject = srp->smk_subject;
+ sp->smk_object = srp->smk_object;
+ sp->smk_access = srp->smk_access1 & ~srp->smk_access2;
+
+ list_add_rcu(&sp->list, rule_list);
+ }
+
+out:
+ mutex_unlock(rule_lock);
+ return rc;
+}
+
+/**
+ * smk_perm_from_str - parse smack accesses from a text string
+ * @string: a text string that contains a Smack accesses code
+ *
+ * Returns an integer with respective bits set for specified accesses.
+ */
+static int smk_perm_from_str(const char *string)
+{
+ int perm = 0;
+ const char *cp;
+
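+	/* The default case ends the scan at '\0' or any other unrecognized character. */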
+ for (cp = string; ; cp++)
+ switch (*cp) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ perm |= MAY_READ;
+ break;
+ case 'w':
+ case 'W':
+ perm |= MAY_WRITE;
+ break;
+ case 'x':
+ case 'X':
+ perm |= MAY_EXEC;
+ break;
+ case 'a':
+ case 'A':
+ perm |= MAY_APPEND;
+ break;
+ case 't':
+ case 'T':
+ perm |= MAY_TRANSMUTE;
+ break;
+ case 'l':
+ case 'L':
+ perm |= MAY_LOCK;
+ break;
+ case 'b':
+ case 'B':
+ perm |= MAY_BRINGUP;
+ break;
+ default:
+ return perm;
+ }
+}
+
+/**
+ * smk_fill_rule - Fill Smack rule from strings
+ * @subject: subject label string
+ * @object: object label string
+ * @access1: access string
+ * @access2: string with permissions to be removed
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ * @len: label length limit
+ *
+ * Returns 0 on success, appropriate error code on failure.
+ */
+static int smk_fill_rule(const char *subject, const char *object,
+ const char *access1, const char *access2,
+ struct smack_parsed_rule *rule, int import,
+ int len)
+{
+ const char *cp;
+ struct smack_known *skp;
+
+ if (import) {
+ rule->smk_subject = smk_import_entry(subject, len);
+ if (IS_ERR(rule->smk_subject))
+ return PTR_ERR(rule->smk_subject);
+
+ rule->smk_object = smk_import_entry(object, len);
+ if (IS_ERR(rule->smk_object))
+ return PTR_ERR(rule->smk_object);
+ } else {
+ cp = smk_parse_smack(subject, len);
+ if (IS_ERR(cp))
+ return PTR_ERR(cp);
+ skp = smk_find_entry(cp);
+ kfree(cp);
+ if (skp == NULL)
+ return -ENOENT;
+ rule->smk_subject = skp;
+
+ cp = smk_parse_smack(object, len);
+ if (IS_ERR(cp))
+ return PTR_ERR(cp);
+ skp = smk_find_entry(cp);
+ kfree(cp);
+ if (skp == NULL)
+ return -ENOENT;
+ rule->smk_object = skp;
+ }
+
+ rule->smk_access1 = smk_perm_from_str(access1);
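+	/* With no explicit disable set, strip every access not granted above. */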
+ if (access2)
+ rule->smk_access2 = smk_perm_from_str(access2);
+ else
+ rule->smk_access2 = ~rule->smk_access1;
+
+ return 0;
+}
+
+/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ *
+ * Returns 0 on success, -ERRNO on failure.
+ */
+static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
+ int import)
+{
+ int rc;
+
+ rc = smk_fill_rule(data, data + SMK_LABELLEN,
+ data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
+ import, SMK_LABELLEN);
+ return rc;
+}
+
+/**
+ * smk_parse_long_rule - parse Smack rule from rule string
+ * @data: string to be parsed, null terminated
+ * @rule: Will be filled with Smack parsed rule
+ * @import: if non-zero, import labels
+ * @tokens: number of substrings expected in data
+ *
+ * Returns number of processed bytes on success, -ERRNO on failure.
+ */
+static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule,
+ int import, int tokens)
+{
+ ssize_t cnt = 0;
+ char *tok[4];
+ int rc;
+ int i;
+
+ /*
+	 * Parse the rule in place, replacing all whitespace with '\0'
+ */
+ for (i = 0; i < tokens; ++i) {
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
+
+ if (data[cnt] == '\0')
+ /* Unexpected end of data */
+ return -EINVAL;
+
+ tok[i] = data + cnt;
+
+ while (data[cnt] && !isspace(data[cnt]))
+ ++cnt;
+ }
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
+
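+	/* NULL the unused token slots; smk_fill_rule() treats a NULL access2 as absent. */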
+ while (i < 4)
+ tok[i++] = NULL;
+
+ rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0);
+ return rc == 0 ? cnt : rc;
+}
+
+#define SMK_FIXED24_FMT 0 /* Fixed 24byte label format */
+#define SMK_LONG_FMT 1 /* Variable long label format */
+#define SMK_CHANGE_FMT 2 /* Rule modification format */
+/**
+ * smk_write_rules_list - write() for any /smack rule file
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ * @rule_list: the list of rules to write to
+ * @rule_lock: lock for the rule list
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
+ *
+ * Get one smack access rule from above.
+ * The format for SMK_LONG_FMT is:
+ * "subject<whitespace>object<whitespace>access[<whitespace>...]"
+ * The format for SMK_FIXED24_FMT is exactly:
+ * "subject object rwxat"
+ * The format for SMK_CHANGE_FMT is:
+ * "subject<whitespace>object<whitespace>
+ * acc_enable<whitespace>acc_disable[<whitespace>...]"
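+ * For example, an SMK_LONG_FMT rule might be "Subject Object rwxa".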
+ */
+static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct list_head *rule_list,
+ struct mutex *rule_lock, int format)
+{
+ struct smack_parsed_rule rule;
+ char *data;
+ int rc;
+ int trunc = 0;
+ int tokens;
+ ssize_t cnt = 0;
+
+ /*
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (format == SMK_FIXED24_FMT) {
+ /*
+ * Minor hack for backward compatibility
+ */
+ if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
+ return -EINVAL;
+ } else {
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE - 1;
+ trunc = 1;
+ }
+ }
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /*
+	 * In case only part of the user buffer was parsed,
+	 * avoid leaving a partial rule in the data buffer
+ */
+ if (trunc) {
+ while (count > 0 && (data[count - 1] != '\n'))
+ --count;
+ if (count == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ data[count] = '\0';
+ tokens = (format == SMK_CHANGE_FMT ? 4 : 3);
+ while (cnt < count) {
+ if (format == SMK_FIXED24_FMT) {
+ rc = smk_parse_rule(data, &rule, 1);
+ if (rc < 0)
+ goto out;
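+			/* The fixed 24-byte format carries exactly one rule per write. */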
+ cnt = count;
+ } else {
+ rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens);
+ if (rc < 0)
+ goto out;
+ if (rc == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cnt += rc;
+ }
+
+ if (rule_list == NULL)
+ rc = smk_set_access(&rule, &rule.smk_subject->smk_rules,
+ &rule.smk_subject->smk_rules_lock);
+ else
+ rc = smk_set_access(&rule, rule_list, rule_lock);
+
+ if (rc)
+ goto out;
+ }
+
+ rc = cnt;
+out:
+ kfree(data);
+ return rc;
+}
+
+/*
+ * Core logic for smackfs seq list operations.
+ */
+
+static void *smk_seq_start(struct seq_file *s, loff_t *pos,
+ struct list_head *head)
+{
+ struct list_head *list;
+ int i = *pos;
+
+ rcu_read_lock();
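+	/* Walk the RCU-protected list, skipping the first *pos entries. */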
+ for (list = rcu_dereference(list_next_rcu(head));
+ list != head;
+ list = rcu_dereference(list_next_rcu(list))) {
+ if (i-- == 0)
+ return list;
+ }
+
+ return NULL;
+}
+
+static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
+ struct list_head *head)
+{
+ struct list_head *list = v;
+
+ ++*pos;
+ list = rcu_dereference(list_next_rcu(list));
+
+ return (list == head) ? NULL : list;
+}
+
+static void smk_seq_stop(struct seq_file *s, void *v)
+{
+ rcu_read_unlock();
+}
+
+static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
+{
+ /*
+ * Don't show any rules with label names too long for
+	 * the interface file (/smack/load or /smack/load2)
+ * because you should expect to be able to write
+ * anything you read back.
+ */
+ if (strlen(srp->smk_subject->smk_known) >= max ||
+ strlen(srp->smk_object->smk_known) >= max)
+ return;
+
+ if (srp->smk_access == 0)
+ return;
+
+ seq_printf(s, "%s %s",
+ srp->smk_subject->smk_known,
+ srp->smk_object->smk_known);
+
+ seq_putc(s, ' ');
+
+ if (srp->smk_access & MAY_READ)
+ seq_putc(s, 'r');
+ if (srp->smk_access & MAY_WRITE)
+ seq_putc(s, 'w');
+ if (srp->smk_access & MAY_EXEC)
+ seq_putc(s, 'x');
+ if (srp->smk_access & MAY_APPEND)
+ seq_putc(s, 'a');
+ if (srp->smk_access & MAY_TRANSMUTE)
+ seq_putc(s, 't');
+ if (srp->smk_access & MAY_LOCK)
+ seq_putc(s, 'l');
+ if (srp->smk_access & MAY_BRINGUP)
+ seq_putc(s, 'b');
+
+ seq_putc(s, '\n');
+}
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smack_known_list);
+}
+
+static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smack_known_list);
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp;
+ struct smack_known *skp =
+ list_entry_rcu(list, struct smack_known, list);
+
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list)
+ smk_rule_show(s, srp, SMK_LABELLEN);
+
+ return 0;
+}
+
+static const struct seq_operations load_seq_ops = {
+ .start = load2_seq_start,
+ .next = load2_seq_next,
+ .show = load_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load - open() for /smack/load
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_seq_ops);
+}
+
+/**
+ * smk_write_load - write() for /smack/load
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_load_ops = {
+ .open = smk_open_load,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load,
+ .release = seq_release,
+};
+
+/**
+ * smk_cipso_doi - initialize the CIPSO domain
+ */
+static void smk_cipso_doi(void)
+{
+ int rc;
+ struct cipso_v4_doi *doip;
+ struct netlbl_audit nai;
+
+ smk_netlabel_audit_set(&nai);
+
+ rc = netlbl_cfg_map_del(NULL, PF_INET, NULL, NULL, &nai);
+ if (rc != 0)
+ printk(KERN_WARNING "%s:%d remove rc = %d\n",
+ __func__, __LINE__, rc);
+
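+	/* __GFP_NOFAIL means the allocation cannot fail, hence no NULL check. */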
+ doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL | __GFP_NOFAIL);
+ doip->map.std = NULL;
+ doip->doi = smk_cipso_doi_value;
+ doip->type = CIPSO_V4_MAP_PASS;
+ doip->tags[0] = CIPSO_V4_TAG_RBITMAP;
+ for (rc = 1; rc < CIPSO_V4_TAG_MAXCNT; rc++)
+ doip->tags[rc] = CIPSO_V4_TAG_INVALID;
+
+ rc = netlbl_cfg_cipsov4_add(doip, &nai);
+ if (rc != 0) {
+ printk(KERN_WARNING "%s:%d cipso add rc = %d\n",
+ __func__, __LINE__, rc);
+ kfree(doip);
+ return;
+ }
+ rc = netlbl_cfg_cipsov4_map_add(doip->doi, NULL, NULL, NULL, &nai);
+ if (rc != 0) {
+ printk(KERN_WARNING "%s:%d map add rc = %d\n",
+ __func__, __LINE__, rc);
+ netlbl_cfg_cipsov4_del(doip->doi, &nai);
+ return;
+ }
+}
+
+/**
+ * smk_unlbl_ambient - initialize the unlabeled domain
+ * @oldambient: previous domain string
+ */
+static void smk_unlbl_ambient(char *oldambient)
+{
+ int rc;
+ struct netlbl_audit nai;
+
+ smk_netlabel_audit_set(&nai);
+
+ if (oldambient != NULL) {
+ rc = netlbl_cfg_map_del(oldambient, PF_INET, NULL, NULL, &nai);
+ if (rc != 0)
+ printk(KERN_WARNING "%s:%d remove rc = %d\n",
+ __func__, __LINE__, rc);
+ }
+ if (smack_net_ambient == NULL)
+ smack_net_ambient = &smack_known_floor;
+
+ rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET,
+ NULL, NULL, &nai);
+ if (rc != 0)
+ printk(KERN_WARNING "%s:%d add rc = %d\n",
+ __func__, __LINE__, rc);
+}
+
+/*
+ * Seq_file read operations for /smack/cipso
+ */
+
+static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smack_known_list);
+}
+
+static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smack_known_list);
+}
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known *skp =
+ list_entry_rcu(list, struct smack_known, list);
+ struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+ char sep = '/';
+ int i;
+
+ /*
+ * Don't show a label that could not have been set using
+ * /smack/cipso. This is in support of the notion that
+ * anything read from /smack/cipso ought to be writeable
+ * to /smack/cipso.
+ *
+ * /smack/cipso2 should be used instead.
+ */
+ if (strlen(skp->smk_known) >= SMK_LABELLEN)
+ return 0;
+
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+ for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations cipso_seq_ops = {
+ .start = cipso_seq_start,
+ .next = cipso_seq_next,
+ .show = cipso_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso - open() for /smack/cipso
+ * @inode: inode structure representing file
+ * @file: "cipso" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso
+ * file_operations
+ */
+static int smk_open_cipso(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cipso_seq_ops);
+}
+
+/**
+ * smk_set_cipso - do the work for write() for cipso and cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ * @format: /smack/cipso or /smack/cipso2
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
+{
+ struct netlbl_lsm_catmap *old_cat;
+ struct smack_known *skp;
+ struct netlbl_lsm_secattr ncats;
+ char mapcatset[SMK_CIPSOLEN];
+ int maplevel;
+ unsigned int cat;
+ int catlen;
+ ssize_t rc = -EINVAL;
+ char *data = NULL;
+ char *rule;
+ int ret;
+ int i;
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (*ppos != 0)
+ return -EINVAL;
+ if (format == SMK_FIXED24_FMT &&
+ (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
+ return -EINVAL;
+ if (count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ rule = data;
+ /*
+ * Only allow one writer at a time. Writes should be
+ * quite rare and small in any case.
+ */
+ mutex_lock(&smack_cipso_lock);
+
+ skp = smk_import_entry(rule, 0);
+ if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
+ goto out;
+ }
+
+ if (format == SMK_FIXED24_FMT)
+ rule += SMK_LABELLEN;
+ else
+ rule += strlen(skp->smk_known) + 1;
+
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
+ ret = sscanf(rule, "%d", &maplevel);
+ if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ goto out;
+
+ rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
+ ret = sscanf(rule, "%d", &catlen);
+ if (ret != 1 || catlen < 0 || catlen > SMACK_CIPSO_MAXCATNUM)
+ goto out;
+
+ if (format == SMK_FIXED24_FMT &&
+ count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
+ goto out;
+
+ memset(mapcatset, 0, sizeof(mapcatset));
+
+ for (i = 0; i < catlen; i++) {
+ rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+ ret = sscanf(rule, "%u", &cat);
+ if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
+ goto out;
+
+ smack_catset_bit(cat, mapcatset);
+ }
+
+ rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+ if (rc >= 0) {
+ old_cat = skp->smk_netlabel.attr.mls.cat;
+ skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
+ skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+ synchronize_rcu();
+ netlbl_catmap_free(old_cat);
+ rc = count;
+ /*
+ * This mapping may have been cached, so clear the cache.
+ */
+ netlbl_cache_invalidate();
+ }
+
+out:
+ mutex_unlock(&smack_cipso_lock);
+ kfree(data);
+ return rc;
+}
+
+/**
+ * smk_write_cipso - write() for /smack/cipso
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_cipso_ops = {
+ .open = smk_open_cipso,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_cipso,
+ .release = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/cipso2
+ */
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known *skp =
+ list_entry_rcu(list, struct smack_known, list);
+ struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+ char sep = '/';
+ int i;
+
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+ for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations cipso2_seq_ops = {
+ .start = cipso_seq_start,
+ .next = cipso_seq_next,
+ .show = cipso2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso2 - open() for /smack/cipso2
+ * @inode: inode structure representing file
+ * @file: "cipso2" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso2
+ * file_operations
+ */
+static int smk_open_cipso2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cipso2_seq_ops);
+}
+
+/**
+ * smk_write_cipso2 - write() for /smack/cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_cipso2_ops = {
+ .open = smk_open_cipso2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_cipso2,
+ .release = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/netlabel
+ */
+
+static void *net4addr_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smk_net4addr_list);
+}
+
+static void *net4addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smk_net4addr_list);
+}
+
+/*
+ * Print host/label pairs
+ */
+static int net4addr_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smk_net4addr *skp =
+ list_entry_rcu(list, struct smk_net4addr, list);
+ char *kp = SMACK_CIPSO_OPTION;
+
+ if (skp->smk_label != NULL)
+ kp = skp->smk_label->smk_known;
+ seq_printf(s, "%pI4/%d %s\n", &skp->smk_host.s_addr,
+ skp->smk_masks, kp);
+
+ return 0;
+}
+
+static const struct seq_operations net4addr_seq_ops = {
+ .start = net4addr_seq_start,
+ .next = net4addr_seq_next,
+ .show = net4addr_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_net4addr - open() for /smack/netlabel
+ * @inode: inode structure representing file
+ * @file: "netlabel" file pointer
+ *
+ * Connect our net4addr_seq_* operations with /smack/netlabel
+ * file_operations
+ */
+static int smk_open_net4addr(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &net4addr_seq_ops);
+}
+
+/**
+ * smk_net4addr_insert - insert an entry into the net4addr list
+ * @new: netlabel entry to insert
+ *
+ * This helper inserts a netlabel into the smk_net4addr_list,
+ * sorted by netmask length (longest to shortest),
+ * locked by &smk_net4addr_lock in smk_write_net4addr.
+ *
+ */
+static void smk_net4addr_insert(struct smk_net4addr *new)
+{
+ struct smk_net4addr *m;
+ struct smk_net4addr *m_next;
+
+ if (list_empty(&smk_net4addr_list)) {
+ list_add_rcu(&new->list, &smk_net4addr_list);
+ return;
+ }
+
+ m = list_entry_rcu(smk_net4addr_list.next,
+ struct smk_net4addr, list);
+
+ /* the comparison '>' is a bit hacky, but works */
+ if (new->smk_masks > m->smk_masks) {
+ list_add_rcu(&new->list, &smk_net4addr_list);
+ return;
+ }
+
+ list_for_each_entry_rcu(m, &smk_net4addr_list, list) {
+ if (list_is_last(&m->list, &smk_net4addr_list)) {
+ list_add_rcu(&new->list, &m->list);
+ return;
+ }
+ m_next = list_entry_rcu(m->list.next,
+ struct smk_net4addr, list);
+ if (new->smk_masks > m_next->smk_masks) {
+ list_add_rcu(&new->list, &m->list);
+ return;
+ }
+ }
+}
+
+/**
+ * smk_write_net4addr - write() for /smack/netlabel
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one net4addr per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smk_net4addr *snp;
+ struct sockaddr_in newname;
+ char *smack;
+ struct smack_known *skp = NULL;
+ char *data;
+ char *host = (char *)&newname.sin_addr.s_addr;
+ int rc;
+ struct netlbl_audit audit_info;
+ struct in_addr mask;
+ unsigned int m;
+ unsigned int masks;
+ int found;
+ u32 mask_bits = (1<<31);
+ __be32 nsa;
+ u32 temp_mask;
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ * "<addr/mask, as a.b.c.d/e><space><label>"
+ * "<addr, as a.b.c.d><space><label>"
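+	 * e.g. "192.168.0.0/16 SomeLabel"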
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (*ppos != 0)
+ return -EINVAL;
+ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ smack = kzalloc(count + 1, GFP_KERNEL);
+ if (smack == NULL) {
+ rc = -ENOMEM;
+ goto free_data_out;
+ }
+
+ rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
+ &host[0], &host[1], &host[2], &host[3], &masks, smack);
+ if (rc != 6) {
+ rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
+ &host[0], &host[1], &host[2], &host[3], smack);
+ if (rc != 5) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ masks = 32;
+ }
+ if (masks > BEBITS) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+
+ /*
+ * If smack begins with '-', it is an option, don't import it
+ */
+ if (smack[0] != '-') {
+ skp = smk_import_entry(smack, 0);
+ if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
+ goto free_out;
+ }
+ } else {
+ /*
+ * Only the -CIPSO option is supported for IPv4
+ */
+ if (strcmp(smack, SMACK_CIPSO_OPTION) != 0) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ }
+
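+	/* Build a mask with the top 'masks' bits set. */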
+ for (m = masks, temp_mask = 0; m > 0; m--) {
+ temp_mask |= mask_bits;
+ mask_bits >>= 1;
+ }
+ mask.s_addr = cpu_to_be32(temp_mask);
+
+ newname.sin_addr.s_addr &= mask.s_addr;
+ /*
+ * Only allow one writer at a time. Writes should be
+ * quite rare and small in any case.
+ */
+ mutex_lock(&smk_net4addr_lock);
+
+ nsa = newname.sin_addr.s_addr;
+ /* try to find if the prefix is already in the list */
+ found = 0;
+ list_for_each_entry_rcu(snp, &smk_net4addr_list, list) {
+ if (snp->smk_host.s_addr == nsa && snp->smk_masks == masks) {
+ found = 1;
+ break;
+ }
+ }
+ smk_netlabel_audit_set(&audit_info);
+
+ if (found == 0) {
+ snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+ if (snp == NULL)
+ rc = -ENOMEM;
+ else {
+ rc = 0;
+ snp->smk_host.s_addr = newname.sin_addr.s_addr;
+ snp->smk_mask.s_addr = mask.s_addr;
+ snp->smk_label = skp;
+ snp->smk_masks = masks;
+ smk_net4addr_insert(snp);
+ }
+ } else {
+ /*
+		 * Delete the unlabeled entry only if the previous label
+		 * wasn't the special CIPSO option.
+ */
+ if (snp->smk_label != NULL)
+ rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
+ &snp->smk_host, &snp->smk_mask,
+ PF_INET, &audit_info);
+ else
+ rc = 0;
+ snp->smk_label = skp;
+ }
+
+ /*
+ * Now tell netlabel about the single label nature of
+	 * this host so that incoming packets get labeled,
+	 * but only if we didn't get the special CIPSO option.
+ */
+ if (rc == 0 && skp != NULL)
+ rc = netlbl_cfg_unlbl_static_add(&init_net, NULL,
+ &snp->smk_host, &snp->smk_mask, PF_INET,
+ snp->smk_label->smk_secid, &audit_info);
+
+ if (rc == 0)
+ rc = count;
+
+ mutex_unlock(&smk_net4addr_lock);
+
+free_out:
+ kfree(smack);
+free_data_out:
+ kfree(data);
+
+ return rc;
+}
+
+static const struct file_operations smk_net4addr_ops = {
+ .open = smk_open_net4addr,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_net4addr,
+ .release = seq_release,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+/*
+ * Seq_file read operations for /smack/netlabel6
+ */
+
+static void *net6addr_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smk_net6addr_list);
+}
+
+static void *net6addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smk_net6addr_list);
+}
+
+/*
+ * Print host/label pairs
+ */
+static int net6addr_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smk_net6addr *skp =
+ list_entry(list, struct smk_net6addr, list);
+
+ if (skp->smk_label != NULL)
+ seq_printf(s, "%pI6/%d %s\n", &skp->smk_host, skp->smk_masks,
+ skp->smk_label->smk_known);
+
+ return 0;
+}
+
+static const struct seq_operations net6addr_seq_ops = {
+ .start = net6addr_seq_start,
+ .next = net6addr_seq_next,
+ .show = net6addr_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_net6addr - open() for /smack/netlabel6
+ * @inode: inode structure representing file
+ * @file: "netlabel6" file pointer
+ *
+ * Connect our net6addr_seq_* operations with /smack/netlabel6
+ * file_operations
+ */
+static int smk_open_net6addr(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &net6addr_seq_ops);
+}
+
+/**
+ * smk_net6addr_insert - insert an entry into the net6addr list
+ * @new: entry to insert
+ *
+ * This inserts an entry into the smk_net6addr_list,
+ * sorted by netmask length (longest to shortest),
+ * locked by &smk_net6addr_lock in smk_write_net6addr.
+ *
+ */
+static void smk_net6addr_insert(struct smk_net6addr *new)
+{
+ struct smk_net6addr *m_next;
+ struct smk_net6addr *m;
+
+ if (list_empty(&smk_net6addr_list)) {
+ list_add_rcu(&new->list, &smk_net6addr_list);
+ return;
+ }
+
+ m = list_entry_rcu(smk_net6addr_list.next,
+ struct smk_net6addr, list);
+
+ if (new->smk_masks > m->smk_masks) {
+ list_add_rcu(&new->list, &smk_net6addr_list);
+ return;
+ }
+
+ list_for_each_entry_rcu(m, &smk_net6addr_list, list) {
+ if (list_is_last(&m->list, &smk_net6addr_list)) {
+ list_add_rcu(&new->list, &m->list);
+ return;
+ }
+ m_next = list_entry_rcu(m->list.next,
+ struct smk_net6addr, list);
+ if (new->smk_masks > m_next->smk_masks) {
+ list_add_rcu(&new->list, &m->list);
+ return;
+ }
+ }
+}
+
+/**
+ * smk_write_net6addr - write() for /smack/netlabel6
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one net6addr per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smk_net6addr *snp;
+ struct in6_addr newname;
+ struct in6_addr fullmask;
+ struct smack_known *skp = NULL;
+ char *smack;
+ char *data;
+ int rc = 0;
+ int found = 0;
+ int i;
+ unsigned int scanned[8];
+ unsigned int m;
+ unsigned int mask = 128;
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ * "<addr/mask, as a:b:c:d:e:f:g:h/e><space><label>"
+ * "<addr, as a:b:c:d:e:f:g:h><space><label>"
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (*ppos != 0)
+ return -EINVAL;
+ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ smack = kzalloc(count + 1, GFP_KERNEL);
+ if (smack == NULL) {
+ rc = -ENOMEM;
+ goto free_data_out;
+ }
+
+ i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x/%u %s",
+ &scanned[0], &scanned[1], &scanned[2], &scanned[3],
+ &scanned[4], &scanned[5], &scanned[6], &scanned[7],
+ &mask, smack);
+ if (i != 10) {
+ i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x %s",
+ &scanned[0], &scanned[1], &scanned[2],
+ &scanned[3], &scanned[4], &scanned[5],
+ &scanned[6], &scanned[7], smack);
+ if (i != 9) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ }
+ if (mask > 128) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ for (i = 0; i < 8; i++) {
+ if (scanned[i] > 0xffff) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ newname.s6_addr16[i] = htons(scanned[i]);
+ }
+
+ /*
+ * If smack begins with '-', it is an option, don't import it
+ */
+ if (smack[0] != '-') {
+ skp = smk_import_entry(smack, 0);
+ if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
+ goto free_out;
+ }
+ } else {
+ /*
+ * Only -DELETE is supported for IPv6
+ */
+ if (strcmp(smack, SMACK_DELETE_OPTION) != 0) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+ }
+
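+	/* Expand the prefix length into a 128-bit mask, 16 bits at a time. */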
+ for (i = 0, m = mask; i < 8; i++) {
+ if (m >= 16) {
+ fullmask.s6_addr16[i] = 0xffff;
+ m -= 16;
+ } else if (m > 0) {
+ fullmask.s6_addr16[i] = (1 << m) - 1;
+ m = 0;
+ } else
+ fullmask.s6_addr16[i] = 0;
+ newname.s6_addr16[i] &= fullmask.s6_addr16[i];
+ }
+
+ /*
+ * Only allow one writer at a time. Writes should be
+ * quite rare and small in any case.
+ */
+ mutex_lock(&smk_net6addr_lock);
+ /*
+ * Try to find the prefix in the list
+ */
+ list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
+ if (mask != snp->smk_masks)
+ continue;
+ for (found = 1, i = 0; i < 8; i++) {
+ if (newname.s6_addr16[i] !=
+ snp->smk_host.s6_addr16[i]) {
+ found = 0;
+ break;
+ }
+ }
+ if (found == 1)
+ break;
+ }
+ if (found == 0) {
+ snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+ if (snp == NULL)
+ rc = -ENOMEM;
+ else {
+ snp->smk_host = newname;
+ snp->smk_mask = fullmask;
+ snp->smk_masks = mask;
+ snp->smk_label = skp;
+ smk_net6addr_insert(snp);
+ }
+ } else {
+ snp->smk_label = skp;
+ }
+
+ if (rc == 0)
+ rc = count;
+
+ mutex_unlock(&smk_net6addr_lock);
+
+free_out:
+ kfree(smack);
+free_data_out:
+ kfree(data);
+
+ return rc;
+}
+
+static const struct file_operations smk_net6addr_ops = {
+ .open = smk_open_net6addr,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_net6addr,
+ .release = seq_release,
+};
+#endif /* CONFIG_IPV6 */
+
+/**
+ * smk_read_doi - read() for /smack/doi
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_doi(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smk_cipso_doi_value);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_doi - write() for /smack/doi
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_doi(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ smk_cipso_doi_value = i;
+
+ smk_cipso_doi();
+
+ return count;
+}
+
+static const struct file_operations smk_doi_ops = {
+ .read = smk_read_doi,
+ .write = smk_write_doi,
+ .llseek = default_llseek,
+};
+
+/**
+ * smk_read_direct - read() for /smack/direct
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_direct(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smack_cipso_direct);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_direct - write() for /smack/direct
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_direct(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ char temp[80];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ /*
+ * Don't do anything if the value hasn't actually changed.
+ * If it is changing reset the level on entries that were
+ * set up to be direct when they were created.
+ */
+ if (smack_cipso_direct != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_direct)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_direct = i;
+ mutex_unlock(&smack_known_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations smk_direct_ops = {
+ .read = smk_read_direct,
+ .write = smk_write_direct,
+ .llseek = default_llseek,
+};
+
+/**
+ * smk_read_mapped - read() for /smack/mapped
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_mapped(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smack_cipso_mapped);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_mapped - write() for /smack/mapped
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ char temp[80];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ /*
+ * Don't do anything if the value hasn't actually changed.
+ * If it is changing reset the level on entries that were
+ * set up to be mapped when they were created.
+ */
+ if (smack_cipso_mapped != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_mapped)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_mapped = i;
+ mutex_unlock(&smack_known_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations smk_mapped_ops = {
+ .read = smk_read_mapped,
+ .write = smk_write_mapped,
+ .llseek = default_llseek,
+};
+
+/**
+ * smk_read_ambient - read() for /smack/ambient
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ ssize_t rc;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+ /*
+ * Being careful to avoid a problem in the case where
+ * smack_net_ambient gets changed in midstream.
+ */
+ mutex_lock(&smack_ambient_lock);
+
+ asize = strlen(smack_net_ambient->smk_known) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos,
+ smack_net_ambient->smk_known,
+ asize);
+ else
+ rc = -EINVAL;
+
+ mutex_unlock(&smack_ambient_lock);
+
+ return rc;
+}
+
+/**
+ * smk_write_ambient - write() for /smack/ambient
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ char *oldambient;
+ char *data;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ /* Enough data must be present */
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ skp = smk_import_entry(data, count);
+ if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
+ goto out;
+ }
+
+ mutex_lock(&smack_ambient_lock);
+
+ oldambient = smack_net_ambient->smk_known;
+ smack_net_ambient = skp;
+ smk_unlbl_ambient(oldambient);
+
+ mutex_unlock(&smack_ambient_lock);
+
+out:
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_ambient_ops = {
+ .read = smk_read_ambient,
+ .write = smk_write_ambient,
+ .llseek = default_llseek,
+};
+
+/*
+ * Seq_file operations for /smack/onlycap
+ */
+static void *onlycap_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smack_onlycap_list);
+}
+
+static void *onlycap_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smack_onlycap_list);
+}
+
+static int onlycap_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known_list_elem *sklep =
+ list_entry_rcu(list, struct smack_known_list_elem, list);
+
+ seq_puts(s, sklep->smk_label->smk_known);
+ seq_putc(s, ' ');
+
+ return 0;
+}
+
+static const struct seq_operations onlycap_seq_ops = {
+ .start = onlycap_seq_start,
+ .next = onlycap_seq_next,
+ .show = onlycap_seq_show,
+ .stop = smk_seq_stop,
+};
+
+static int smk_open_onlycap(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &onlycap_seq_ops);
+}
+
+/**
+ * smk_list_swap_rcu - swap public list with a private one in RCU-safe way
+ * @public: public list
+ * @private: private list
+ *
+ * The caller must hold an appropriate mutex to prevent concurrent
+ * modifications to the public list.
+ * The private list is assumed to be not yet accessible to other threads.
+ */
+static void smk_list_swap_rcu(struct list_head *public,
+ struct list_head *private)
+{
+ struct list_head *first, *last;
+
+ if (list_empty(public)) {
+ list_splice_init_rcu(private, public, synchronize_rcu);
+ } else {
+ /* Remember public list before replacing it */
+ first = public->next;
+ last = public->prev;
+
+ /* Publish private list in place of public in RCU-safe way */
+ private->prev->next = public;
+ private->next->prev = public;
+ rcu_assign_pointer(public->next, private->next);
+ public->prev = private->prev;
+
+ synchronize_rcu();
+
+ /* When all readers are done with the old public list,
+ * attach it in place of private */
+ private->next = first;
+ private->prev = last;
+ first->prev = private;
+ last->next = private;
+ }
+}
+
+/**
+ * smk_parse_label_list - parse list of Smack labels, separated by spaces
+ *
+ * @data: the string to parse
+ * @list: destination list
+ *
+ * Returns zero on success or error code, as appropriate
+ */
+static int smk_parse_label_list(char *data, struct list_head *list)
+{
+ char *tok;
+ struct smack_known *skp;
+ struct smack_known_list_elem *sklep;
+
+ while ((tok = strsep(&data, " ")) != NULL) {
+ if (!*tok)
+ continue;
+
+ skp = smk_import_entry(tok, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+
+ sklep = kzalloc(sizeof(*sklep), GFP_KERNEL);
+ if (sklep == NULL)
+ return -ENOMEM;
+
+ sklep->smk_label = skp;
+ list_add(&sklep->list, list);
+ }
+
+ return 0;
+}
+
+/**
+ * smk_destroy_label_list - destroy a list of smack_known_list_elem
+ * @list: header pointer of the list to destroy
+ */
+void smk_destroy_label_list(struct list_head *list)
+{
+ struct smack_known_list_elem *sklep;
+ struct smack_known_list_elem *sklep2;
+
+ list_for_each_entry_safe(sklep, sklep2, list, list)
+ kfree(sklep);
+
+ INIT_LIST_HEAD(list);
+}
+
+/**
+ * smk_write_onlycap - write() for smackfs/onlycap
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ LIST_HEAD(list_tmp);
+ int rc;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ rc = smk_parse_label_list(data, &list_tmp);
+ kfree(data);
+
+ /*
+ * Clear the smack_onlycap on invalid label errors. This means
+ * that we can pass a null string to unset the onlycap value.
+ *
+ * Importing will also reject a label beginning with '-',
+ * so "-usecapabilities" will also work.
+ *
+ * But do so only on invalid label, not on system errors.
+	 * The invalid label must come first to count as a clearing attempt.
+ */
+ if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+ mutex_lock(&smack_onlycap_lock);
+ smk_list_swap_rcu(&smack_onlycap_list, &list_tmp);
+ mutex_unlock(&smack_onlycap_lock);
+ rc = count;
+ }
+
+ smk_destroy_label_list(&list_tmp);
+
+ return rc;
+}
+
+static const struct file_operations smk_onlycap_ops = {
+ .open = smk_open_onlycap,
+ .read = seq_read,
+ .write = smk_write_onlycap,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/**
+ * smk_read_unconfined - read() for smackfs/unconfined
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_unconfined(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ char *smack = "";
+ ssize_t rc = -EINVAL;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (smack_unconfined != NULL)
+ smack = smack_unconfined->smk_known;
+
+ asize = strlen(smack) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_unconfined - write() for smackfs/unconfined
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ struct smack_known *skp;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /*
+ * Clear the smack_unconfined on invalid label errors. This means
+ * that we can pass a null string to unset the unconfined value.
+ *
+ * Importing will also reject a label beginning with '-',
+ * so "-confine" will also work.
+ *
+ * But do so only on invalid label, not on system errors.
+ */
+ skp = smk_import_entry(data, count);
+ if (PTR_ERR(skp) == -EINVAL)
+ skp = NULL;
+ else if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
+ goto freeout;
+ }
+
+ smack_unconfined = skp;
+
+freeout:
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_unconfined_ops = {
+ .read = smk_read_unconfined,
+ .write = smk_write_unconfined,
+ .llseek = default_llseek,
+};
+#endif /* CONFIG_SECURITY_SMACK_BRINGUP */
+
+/**
+ * smk_read_logging - read() for /smack/logging
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_logging(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d\n", log_policy);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+ return rc;
+}
+
+/**
+ * smk_write_logging - write() for /smack/logging
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_logging(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+ if (i < 0 || i > 3)
+ return -EINVAL;
+ log_policy = i;
+ return count;
+}
+
+static const struct file_operations smk_logging_ops = {
+ .read = smk_read_logging,
+ .write = smk_write_logging,
+ .llseek = default_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self
+ */
+
+static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry_rcu(list, struct smack_rule, list);
+
+ smk_rule_show(s, srp, SMK_LABELLEN);
+
+ return 0;
+}
+
+static const struct seq_operations load_self_seq_ops = {
+ .start = load_self_seq_start,
+ .next = load_self_seq_next,
+ .show = load_self_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self - open() for /smack/load-self
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_self_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self_seq_ops);
+}
+
+/**
+ * smk_write_load_self - write() for /smack/load-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_load_self_ops = {
+ .open = smk_open_load_self,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self,
+ .release = seq_release,
+};
+
+/**
+ * smk_user_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
+ */
+static ssize_t smk_user_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
+{
+ struct smack_parsed_rule rule;
+ char *data;
+ int res;
+
+ data = simple_transaction_get(file, buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (format == SMK_FIXED24_FMT) {
+ if (count < SMK_LOADLEN)
+ return -EINVAL;
+ res = smk_parse_rule(data, &rule, 0);
+ } else {
+ /*
+ * simple_transaction_get() returns null-terminated data
+ */
+ res = smk_parse_long_rule(data, &rule, 0, 3);
+ }
+
+ if (res >= 0)
+ res = smk_access(rule.smk_subject, rule.smk_object,
+ rule.smk_access1, NULL);
+ else if (res != -ENOENT)
+ return res;
+
+ /*
+ * smk_access() can return a value > 0 in the "bringup" case.
+ */
+ data[0] = res >= 0 ? '1' : '0';
+ data[1] = '\0';
+
+ simple_transaction_set(file, 2);
+
+ if (format == SMK_FIXED24_FMT)
+ return SMK_LOADLEN;
+ return count;
+}
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_access_ops = {
+ .write = smk_write_access,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/load2
+ */
+
+static int load2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp;
+ struct smack_known *skp =
+ list_entry_rcu(list, struct smack_known, list);
+
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list)
+ smk_rule_show(s, srp, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load2_seq_ops = {
+ .start = load2_seq_start,
+ .next = load2_seq_next,
+ .show = load2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load2 - open() for /smack/load2
+ * @inode: inode structure representing file
+ * @file: "load2" file pointer
+ *
+ * For reading, use load2_seq_* seq_file reading operations.
+ */
+static int smk_open_load2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load2_seq_ops);
+}
+
+/**
+ * smk_write_load2 - write() for /smack/load2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load2_ops = {
+ .open = smk_open_load2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load2,
+ .release = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self2
+ */
+
+static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry_rcu(list, struct smack_rule, list);
+
+ smk_rule_show(s, srp, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load_self2_seq_ops = {
+ .start = load_self2_seq_start,
+ .next = load_self2_seq_next,
+ .show = load_self2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self2 - open() for /smack/load-self2
+ * @inode: inode structure representing file
+ * @file: "load-self2" file pointer
+ *
+ * For reading, use the load_self2_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self2_seq_ops);
+}
+
+/**
+ * smk_write_load_self2 - write() for /smack/load-self2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load_self2_ops = {
+ .open = smk_open_load_self2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self2,
+ .release = seq_release,
+};
+
+/**
+ * smk_write_access2 - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_access2_ops = {
+ .write = smk_write_access2,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/**
+ * smk_write_revoke_subj - write() for /smack/revoke-subject
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
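+ *
+ * Rules whose subject matches the written label are not removed from
+ * the rule list; their access mode is cleared to zero instead.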
+ */
+static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ const char *cp;
+ struct smack_known *skp;
+ struct smack_rule *sp;
+ struct list_head *rule_list;
+ struct mutex *rule_lock;
+ int rc = count;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count == 0 || count > SMK_LONGLABEL)
+ return -EINVAL;
+
+ data = memdup_user(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ cp = smk_parse_smack(data, count);
+ if (IS_ERR(cp)) {
+ rc = PTR_ERR(cp);
+ goto out_data;
+ }
+
+ skp = smk_find_entry(cp);
+ if (skp == NULL)
+ goto out_cp;
+
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
+
+ mutex_lock(rule_lock);
+
+ list_for_each_entry_rcu(sp, rule_list, list)
+ sp->smk_access = 0;
+
+ mutex_unlock(rule_lock);
+
+out_cp:
+ kfree(cp);
+out_data:
+ kfree(data);
+
+ return rc;
+}
+
+static const struct file_operations smk_revoke_subj_ops = {
+ .write = smk_write_revoke_subj,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+ return sysfs_create_mount_point(fs_kobj, "smackfs");
+}
+
+/**
+ * smk_write_change_rule - write() for /smack/change-rule
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_CHANGE_FMT);
+}
+
+static const struct file_operations smk_change_rule_ops = {
+ .write = smk_write_change_rule,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/**
+ * smk_read_syslog - read() for smackfs/syslog
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_syslog(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ struct smack_known *skp;
+ ssize_t rc = -EINVAL;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (smack_syslog_label == NULL)
+ skp = &smack_known_star;
+ else
+ skp = smack_syslog_label;
+
+ asize = strlen(skp->smk_known) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,
+ asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_syslog - write() for smackfs/syslog
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ struct smack_known *skp;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ /* Enough data must be present */
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ skp = smk_import_entry(data, count);
+ if (IS_ERR(skp))
+ rc = PTR_ERR(skp);
+ else
+ smack_syslog_label = skp;
+
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_syslog_ops = {
+ .read = smk_read_syslog,
+ .write = smk_write_syslog,
+ .llseek = default_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/relabel-self
+ */
+
+static void *relabel_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_start(s, pos, &tsp->smk_relabel);
+}
+
+static void *relabel_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = smack_cred(current_cred());
+
+ return smk_seq_next(s, v, pos, &tsp->smk_relabel);
+}
+
+static int relabel_self_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known_list_elem *sklep =
+ list_entry(list, struct smack_known_list_elem, list);
+
+ seq_puts(s, sklep->smk_label->smk_known);
+ seq_putc(s, ' ');
+
+ return 0;
+}
+
+static const struct seq_operations relabel_self_seq_ops = {
+ .start = relabel_self_seq_start,
+ .next = relabel_self_seq_next,
+ .show = relabel_self_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_relabel_self - open() for /smack/relabel-self
+ * @inode: inode structure representing file
+ * @file: "relabel-self" file pointer
+ *
+ * Connect our relabel_self_seq_* operations with /smack/relabel-self
+ * file_operations
+ */
+static int smk_open_relabel_self(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &relabel_self_seq_ops);
+}
+
+/**
+ * smk_write_relabel_self - write() for /smack/relabel-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int rc;
+ LIST_HEAD(list_tmp);
+
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ /*
+ * No partial write.
+ * Enough data must be present.
+ */
+ if (*ppos != 0)
+ return -EINVAL;
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
+
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ rc = smk_parse_label_list(data, &list_tmp);
+ kfree(data);
+
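+ /*
+ * Commit the parsed list on success; an -EINVAL result that parsed
+ * nothing also commits, which lets userspace clear its
+ * relabel-self list by writing an empty or invalid list.
+ */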
+ if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+ struct cred *new;
+ struct task_smack *tsp;
+
+ new = prepare_creds();
+ if (!new) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ tsp = smack_cred(new);
+ smk_destroy_label_list(&tsp->smk_relabel);
+ list_splice(&list_tmp, &tsp->smk_relabel);
+ commit_creds(new);
+ return count;
+ }
+out:
+ smk_destroy_label_list(&list_tmp);
+ return rc;
+}
+
+static const struct file_operations smk_relabel_self_ops = {
+ .open = smk_open_relabel_self,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_relabel_self,
+ .release = seq_release,
+};
+
+/**
+ * smk_read_ptrace - read() for /smack/ptrace
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ptrace(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d\n", smack_ptrace_rule);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+ return rc;
+}
+
+/**
+ * smk_write_ptrace - write() for /smack/ptrace
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
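+ *
+ * Accepts a single decimal integer between SMACK_PTRACE_DEFAULT and
+ * SMACK_PTRACE_MAX selecting how ptrace requests are checked.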
+ */
+static ssize_t smk_write_ptrace(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0 || count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+ if (i < SMACK_PTRACE_DEFAULT || i > SMACK_PTRACE_MAX)
+ return -EINVAL;
+ smack_ptrace_rule = i;
+
+ return count;
+}
+
+static const struct file_operations smk_ptrace_ops = {
+ .write = smk_write_ptrace,
+ .read = smk_read_ptrace,
+ .llseek = default_llseek,
+};
+
+/**
+ * smk_fill_super - fill the smackfs superblock
+ * @sb: the empty superblock
+ * @fc: unused
+ *
+ * Fill in the well known entries for the smack filesystem
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smk_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int rc;
+
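+ /*
+ * Each entry maps a well-known smackfs file name to its
+ * file_operations and permission bits; the array indices are the
+ * SMK_* enumeration values.
+ */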
+ static const struct tree_descr smack_files[] = {
+ [SMK_LOAD] = {
+ "load", &smk_load_ops, S_IRUGO|S_IWUSR},
+ [SMK_CIPSO] = {
+ "cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
+ [SMK_DOI] = {
+ "doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
+ [SMK_DIRECT] = {
+ "direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
+ [SMK_AMBIENT] = {
+ "ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
+ [SMK_NET4ADDR] = {
+ "netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR},
+ [SMK_ONLYCAP] = {
+ "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOGGING] = {
+ "logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF] = {
+ "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESSES] = {
+ "access", &smk_access_ops, S_IRUGO|S_IWUGO},
+ [SMK_MAPPED] = {
+ "mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD2] = {
+ "load2", &smk_load2_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF2] = {
+ "load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESS2] = {
+ "access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
+ [SMK_CIPSO2] = {
+ "cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
+ [SMK_REVOKE_SUBJ] = {
+ "revoke-subject", &smk_revoke_subj_ops,
+ S_IRUGO|S_IWUSR},
+ [SMK_CHANGE_RULE] = {
+ "change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
+ [SMK_SYSLOG] = {
+ "syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
+ [SMK_PTRACE] = {
+ "ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ [SMK_UNCONFINED] = {
+ "unconfined", &smk_unconfined_ops, S_IRUGO|S_IWUSR},
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ [SMK_NET6ADDR] = {
+ "ipv6host", &smk_net6addr_ops, S_IRUGO|S_IWUSR},
+#endif /* CONFIG_IPV6 */
+ [SMK_RELABEL_SELF] = {
+ "relabel-self", &smk_relabel_self_ops,
+ S_IRUGO|S_IWUGO},
+ /* last one */
+ {""}
+ };
+
+ rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
+ if (rc != 0) {
+ printk(KERN_ERR "%s failed %d while creating inodes\n",
+ __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * smk_get_tree - get the smackfs superblock
+ * @fc: The mount context, including any options
+ *
+ * Just passes everything along.
+ *
+ * Returns what the lower level code does.
+ */
+static int smk_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, smk_fill_super);
+}
+
+static const struct fs_context_operations smk_context_ops = {
+ .get_tree = smk_get_tree,
+};
+
+/**
+ * smk_init_fs_context - Initialise a filesystem context for smackfs
+ * @fc: The blank mount context
+ */
+static int smk_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &smk_context_ops;
+ return 0;
+}
+
+static struct file_system_type smk_fs_type = {
+ .name = "smackfs",
+ .init_fs_context = smk_init_fs_context,
+ .kill_sb = kill_litter_super,
+};
+
+static struct vfsmount *smackfs_mount;
+
+/**
+ * init_smk_fs - register the smackfs filesystem
+ *
+ * Register smackfs and create its sysfs mount point.
+ *
+ * Do not register smackfs if Smack wasn't enabled on boot. This cannot
+ * be done from the smack_init() code path because the security
+ * subsystem is initialized before the VFS caches.
+ *
+ * Returns 0 if Smack was not chosen on boot, or if it was chosen and
+ * filesystem registration succeeded; a negative error code otherwise.
+ */
+static int __init init_smk_fs(void)
+{
+ int err;
+ int rc;
+
+ if (smack_enabled == 0)
+ return 0;
+
+ err = smk_init_sysfs();
+ if (err)
+ printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
+ err = register_filesystem(&smk_fs_type);
+ if (!err) {
+ smackfs_mount = kern_mount(&smk_fs_type);
+ if (IS_ERR(smackfs_mount)) {
+ printk(KERN_ERR "smackfs: could not mount!\n");
+ err = PTR_ERR(smackfs_mount);
+ smackfs_mount = NULL;
+ }
+ }
+
+ smk_cipso_doi();
+ smk_unlbl_ambient(NULL);
+
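+ /* Pre-populate the netlabel secattr for each built-in Smack label. */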
+ rc = smack_populate_secattr(&smack_known_floor);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smack_populate_secattr(&smack_known_hat);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smack_populate_secattr(&smack_known_huh);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smack_populate_secattr(&smack_known_star);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smack_populate_secattr(&smack_known_web);
+ if (err == 0 && rc < 0)
+ err = rc;
+
+ return err;
+}
+
+__initcall(init_smk_fs);
diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
new file mode 100644
index 000000000..9f300cdce
--- /dev/null
+++ b/security/tomoyo/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+builtin-policy.h
+policy/*.conf
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
new file mode 100644
index 000000000..b9f867100
--- /dev/null
+++ b/security/tomoyo/Kconfig
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_TOMOYO
+ bool "TOMOYO Linux Support"
+ depends on SECURITY
+ depends on NET
+ select SECURITYFS
+ select SECURITY_PATH
+ select SECURITY_NETWORK
+ select SRCU
+ select BUILD_BIN2C
+ default n
+ help
+ This selects TOMOYO Linux, pathname-based access control.
+ Required userspace tools and further information may be
+ found at <http://tomoyo.sourceforge.jp/>.
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
+ int "Default maximal count for learning mode"
+ default 2048
+ range 0 2147483647
+ depends on SECURITY_TOMOYO
+ help
+ This is the default limit on the number of ACL entries that are
+ automatically appended to the policy in "learning mode".
+ Some programs access thousands of objects, so running such
+ programs in "learning mode" dulls the system response and
+ consumes a lot of memory.
+ This limit is the safeguard against such programs.
+
+config SECURITY_TOMOYO_MAX_AUDIT_LOG
+ int "Default maximal count for audit log"
+ default 1024
+ range 0 2147483647
+ depends on SECURITY_TOMOYO
+ help
+ This is the default limit on the number of audit log entries
+ that the kernel can hold in memory.
+ You can read the log via /sys/kernel/security/tomoyo/audit.
+ If you don't need audit logs, you may set this value to 0.
+
+config SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ bool "Activate without calling userspace policy loader."
+ default n
+ depends on SECURITY_TOMOYO
+ help
+ Say Y here if you want to activate access control as soon as the
+ built-in policy has been loaded. This option is useful for systems
+ where operations that could hijack the boot sequence occur before
+ the policy can be loaded. For example, you can activate enforcement
+ immediately after loading the fixed part of the policy, which allows
+ only the operations needed to mount the partition containing the
+ variant part of the policy, verify it (e.g. with a GPG check), and
+ load it. Since enforcing mode is in effect from the beginning, the
+ window for hijacking the boot sequence is reduced.
+
+config SECURITY_TOMOYO_POLICY_LOADER
+ string "Location of userspace policy loader"
+ default "/sbin/tomoyo-init"
+ depends on SECURITY_TOMOYO
+ depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ help
+ This is the default pathname of the policy loader, which is called
+ before activation. You can override this setting via the
+ TOMOYO_loader= kernel command line option.
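+ For example, TOMOYO_loader=/usr/lib/tomoyo/tomoyo-init selects a
+ loader installed outside /sbin (the path is illustrative).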
+
+config SECURITY_TOMOYO_ACTIVATION_TRIGGER
+ string "Trigger for calling userspace policy loader"
+ default "/sbin/init"
+ depends on SECURITY_TOMOYO
+ depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ help
+ This is the default pathname of the activation trigger.
+ You can override this setting via the TOMOYO_trigger= kernel command
+ line option. For example, if you pass the init=/bin/systemd option,
+ you may also want to pass the TOMOYO_trigger=/bin/systemd option.
+
+config SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
+ bool "Use insecure built-in settings for fuzzing tests."
+ default n
+ depends on SECURITY_TOMOYO
+ select SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ help
+ Enabling this option forces a minimal built-in policy and disables
+ domain/program checks for run-time policy modifications. Enable
+ this option only if this kernel is built for fuzzing tests.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
new file mode 100644
index 000000000..221eaadff
--- /dev/null
+++ b/security/tomoyo/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
+
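+# builtin-policy.h embeds each policy/*.conf file (or the shipped
+# *.conf.default fallback) as a C string named tomoyo_builtin_<name>,
+# generated with scripts/bin2c, so that common.o can load the built-in
+# policy at boot.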
+targets += builtin-policy.h
+define do_policy
+echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
+$(objtree)/scripts/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
+echo ";"
+endef
+quiet_cmd_policy = POLICY $@
+ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
+ $(call if_changed,policy)
+
+$(obj)/common.o: $(obj)/builtin-policy.h
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
new file mode 100644
index 000000000..7cf8fdbb2
--- /dev/null
+++ b/security/tomoyo/audit.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/audit.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/**
+ * tomoyo_print_bprm - Print "struct linux_binprm" for auditing.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns the contents of @bprm on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
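+ *
+ * The resulting dump has the form argv[]={ "a" "b" } envp[]={ "K=V" },
+ * with backslashes doubled and bytes outside the printable range
+ * emitted as three-digit octal escapes.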
+ */
+static char *tomoyo_print_bprm(struct linux_binprm *bprm,
+ struct tomoyo_page_dump *dump)
+{
+ static const int tomoyo_buffer_len = 4096 * 2;
+ char *buffer = kzalloc(tomoyo_buffer_len, GFP_NOFS);
+ char *cp;
+ char *last_start;
+ int len;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ bool truncated = false;
+
+ if (!buffer)
+ return NULL;
+ len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ ");
+ cp = buffer + len;
+ if (!argv_count) {
+ memmove(cp, "} envp[]={ ", 11);
+ cp += 11;
+ }
+ last_start = cp;
+ while (argv_count || envp_count) {
+ if (!tomoyo_dump_page(bprm, pos, dump))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (offset < PAGE_SIZE) {
+ const char *kaddr = dump->data;
+ const unsigned char c = kaddr[offset++];
+
+ if (cp == last_start)
+ *cp++ = '"';
+ if (cp >= buffer + tomoyo_buffer_len - 32) {
+ /* Reserve some room for "..." string. */
+ truncated = true;
+ } else if (c == '\\') {
+ *cp++ = '\\';
+ *cp++ = '\\';
+ } else if (c > ' ' && c < 127) {
+ *cp++ = c;
+ } else if (!c) {
+ *cp++ = '"';
+ *cp++ = ' ';
+ last_start = cp;
+ } else {
+ *cp++ = '\\';
+ *cp++ = (c >> 6) + '0';
+ *cp++ = ((c >> 3) & 7) + '0';
+ *cp++ = (c & 7) + '0';
+ }
+ if (c)
+ continue;
+ if (argv_count) {
+ if (--argv_count == 0) {
+ if (truncated) {
+ cp = last_start;
+ memmove(cp, "... ", 4);
+ cp += 4;
+ }
+ memmove(cp, "} envp[]={ ", 11);
+ cp += 11;
+ last_start = cp;
+ truncated = false;
+ }
+ } else if (envp_count) {
+ if (--envp_count == 0) {
+ if (truncated) {
+ cp = last_start;
+ memmove(cp, "... ", 4);
+ cp += 4;
+ }
+ }
+ }
+ if (!argv_count && !envp_count)
+ break;
+ }
+ offset = 0;
+ }
+ *cp++ = '}';
+ *cp = '\0';
+ return buffer;
+out:
+ snprintf(buffer, tomoyo_buffer_len - 1,
+ "argv[]={ ... } envp[]={ ... }");
+ return buffer;
+}
+
+/**
+ * tomoyo_filetype - Get string representation of file type.
+ *
+ * @mode: Mode value for stat().
+ *
+ * Returns file type string.
+ */
+static inline const char *tomoyo_filetype(const umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ case 0:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FILE];
+ case S_IFDIR:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_DIRECTORY];
+ case S_IFLNK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SYMLINK];
+ case S_IFIFO:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FIFO];
+ case S_IFSOCK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SOCKET];
+ case S_IFBLK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_BLOCK_DEV];
+ case S_IFCHR:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_CHAR_DEV];
+ }
+ return "unknown"; /* This should not happen. */
+}
+
+/**
+ * tomoyo_print_header - Get header line of audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns string representation.
+ *
+ * This function uses kmalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+static char *tomoyo_print_header(struct tomoyo_request_info *r)
+{
+ struct tomoyo_time stamp;
+ const pid_t gpid = task_pid_nr(current);
+ struct tomoyo_obj_info *obj = r->obj;
+ static const int tomoyo_buffer_len = 4096;
+ char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+ int pos;
+ u8 i;
+
+ if (!buffer)
+ return NULL;
+
+ tomoyo_convert_time(ktime_get_real_seconds(), &stamp);
+
+ pos = snprintf(buffer, tomoyo_buffer_len - 1,
+ "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s granted=%s (global-pid=%u) task={ pid=%u ppid=%u uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
+ stamp.year, stamp.month, stamp.day, stamp.hour,
+ stamp.min, stamp.sec, r->profile, tomoyo_mode[r->mode],
+ str_yes_no(r->granted), gpid, tomoyo_sys_getpid(),
+ tomoyo_sys_getppid(),
+ from_kuid(&init_user_ns, current_uid()),
+ from_kgid(&init_user_ns, current_gid()),
+ from_kuid(&init_user_ns, current_euid()),
+ from_kgid(&init_user_ns, current_egid()),
+ from_kuid(&init_user_ns, current_suid()),
+ from_kgid(&init_user_ns, current_sgid()),
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()));
+ if (!obj)
+ goto no_obj_info;
+ if (!obj->validate_done) {
+ tomoyo_get_attributes(obj);
+ obj->validate_done = true;
+ }
+ for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+ struct tomoyo_mini_stat *stat;
+ unsigned int dev;
+ umode_t mode;
+
+ if (!obj->stat_valid[i])
+ continue;
+ stat = &obj->stat[i];
+ dev = stat->dev;
+ mode = stat->mode;
+ if (i & 1) {
+ pos += snprintf(buffer + pos,
+ tomoyo_buffer_len - 1 - pos,
+ " path%u.parent={ uid=%u gid=%u ino=%lu perm=0%o }",
+ (i >> 1) + 1,
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ stat->mode & S_IALLUGO);
+ continue;
+ }
+ pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+ " path%u={ uid=%u gid=%u ino=%lu major=%u minor=%u perm=0%o type=%s",
+ (i >> 1) + 1,
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ MAJOR(dev), MINOR(dev),
+ mode & S_IALLUGO, tomoyo_filetype(mode));
+ if (S_ISCHR(mode) || S_ISBLK(mode)) {
+ dev = stat->rdev;
+ pos += snprintf(buffer + pos,
+ tomoyo_buffer_len - 1 - pos,
+ " dev_major=%u dev_minor=%u",
+ MAJOR(dev), MINOR(dev));
+ }
+ pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+ " }");
+ }
+no_obj_info:
+ if (pos < tomoyo_buffer_len - 1)
+ return buffer;
+ kfree(buffer);
+ return NULL;
+}
+
+/**
+ * tomoyo_init_log - Allocate buffer for audit logs.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @len: Buffer size needed for @fmt and @args.
+ * @fmt: The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns pointer to allocated memory.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args)
+{
+ char *buf = NULL;
+ char *bprm_info = NULL;
+ const char *header = NULL;
+ char *realpath = NULL;
+ const char *symlink = NULL;
+ int pos;
+ const char *domainname = r->domain->domainname->name;
+
+ header = tomoyo_print_header(r);
+ if (!header)
+ return NULL;
+ /* +10 is for '\n' etc. and '\0'. */
+ len += strlen(domainname) + strlen(header) + 10;
+ if (r->ee) {
+ struct file *file = r->ee->bprm->file;
+
+ realpath = tomoyo_realpath_from_path(&file->f_path);
+ bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump);
+ if (!realpath || !bprm_info)
+ goto out;
+ /* +80 is for " exec={ realpath=\"%s\" argc=%d envc=%d %s }" */
+ len += strlen(realpath) + 80 + strlen(bprm_info);
+ } else if (r->obj && r->obj->symlink_target) {
+ symlink = r->obj->symlink_target->name;
+ /* +18 is for " symlink.target=\"%s\"" */
+ len += 18 + strlen(symlink);
+ }
+ len = tomoyo_round2(len);
+ buf = kzalloc(len, GFP_NOFS);
+ if (!buf)
+ goto out;
+ len--;
+ pos = snprintf(buf, len, "%s", header);
+ if (realpath) {
+ struct linux_binprm *bprm = r->ee->bprm;
+
+ pos += snprintf(buf + pos, len - pos,
+ " exec={ realpath=\"%s\" argc=%d envc=%d %s }",
+ realpath, bprm->argc, bprm->envc, bprm_info);
+ } else if (symlink)
+ pos += snprintf(buf + pos, len - pos, " symlink.target=\"%s\"",
+ symlink);
+ pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname);
+ vsnprintf(buf + pos, len - pos, fmt, args);
+out:
+ kfree(realpath);
+ kfree(bprm_info);
+ kfree(header);
+ return buf;
+}
+
+/* Wait queue for /sys/kernel/security/tomoyo/audit. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait);
+
+/* Structure for audit log. */
+struct tomoyo_log {
+ struct list_head list;
+ char *log;
+ int size;
+};
+
+/* The list for "struct tomoyo_log". */
+static LIST_HEAD(tomoyo_log);
+
+/* Lock for "struct list_head tomoyo_log". */
+static DEFINE_SPINLOCK(tomoyo_log_lock);
+
+/* Length of "struct list_head tomoyo_log". */
+static unsigned int tomoyo_log_count;
+
+/**
+ * tomoyo_get_audit - Get audit mode.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number.
+ * @index: Index number of functionality.
+ * @matched_acl: Pointer to "struct tomoyo_acl_info".
+ * @is_granted: True if granted log, false otherwise.
+ *
+ * Returns true if this request should be audited, false otherwise.
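+ *
+ * The effective audit mode is resolved in three steps: the
+ * per-functionality config, then the per-category config, then the
+ * profile's default config.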
+ */
+static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
+ const u8 profile, const u8 index,
+ const struct tomoyo_acl_info *matched_acl,
+ const bool is_granted)
+{
+ u8 mode;
+ const u8 category = tomoyo_index2category[index] +
+ TOMOYO_MAX_MAC_INDEX;
+ struct tomoyo_profile *p;
+
+ if (!tomoyo_policy_loaded)
+ return false;
+ p = tomoyo_profile(ns, profile);
+ if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
+ return false;
+ if (is_granted && matched_acl && matched_acl->cond &&
+ matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
+ mode = p->config[index];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->config[category];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->default_config;
+ if (is_granted)
+ return mode & TOMOYO_CONFIG_WANT_GRANT_LOG;
+ return mode & TOMOYO_CONFIG_WANT_REJECT_LOG;
+}
+
+/**
+ * tomoyo_write_log2 - Write an audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @len: Buffer size needed for @fmt and @args.
+ * @fmt: The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args)
+{
+ char *buf;
+ struct tomoyo_log *entry;
+ bool quota_exceeded = false;
+
+ if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+ r->matched_acl, r->granted))
+ goto out;
+ buf = tomoyo_init_log(r, len, fmt, args);
+ if (!buf)
+ goto out;
+ entry = kzalloc(sizeof(*entry), GFP_NOFS);
+ if (!entry) {
+ kfree(buf);
+ goto out;
+ }
+ entry->log = buf;
+ len = tomoyo_round2(strlen(buf) + 1);
+ /*
+ * The entry->size is used for memory quota checks.
+ * Don't go beyond strlen(entry->log).
+ */
+ entry->size = len + tomoyo_round2(sizeof(*entry));
+ spin_lock(&tomoyo_log_lock);
+ if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] &&
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >=
+ tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) {
+ quota_exceeded = true;
+ } else {
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size;
+ list_add_tail(&entry->list, &tomoyo_log);
+ tomoyo_log_count++;
+ }
+ spin_unlock(&tomoyo_log_lock);
+ if (quota_exceeded) {
+ kfree(buf);
+ kfree(entry);
+ goto out;
+ }
+ wake_up(&tomoyo_log_wait);
+out:
+ return;
+}
+
+/**
+ * tomoyo_write_log - Write an audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf()'s format string, followed by parameters.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+ va_start(args, fmt);
+ tomoyo_write_log2(r, len, fmt, args);
+ va_end(args);
+}
+
+/**
+ * tomoyo_read_log - Read an audit log.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+void tomoyo_read_log(struct tomoyo_io_buffer *head)
+{
+ struct tomoyo_log *ptr = NULL;
+
+ if (head->r.w_pos)
+ return;
+ kfree(head->read_buf);
+ head->read_buf = NULL;
+ spin_lock(&tomoyo_log_lock);
+ if (!list_empty(&tomoyo_log)) {
+ ptr = list_entry(tomoyo_log.next, typeof(*ptr), list);
+ list_del(&ptr->list);
+ tomoyo_log_count--;
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size;
+ }
+ spin_unlock(&tomoyo_log_lock);
+ if (ptr) {
+ head->read_buf = ptr->log;
+ head->r.w[head->r.w_pos++] = head->read_buf;
+ kfree(ptr);
+ }
+}
+
+/**
+ * tomoyo_poll_log - Wait for an audit log.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns EPOLLIN | EPOLLRDNORM when ready to read an audit log.
+ */
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait)
+{
+ if (tomoyo_log_count)
+ return EPOLLIN | EPOLLRDNORM;
+ poll_wait(file, &tomoyo_log_wait, wait);
+ if (tomoyo_log_count)
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
new file mode 100644
index 000000000..f4cd9b58b
--- /dev/null
+++ b/security/tomoyo/common.c
@@ -0,0 +1,2870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/common.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/string_helpers.h>
+#include "common.h"
+
+/* String table for operation mode. */
+const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
+ [TOMOYO_CONFIG_DISABLED] = "disabled",
+ [TOMOYO_CONFIG_LEARNING] = "learning",
+ [TOMOYO_CONFIG_PERMISSIVE] = "permissive",
+ [TOMOYO_CONFIG_ENFORCING] = "enforcing"
+};
+
+/* String table for /sys/kernel/security/tomoyo/profile */
+const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ /* CONFIG::file group */
+ [TOMOYO_MAC_FILE_EXECUTE] = "execute",
+ [TOMOYO_MAC_FILE_OPEN] = "open",
+ [TOMOYO_MAC_FILE_CREATE] = "create",
+ [TOMOYO_MAC_FILE_UNLINK] = "unlink",
+ [TOMOYO_MAC_FILE_GETATTR] = "getattr",
+ [TOMOYO_MAC_FILE_MKDIR] = "mkdir",
+ [TOMOYO_MAC_FILE_RMDIR] = "rmdir",
+ [TOMOYO_MAC_FILE_MKFIFO] = "mkfifo",
+ [TOMOYO_MAC_FILE_MKSOCK] = "mksock",
+ [TOMOYO_MAC_FILE_TRUNCATE] = "truncate",
+ [TOMOYO_MAC_FILE_SYMLINK] = "symlink",
+ [TOMOYO_MAC_FILE_MKBLOCK] = "mkblock",
+ [TOMOYO_MAC_FILE_MKCHAR] = "mkchar",
+ [TOMOYO_MAC_FILE_LINK] = "link",
+ [TOMOYO_MAC_FILE_RENAME] = "rename",
+ [TOMOYO_MAC_FILE_CHMOD] = "chmod",
+ [TOMOYO_MAC_FILE_CHOWN] = "chown",
+ [TOMOYO_MAC_FILE_CHGRP] = "chgrp",
+ [TOMOYO_MAC_FILE_IOCTL] = "ioctl",
+ [TOMOYO_MAC_FILE_CHROOT] = "chroot",
+ [TOMOYO_MAC_FILE_MOUNT] = "mount",
+ [TOMOYO_MAC_FILE_UMOUNT] = "unmount",
+ [TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = "env",
+ /* CONFIG group */
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
+};
+
+/* String table for conditions. */
+const char * const tomoyo_condition_keyword[TOMOYO_MAX_CONDITION_KEYWORD] = {
+ [TOMOYO_TASK_UID] = "task.uid",
+ [TOMOYO_TASK_EUID] = "task.euid",
+ [TOMOYO_TASK_SUID] = "task.suid",
+ [TOMOYO_TASK_FSUID] = "task.fsuid",
+ [TOMOYO_TASK_GID] = "task.gid",
+ [TOMOYO_TASK_EGID] = "task.egid",
+ [TOMOYO_TASK_SGID] = "task.sgid",
+ [TOMOYO_TASK_FSGID] = "task.fsgid",
+ [TOMOYO_TASK_PID] = "task.pid",
+ [TOMOYO_TASK_PPID] = "task.ppid",
+ [TOMOYO_EXEC_ARGC] = "exec.argc",
+ [TOMOYO_EXEC_ENVC] = "exec.envc",
+ [TOMOYO_TYPE_IS_SOCKET] = "socket",
+ [TOMOYO_TYPE_IS_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_IS_FILE] = "file",
+ [TOMOYO_TYPE_IS_BLOCK_DEV] = "block",
+ [TOMOYO_TYPE_IS_DIRECTORY] = "directory",
+ [TOMOYO_TYPE_IS_CHAR_DEV] = "char",
+ [TOMOYO_TYPE_IS_FIFO] = "fifo",
+ [TOMOYO_MODE_SETUID] = "setuid",
+ [TOMOYO_MODE_SETGID] = "setgid",
+ [TOMOYO_MODE_STICKY] = "sticky",
+ [TOMOYO_MODE_OWNER_READ] = "owner_read",
+ [TOMOYO_MODE_OWNER_WRITE] = "owner_write",
+ [TOMOYO_MODE_OWNER_EXECUTE] = "owner_execute",
+ [TOMOYO_MODE_GROUP_READ] = "group_read",
+ [TOMOYO_MODE_GROUP_WRITE] = "group_write",
+ [TOMOYO_MODE_GROUP_EXECUTE] = "group_execute",
+ [TOMOYO_MODE_OTHERS_READ] = "others_read",
+ [TOMOYO_MODE_OTHERS_WRITE] = "others_write",
+ [TOMOYO_MODE_OTHERS_EXECUTE] = "others_execute",
+ [TOMOYO_EXEC_REALPATH] = "exec.realpath",
+ [TOMOYO_SYMLINK_TARGET] = "symlink.target",
+ [TOMOYO_PATH1_UID] = "path1.uid",
+ [TOMOYO_PATH1_GID] = "path1.gid",
+ [TOMOYO_PATH1_INO] = "path1.ino",
+ [TOMOYO_PATH1_MAJOR] = "path1.major",
+ [TOMOYO_PATH1_MINOR] = "path1.minor",
+ [TOMOYO_PATH1_PERM] = "path1.perm",
+ [TOMOYO_PATH1_TYPE] = "path1.type",
+ [TOMOYO_PATH1_DEV_MAJOR] = "path1.dev_major",
+ [TOMOYO_PATH1_DEV_MINOR] = "path1.dev_minor",
+ [TOMOYO_PATH2_UID] = "path2.uid",
+ [TOMOYO_PATH2_GID] = "path2.gid",
+ [TOMOYO_PATH2_INO] = "path2.ino",
+ [TOMOYO_PATH2_MAJOR] = "path2.major",
+ [TOMOYO_PATH2_MINOR] = "path2.minor",
+ [TOMOYO_PATH2_PERM] = "path2.perm",
+ [TOMOYO_PATH2_TYPE] = "path2.type",
+ [TOMOYO_PATH2_DEV_MAJOR] = "path2.dev_major",
+ [TOMOYO_PATH2_DEV_MINOR] = "path2.dev_minor",
+ [TOMOYO_PATH1_PARENT_UID] = "path1.parent.uid",
+ [TOMOYO_PATH1_PARENT_GID] = "path1.parent.gid",
+ [TOMOYO_PATH1_PARENT_INO] = "path1.parent.ino",
+ [TOMOYO_PATH1_PARENT_PERM] = "path1.parent.perm",
+ [TOMOYO_PATH2_PARENT_UID] = "path2.parent.uid",
+ [TOMOYO_PATH2_PARENT_GID] = "path2.parent.gid",
+ [TOMOYO_PATH2_PARENT_INO] = "path2.parent.ino",
+ [TOMOYO_PATH2_PARENT_PERM] = "path2.parent.perm",
+};
+
+/* String table for PREFERENCE keyword. */
+static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = {
+ [TOMOYO_PREF_MAX_AUDIT_LOG] = "max_audit_log",
+ [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry",
+};
+
+/* String table for path operation. */
+const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_EXECUTE] = "execute",
+ [TOMOYO_TYPE_READ] = "read",
+ [TOMOYO_TYPE_WRITE] = "write",
+ [TOMOYO_TYPE_APPEND] = "append",
+ [TOMOYO_TYPE_UNLINK] = "unlink",
+ [TOMOYO_TYPE_GETATTR] = "getattr",
+ [TOMOYO_TYPE_RMDIR] = "rmdir",
+ [TOMOYO_TYPE_TRUNCATE] = "truncate",
+ [TOMOYO_TYPE_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_CHROOT] = "chroot",
+ [TOMOYO_TYPE_UMOUNT] = "unmount",
+};
+
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+ [TOMOYO_NETWORK_BIND] = "bind",
+ [TOMOYO_NETWORK_LISTEN] = "listen",
+ [TOMOYO_NETWORK_CONNECT] = "connect",
+ [TOMOYO_NETWORK_SEND] = "send",
+};
+
+/* String table for categories. */
+static const char * const tomoyo_category_keywords
+[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAC_CATEGORY_MISC] = "misc",
+};
+
+/* Permit policy management by non-root user? */
+static bool tomoyo_manage_by_non_root;
+
+/* Utility functions. */
+
+/**
+ * tomoyo_addprintf - strncat()-like snprintf().
+ *
+ * @buffer: Buffer to write to. Must be '\0'-terminated.
+ * @len: Size of @buffer.
+ * @fmt: The printf()'s format string, followed by parameters.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
+{
+ va_list args;
+ const int pos = strlen(buffer);
+
+ va_start(args, fmt);
+ vsnprintf(buffer + pos, len - pos - 1, fmt, args);
+ va_end(args);
+}
+
+/**
+ * tomoyo_flush - Flush queued string to userspace's buffer.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if all data was flushed, false otherwise.
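+ *
+ * Queued strings are copied out in order; a string that does not fit
+ * into the remaining user buffer is resumed from where it stopped on
+ * the next read().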
+ */
+static bool tomoyo_flush(struct tomoyo_io_buffer *head)
+{
+ while (head->r.w_pos) {
+ const char *w = head->r.w[0];
+ size_t len = strlen(w);
+
+ if (len) {
+ if (len > head->read_user_buf_avail)
+ len = head->read_user_buf_avail;
+ if (!len)
+ return false;
+ if (copy_to_user(head->read_user_buf, w, len))
+ return false;
+ head->read_user_buf_avail -= len;
+ head->read_user_buf += len;
+ w += len;
+ }
+ head->r.w[0] = w;
+ if (*w)
+ return false;
+ /* Add '\0' for audit logs and query. */
+ if (head->poll) {
+ if (!head->read_user_buf_avail ||
+ copy_to_user(head->read_user_buf, "", 1))
+ return false;
+ head->read_user_buf_avail--;
+ head->read_user_buf++;
+ }
+ head->r.w_pos--;
+ for (len = 0; len < head->r.w_pos; len++)
+ head->r.w[len] = head->r.w[len + 1];
+ }
+ head->r.avail = 0;
+ return true;
+}
+
+/**
+ * tomoyo_set_string - Queue string to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @string: String to print.
+ *
+ * Note that @string has to be kept valid until @head is kfree()d.
+ * This means that char[] allocated on stack memory cannot be passed to
+ * this function. Use tomoyo_io_printf() for char[] allocated on stack memory.
+ */
+static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
+{
+ if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) {
+ head->r.w[head->r.w_pos++] = string;
+ tomoyo_flush(head);
+ } else
+ WARN_ON(1);
+}
+
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...) __printf(2, 3);
+
+/**
+ * tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @fmt: The printf()'s format string, followed by parameters.
+ */
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...)
+{
+ va_list args;
+ size_t len;
+ size_t pos = head->r.avail;
+ int size = head->readbuf_size - pos;
+
+ if (size <= 0)
+ return;
+ va_start(args, fmt);
+ len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1;
+ va_end(args);
+ if (pos + len >= head->readbuf_size) {
+ WARN_ON(1);
+ return;
+ }
+ head->r.avail += len;
+ tomoyo_set_string(head, head->read_buf + pos);
+}
+
+/**
+ * tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_space(struct tomoyo_io_buffer *head)
+{
+ tomoyo_set_string(head, " ");
+}
+
+/**
+ * tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if the line feed (and everything queued before it) was
+ * flushed to the userspace buffer, false otherwise.
+ */
+static bool tomoyo_set_lf(struct tomoyo_io_buffer *head)
+{
+ tomoyo_set_string(head, "\n");
+ return !head->r.w_pos;
+}
+
+/**
+ * tomoyo_set_slash - Put a slash to "struct tomoyo_io_buffer" structure.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_slash(struct tomoyo_io_buffer *head)
+{
+ tomoyo_set_string(head, "/");
+}
+
+/* List of namespaces. */
+LIST_HEAD(tomoyo_namespace_list);
+/* True if namespace other than tomoyo_kernel_namespace is defined. */
+static bool tomoyo_namespace_enabled;
+
+/**
+ * tomoyo_init_policy_namespace - Initialize namespace.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ *
+ * Returns nothing.
+ */
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++)
+ INIT_LIST_HEAD(&ns->acl_group[idx]);
+ for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++)
+ INIT_LIST_HEAD(&ns->group_list[idx]);
+ for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
+ INIT_LIST_HEAD(&ns->policy_list[idx]);
+ ns->profile_version = 20150505;
+ tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
+ list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
+}
+
+/**
+ * tomoyo_print_namespace - Print namespace header.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_namespace(struct tomoyo_io_buffer *head)
+{
+ if (!tomoyo_namespace_enabled)
+ return;
+ tomoyo_set_string(head,
+ container_of(head->r.ns,
+ struct tomoyo_policy_namespace,
+ namespace_list)->name);
+ tomoyo_set_space(head);
+}
+
+/**
+ * tomoyo_print_name_union - Print a tomoyo_name_union.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ */
+static void tomoyo_print_name_union(struct tomoyo_io_buffer *head,
+ const struct tomoyo_name_union *ptr)
+{
+ tomoyo_set_space(head);
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ tomoyo_set_string(head, ptr->filename->name);
+ }
+}
+
+/**
+ * tomoyo_print_name_union_quoted - Print a tomoyo_name_union with a quote.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head,
+ const struct tomoyo_name_union *ptr)
+{
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ tomoyo_set_string(head, "\"");
+ tomoyo_set_string(head, ptr->filename->name);
+ tomoyo_set_string(head, "\"");
+ }
+}
+
+/**
+ * tomoyo_print_number_union_nospace - Print a tomoyo_number_union without a space.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
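+ *
+ * A group reference is printed as "@name"; a plain value prints as
+ * "min" or "min-max" using the radix (decimal, octal or hexadecimal)
+ * each bound was parsed with.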
+ */
+static void tomoyo_print_number_union_nospace
+(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr)
+{
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ int i;
+ unsigned long min = ptr->values[0];
+ const unsigned long max = ptr->values[1];
+ u8 min_type = ptr->value_type[0];
+ const u8 max_type = ptr->value_type[1];
+ char buffer[128];
+
+ buffer[0] = '\0';
+ for (i = 0; i < 2; i++) {
+ switch (min_type) {
+ case TOMOYO_VALUE_TYPE_HEXADECIMAL:
+ tomoyo_addprintf(buffer, sizeof(buffer),
+ "0x%lX", min);
+ break;
+ case TOMOYO_VALUE_TYPE_OCTAL:
+ tomoyo_addprintf(buffer, sizeof(buffer),
+ "0%lo", min);
+ break;
+ default:
+ tomoyo_addprintf(buffer, sizeof(buffer), "%lu",
+ min);
+ break;
+ }
+ if (min == max && min_type == max_type)
+ break;
+ tomoyo_addprintf(buffer, sizeof(buffer), "-");
+ min_type = max_type;
+ min = max;
+ }
+ tomoyo_io_printf(head, "%s", buffer);
+ }
+}
+
+/**
+ * tomoyo_print_number_union - Print a tomoyo_number_union.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_number_union(struct tomoyo_io_buffer *head,
+ const struct tomoyo_number_union *ptr)
+{
+ tomoyo_set_space(head);
+ tomoyo_print_number_union_nospace(head, ptr);
+}
+
+/**
+ * tomoyo_assign_profile - Create a new profile.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to create.
+ *
+ * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise.
+ */
+static struct tomoyo_profile *tomoyo_assign_profile
+(struct tomoyo_policy_namespace *ns, const unsigned int profile)
+{
+ struct tomoyo_profile *ptr;
+ struct tomoyo_profile *entry;
+
+ if (profile >= TOMOYO_MAX_PROFILES)
+ return NULL;
+ ptr = ns->profile_ptr[profile];
+ if (ptr)
+ return ptr;
+ entry = kzalloc(sizeof(*entry), GFP_NOFS | __GFP_NOWARN);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ ptr = ns->profile_ptr[profile];
+ if (!ptr && tomoyo_memory_ok(entry)) {
+ ptr = entry;
+ ptr->default_config = TOMOYO_CONFIG_DISABLED |
+ TOMOYO_CONFIG_WANT_GRANT_LOG |
+ TOMOYO_CONFIG_WANT_REJECT_LOG;
+ memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
+ sizeof(ptr->config));
+ ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+ ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+ CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
+ mb(); /* Avoid out-of-order execution. */
+ ns->profile_ptr[profile] = ptr;
+ entry = NULL;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ out:
+ kfree(entry);
+ return ptr;
+}
+
+/**
+ * tomoyo_profile - Find a profile.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to find.
+ *
+ * Returns pointer to "struct tomoyo_profile".
+ */
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+ const u8 profile)
+{
+ static struct tomoyo_profile tomoyo_null_profile;
+ struct tomoyo_profile *ptr = ns->profile_ptr[profile];
+
+ if (!ptr)
+ ptr = &tomoyo_null_profile;
+ return ptr;
+}
+
+/**
+ * tomoyo_find_yesno - Find values for specified keyword.
+ *
+ * @string: String to check.
+ * @find: Name of keyword.
+ *
+ * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise.
+ */
+static s8 tomoyo_find_yesno(const char *string, const char *find)
+{
+ const char *cp = strstr(string, find);
+
+ if (cp) {
+ cp += strlen(find);
+ if (!strncmp(cp, "=yes", 4))
+ return 1;
+ else if (!strncmp(cp, "=no", 3))
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * tomoyo_set_uint - Set value for specified preference.
+ *
+ * @i: Pointer to "unsigned int".
+ * @string: String to check.
+ * @find: Name of keyword.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_uint(unsigned int *i, const char *string,
+ const char *find)
+{
+ const char *cp = strstr(string, find);
+
+ if (cp)
+ sscanf(cp + strlen(find), "=%u", i);
+}
+
+/**
+ * tomoyo_set_mode - Set mode for specified profile.
+ *
+ * @name: Name of functionality.
+ * @value: Mode for @name.
+ * @profile: Pointer to "struct tomoyo_profile".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_set_mode(char *name, const char *value,
+ struct tomoyo_profile *profile)
+{
+ u8 i;
+ u8 config;
+
+ if (!strcmp(name, "CONFIG")) {
+ i = TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX;
+ config = profile->default_config;
+ } else if (tomoyo_str_starts(&name, "CONFIG::")) {
+ config = 0;
+ for (i = 0; i < TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) {
+ int len = 0;
+
+ if (i < TOMOYO_MAX_MAC_INDEX) {
+ const u8 c = tomoyo_index2category[i];
+ const char *category =
+ tomoyo_category_keywords[c];
+
+ len = strlen(category);
+ if (strncmp(name, category, len) ||
+ name[len++] != ':' || name[len++] != ':')
+ continue;
+ }
+ if (strcmp(name + len, tomoyo_mac_keywords[i]))
+ continue;
+ config = profile->config[i];
+ break;
+ }
+ if (i == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ if (strstr(value, "use_default")) {
+ config = TOMOYO_CONFIG_USE_DEFAULT;
+ } else {
+ u8 mode;
+
+ for (mode = 0; mode < 4; mode++)
+ if (strstr(value, tomoyo_mode[mode]))
+ /*
+ * Update lower 3 bits in order to distinguish
+ * 'config' from 'TOMOYO_CONFIG_USE_DEFAULT'.
+ */
+ config = (config & ~7) | mode;
+ if (config != TOMOYO_CONFIG_USE_DEFAULT) {
+ switch (tomoyo_find_yesno(value, "grant_log")) {
+ case 1:
+ config |= TOMOYO_CONFIG_WANT_GRANT_LOG;
+ break;
+ case 0:
+ config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG;
+ break;
+ }
+ switch (tomoyo_find_yesno(value, "reject_log")) {
+ case 1:
+ config |= TOMOYO_CONFIG_WANT_REJECT_LOG;
+ break;
+ case 0:
+ config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG;
+ break;
+ }
+ }
+ }
+ if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+ profile->config[i] = config;
+ else if (config != TOMOYO_CONFIG_USE_DEFAULT)
+ profile->default_config = config;
+ return 0;
+}
+
+/**
+ * tomoyo_write_profile - Write profile table.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ unsigned int i;
+ char *cp;
+ struct tomoyo_profile *profile;
+
+ if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
+ == 1)
+ return 0;
+ i = simple_strtoul(data, &cp, 10);
+ if (*cp != '-')
+ return -EINVAL;
+ data = cp + 1;
+ profile = tomoyo_assign_profile(head->w.ns, i);
+ if (!profile)
+ return -EINVAL;
+ cp = strchr(data, '=');
+ if (!cp)
+ return -EINVAL;
+ *cp++ = '\0';
+ if (!strcmp(data, "COMMENT")) {
+ static DEFINE_SPINLOCK(lock);
+ const struct tomoyo_path_info *new_comment
+ = tomoyo_get_name(cp);
+ const struct tomoyo_path_info *old_comment;
+
+ if (!new_comment)
+ return -ENOMEM;
+ spin_lock(&lock);
+ old_comment = profile->comment;
+ profile->comment = new_comment;
+ spin_unlock(&lock);
+ tomoyo_put_name(old_comment);
+ return 0;
+ }
+ if (!strcmp(data, "PREFERENCE")) {
+ for (i = 0; i < TOMOYO_MAX_PREF; i++)
+ tomoyo_set_uint(&profile->pref[i], cp,
+ tomoyo_pref_keywords[i]);
+ return 0;
+ }
+ return tomoyo_set_mode(data, cp, profile);
+}
+
+/**
+ * tomoyo_print_config - Print mode for specified functionality.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @config: Mode for that functionality.
+ *
+ * Returns nothing.
+ *
+ * Caller prints functionality's name.
+ */
+static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
+{
+ tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
+ tomoyo_mode[config & 3],
+ str_yes_no(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
+ str_yes_no(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
+}
+
+/**
+ * tomoyo_read_profile - Read profile table.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
+{
+ u8 index;
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ const struct tomoyo_profile *profile;
+
+ if (head->r.eof)
+ return;
+ next:
+ index = head->r.index;
+ profile = ns->profile_ptr[index];
+ switch (head->r.step) {
+ case 0:
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "PROFILE_VERSION=%u\n",
+ ns->profile_version);
+ head->r.step++;
+ break;
+ case 1:
+ for ( ; head->r.index < TOMOYO_MAX_PROFILES;
+ head->r.index++)
+ if (ns->profile_ptr[head->r.index])
+ break;
+ if (head->r.index == TOMOYO_MAX_PROFILES) {
+ head->r.eof = true;
+ return;
+ }
+ head->r.step++;
+ break;
+ case 2:
+ {
+ u8 i;
+ const struct tomoyo_path_info *comment =
+ profile->comment;
+
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-COMMENT=", index);
+ tomoyo_set_string(head, comment ? comment->name : "");
+ tomoyo_set_lf(head);
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-PREFERENCE={ ", index);
+ for (i = 0; i < TOMOYO_MAX_PREF; i++)
+ tomoyo_io_printf(head, "%s=%u ",
+ tomoyo_pref_keywords[i],
+ profile->pref[i]);
+ tomoyo_set_string(head, "}\n");
+ head->r.step++;
+ }
+ break;
+ case 3:
+ {
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-%s", index, "CONFIG");
+ tomoyo_print_config(head, profile->default_config);
+ head->r.bit = 0;
+ head->r.step++;
+ }
+ break;
+ case 4:
+ for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
+ const u8 i = head->r.bit;
+ const u8 config = profile->config[i];
+
+ if (config == TOMOYO_CONFIG_USE_DEFAULT)
+ continue;
+ tomoyo_print_namespace(head);
+ if (i < TOMOYO_MAX_MAC_INDEX)
+ tomoyo_io_printf(head, "%u-CONFIG::%s::%s",
+ index,
+ tomoyo_category_keywords
+ [tomoyo_index2category[i]],
+ tomoyo_mac_keywords[i]);
+ else
+ tomoyo_io_printf(head, "%u-CONFIG::%s", index,
+ tomoyo_mac_keywords[i]);
+ tomoyo_print_config(head, config);
+ head->r.bit++;
+ break;
+ }
+ if (head->r.bit == TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX) {
+ head->r.index++;
+ head->r.step = 1;
+ }
+ break;
+ }
+ if (tomoyo_flush(head))
+ goto next;
+}
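+
+/*
+ * Illustrative sketch (the COMMENT text and preference keywords are
+ * assumptions, not taken from this file): for each namespace the output of
+ * tomoyo_read_profile() looks like
+ *
+ *   PROFILE_VERSION=20150505
+ *   0-COMMENT=-----Disabled Mode-----
+ *   0-PREFERENCE={ max_audit_log=1024 max_learning_entry=2048 }
+ *   0-CONFIG={ mode=disabled grant_log=no reject_log=yes }
+ *
+ * followed by per-functionality "0-CONFIG::..." lines for entries that do
+ * not use TOMOYO_CONFIG_USE_DEFAULT.
+ */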
+
+/**
+ * tomoyo_same_manager - Check for duplicated "struct tomoyo_manager" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_manager(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return container_of(a, struct tomoyo_manager, head)->manager ==
+ container_of(b, struct tomoyo_manager, head)->manager;
+}
+
+/**
+ * tomoyo_update_manager_entry - Add a manager entry.
+ *
+ * @manager: The path to manager or the domainname.
+ * @is_delete: True if it is a delete request.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_manager_entry(const char *manager,
+ const bool is_delete)
+{
+ struct tomoyo_manager e = { };
+ struct tomoyo_acl_param param = {
+ /* .ns = &tomoyo_kernel_namespace, */
+ .is_delete = is_delete,
+ .list = &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER],
+ };
+ int error = is_delete ? -ENOENT : -ENOMEM;
+
+ if (!tomoyo_correct_domain(manager) &&
+ !tomoyo_correct_word(manager))
+ return -EINVAL;
+ e.manager = tomoyo_get_name(manager);
+ if (e.manager) {
+ error = tomoyo_update_policy(&e.head, sizeof(e), &param,
+ tomoyo_same_manager);
+ tomoyo_put_name(e.manager);
+ }
+ return error;
+}
+
+/**
+ * tomoyo_write_manager - Write manager policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_manager(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+
+ if (!strcmp(data, "manage_by_non_root")) {
+ tomoyo_manage_by_non_root = !head->w.is_delete;
+ return 0;
+ }
+ return tomoyo_update_manager_entry(data, head->w.is_delete);
+}
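+
+/*
+ * Illustrative sketch (the program path is an assumption, not taken from
+ * this file): lines written to /sys/kernel/security/tomoyo/manager look
+ * like
+ *
+ *   /usr/sbin/tomoyo-loadpolicy
+ *   manage_by_non_root
+ *
+ * The first registers a program (or a domainname) as a policy manager via
+ * tomoyo_update_manager_entry(); the second toggles the
+ * tomoyo_manage_by_non_root flag handled above.
+ */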
+
+/**
+ * tomoyo_read_manager - Read manager policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
+{
+ if (head->r.eof)
+ return;
+ list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER]) {
+ struct tomoyo_manager *ptr =
+ list_entry(head->r.acl, typeof(*ptr), head.list);
+
+ if (ptr->head.is_deleted)
+ continue;
+ if (!tomoyo_flush(head))
+ return;
+ tomoyo_set_string(head, ptr->manager->name);
+ tomoyo_set_lf(head);
+ }
+ head->r.eof = true;
+}
+
+/**
+ * tomoyo_manager - Check whether the current process is a policy manager.
+ *
+ * Returns true if the current process is permitted to modify policy
+ * via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_manager(void)
+{
+ struct tomoyo_manager *ptr;
+ const char *exe;
+ const struct task_struct *task = current;
+ const struct tomoyo_path_info *domainname = tomoyo_domain()->domainname;
+ bool found = IS_ENABLED(CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING);
+
+ if (!tomoyo_policy_loaded)
+ return true;
+ if (!tomoyo_manage_by_non_root &&
+ (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
+ !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
+ return false;
+ exe = tomoyo_get_exe();
+ if (!exe)
+ return false;
+ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!ptr->head.is_deleted &&
+ (!tomoyo_pathcmp(domainname, ptr->manager) ||
+ !strcmp(exe, ptr->manager->name))) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) { /* Reduce error messages. */
+ static pid_t last_pid;
+ const pid_t pid = current->pid;
+
+ if (last_pid != pid) {
+ pr_warn("%s ( %s ) is not permitted to update policies.\n",
+ domainname->name, exe);
+ last_pid = pid;
+ }
+ }
+ kfree(exe);
+ return found;
+}
+
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial);
+
+/**
+ * tomoyo_select_domain - Parse select command.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @data: String to parse.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
+ const char *data)
+{
+ unsigned int pid;
+ struct tomoyo_domain_info *domain = NULL;
+ bool global_pid = false;
+
+ if (strncmp(data, "select ", 7))
+ return false;
+ data += 7;
+ if (sscanf(data, "pid=%u", &pid) == 1 ||
+ (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
+ struct task_struct *p;
+
+ rcu_read_lock();
+ if (global_pid)
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ else
+ p = find_task_by_vpid(pid);
+ if (p)
+ domain = tomoyo_task(p)->domain_info;
+ rcu_read_unlock();
+ } else if (!strncmp(data, "domain=", 7)) {
+ if (tomoyo_domain_def(data + 7))
+ domain = tomoyo_find_domain(data + 7);
+ } else if (sscanf(data, "Q=%u", &pid) == 1) {
+ domain = tomoyo_find_domain_by_qid(pid);
+ } else
+ return false;
+ head->w.domain = domain;
+ /* Accessing read_buf is safe because head->io_sem is held. */
+ if (!head->read_buf)
+ return true; /* Do nothing if open(O_WRONLY). */
+ memset(&head->r, 0, sizeof(head->r));
+ head->r.print_this_domain_only = true;
+ if (domain)
+ head->r.domain = &domain->list;
+ else
+ head->r.eof = true;
+ tomoyo_io_printf(head, "# select %s\n", data);
+ if (domain && domain->is_deleted)
+ tomoyo_io_printf(head, "# This is a deleted domain.\n");
+ return true;
+}
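+
+/*
+ * Illustrative sketch (derived from the parser above; the concrete values
+ * are made up): accepted "select" lines include
+ *
+ *   select pid=1
+ *   select global-pid=1
+ *   select domain=<kernel> /usr/sbin/sshd
+ *   select Q=42
+ *
+ * where "Q=42" selects the domain of the pending query with serial
+ * number 42.
+ */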
+
+/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+ int error = -EINVAL;
+
+ if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+ struct tomoyo_task_acl e = {
+ .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+ .domainname = tomoyo_get_domainname(param),
+ };
+
+ if (e.domainname)
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_task_acl,
+ NULL);
+ tomoyo_put_name(e.domainname);
+ }
+ return error;
+}
+
+/**
+ * tomoyo_delete_domain - Delete a domain.
+ *
+ * @domainname: The name of domain.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_delete_domain(char *domainname)
+{
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_path_info name;
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -EINTR;
+ /* Is there an active domain? */
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ /* Never delete tomoyo_kernel_domain */
+ if (domain == &tomoyo_kernel_domain)
+ continue;
+ if (domain->is_deleted ||
+ tomoyo_pathcmp(domain->domainname, &name))
+ continue;
+ domain->is_deleted = true;
+ break;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ return 0;
+}
+
+/**
+ * tomoyo_write_domain2 - Write domain policy.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @list: Pointer to "struct list_head".
+ * @data: Policy to be interpreted.
+ * @is_delete: True if it is a delete request.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
+ struct list_head *list, char *data,
+ const bool is_delete)
+{
+ struct tomoyo_acl_param param = {
+ .ns = ns,
+ .list = list,
+ .data = data,
+ .is_delete = is_delete,
+ };
+ static const struct {
+ const char *keyword;
+ int (*write)(struct tomoyo_acl_param *param);
+ } tomoyo_callback[5] = {
+ { "file ", tomoyo_write_file },
+ { "network inet ", tomoyo_write_inet_network },
+ { "network unix ", tomoyo_write_unix_network },
+ { "misc ", tomoyo_write_misc },
+ { "task ", tomoyo_write_task },
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
+ if (!tomoyo_str_starts(&param.data,
+ tomoyo_callback[i].keyword))
+ continue;
+ return tomoyo_callback[i].write(&param);
+ }
+ return -EINVAL;
+}
+
+/* String table for domain flags. */
+const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = {
+ [TOMOYO_DIF_QUOTA_WARNED] = "quota_exceeded\n",
+ [TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n",
+};
+
+/**
+ * tomoyo_write_domain - Write domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ struct tomoyo_policy_namespace *ns;
+ struct tomoyo_domain_info *domain = head->w.domain;
+ const bool is_delete = head->w.is_delete;
+ bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
+ unsigned int idx;
+
+ if (*data == '<') {
+ int ret = 0;
+
+ domain = NULL;
+ if (is_delete)
+ ret = tomoyo_delete_domain(data);
+ else if (is_select)
+ domain = tomoyo_find_domain(data);
+ else
+ domain = tomoyo_assign_domain(data, false);
+ head->w.domain = domain;
+ return ret;
+ }
+ if (!domain)
+ return -EINVAL;
+ ns = domain->ns;
+ if (sscanf(data, "use_profile %u", &idx) == 1
+ && idx < TOMOYO_MAX_PROFILES) {
+ if (!tomoyo_policy_loaded || ns->profile_ptr[idx])
+ if (!is_delete)
+ domain->profile = (u8) idx;
+ return 0;
+ }
+ if (sscanf(data, "use_group %u\n", &idx) == 1
+ && idx < TOMOYO_MAX_ACL_GROUPS) {
+ if (!is_delete)
+ set_bit(idx, domain->group);
+ else
+ clear_bit(idx, domain->group);
+ return 0;
+ }
+ for (idx = 0; idx < TOMOYO_MAX_DOMAIN_INFO_FLAGS; idx++) {
+ const char *cp = tomoyo_dif[idx];
+
+ if (strncmp(data, cp, strlen(cp) - 1))
+ continue;
+ domain->flags[idx] = !is_delete;
+ return 0;
+ }
+ return tomoyo_write_domain2(ns, &domain->acl_info_list, data,
+ is_delete);
+}
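+
+/*
+ * Illustrative sketch (the paths are assumptions, not taken from this
+ * file): a domain policy fragment accepted by tomoyo_write_domain() might
+ * read
+ *
+ *   <kernel> /usr/sbin/sshd
+ *   use_profile 1
+ *   use_group 0
+ *   file read /etc/ssh/sshd_config
+ *
+ * The leading '<' line selects (or creates) the domain; the remaining
+ * lines are handled here or dispatched to tomoyo_write_domain2().
+ */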
+
+/**
+ * tomoyo_print_condition - Print condition part.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @cond: Pointer to "struct tomoyo_condition".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
+ const struct tomoyo_condition *cond)
+{
+ switch (head->r.cond_step) {
+ case 0:
+ head->r.cond_index = 0;
+ head->r.cond_step++;
+ if (cond->transit) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, cond->transit->name);
+ }
+ fallthrough;
+ case 1:
+ {
+ const u16 condc = cond->condc;
+ const struct tomoyo_condition_element *condp =
+ (typeof(condp)) (cond + 1);
+ const struct tomoyo_number_union *numbers_p =
+ (typeof(numbers_p)) (condp + condc);
+ const struct tomoyo_name_union *names_p =
+ (typeof(names_p))
+ (numbers_p + cond->numbers_count);
+ const struct tomoyo_argv *argv =
+ (typeof(argv)) (names_p + cond->names_count);
+ const struct tomoyo_envp *envp =
+ (typeof(envp)) (argv + cond->argc);
+ u16 skip;
+
+ for (skip = 0; skip < head->r.cond_index; skip++) {
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+
+ condp++;
+ switch (left) {
+ case TOMOYO_ARGV_ENTRY:
+ argv++;
+ continue;
+ case TOMOYO_ENVP_ENTRY:
+ envp++;
+ continue;
+ case TOMOYO_NUMBER_UNION:
+ numbers_p++;
+ break;
+ }
+ switch (right) {
+ case TOMOYO_NAME_UNION:
+ names_p++;
+ break;
+ case TOMOYO_NUMBER_UNION:
+ numbers_p++;
+ break;
+ }
+ }
+ while (head->r.cond_index < condc) {
+ const u8 match = condp->equals;
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+
+ if (!tomoyo_flush(head))
+ return false;
+ condp++;
+ head->r.cond_index++;
+ tomoyo_set_space(head);
+ switch (left) {
+ case TOMOYO_ARGV_ENTRY:
+ tomoyo_io_printf(head,
+ "exec.argv[%lu]%s=\"",
+ argv->index, argv->is_not ? "!" : "");
+ tomoyo_set_string(head,
+ argv->value->name);
+ tomoyo_set_string(head, "\"");
+ argv++;
+ continue;
+ case TOMOYO_ENVP_ENTRY:
+ tomoyo_set_string(head,
+ "exec.envp[\"");
+ tomoyo_set_string(head,
+ envp->name->name);
+ tomoyo_io_printf(head, "\"]%s=", envp->is_not ? "!" : "");
+ if (envp->value) {
+ tomoyo_set_string(head, "\"");
+ tomoyo_set_string(head, envp->value->name);
+ tomoyo_set_string(head, "\"");
+ } else {
+ tomoyo_set_string(head,
+ "NULL");
+ }
+ envp++;
+ continue;
+ case TOMOYO_NUMBER_UNION:
+ tomoyo_print_number_union_nospace
+ (head, numbers_p++);
+ break;
+ default:
+ tomoyo_set_string(head,
+ tomoyo_condition_keyword[left]);
+ break;
+ }
+ tomoyo_set_string(head, match ? "=" : "!=");
+ switch (right) {
+ case TOMOYO_NAME_UNION:
+ tomoyo_print_name_union_quoted
+ (head, names_p++);
+ break;
+ case TOMOYO_NUMBER_UNION:
+ tomoyo_print_number_union_nospace
+ (head, numbers_p++);
+ break;
+ default:
+ tomoyo_set_string(head,
+ tomoyo_condition_keyword[right]);
+ break;
+ }
+ }
+ }
+ head->r.cond_step++;
+ fallthrough;
+ case 2:
+ if (!tomoyo_flush(head))
+ break;
+ head->r.cond_step++;
+ fallthrough;
+ case 3:
+ if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ tomoyo_io_printf(head, " grant_log=%s",
+ str_yes_no(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
+ tomoyo_set_lf(head);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_set_group - Print "acl_group " header keyword and category name.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @category: Category name.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_set_group(struct tomoyo_io_buffer *head,
+ const char *category)
+{
+ if (head->type == TOMOYO_EXCEPTIONPOLICY) {
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "acl_group %u ",
+ head->r.acl_group_index);
+ }
+ tomoyo_set_string(head, category);
+}
+
+/**
+ * tomoyo_print_entry - Print an ACL entry.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @acl: Pointer to an ACL entry.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
+ struct tomoyo_acl_info *acl)
+{
+ const u8 acl_type = acl->type;
+ bool first = true;
+ u8 bit;
+
+ if (head->r.print_cond_part)
+ goto print_cond_part;
+ if (acl->is_deleted)
+ return true;
+ if (!tomoyo_flush(head))
+ return false;
+ else if (acl_type == TOMOYO_TYPE_PATH_ACL) {
+ struct tomoyo_path_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u16 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (head->r.print_transition_related_only &&
+ bit != TOMOYO_TYPE_EXECUTE)
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_path_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+ struct tomoyo_task_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "task ");
+ tomoyo_set_string(head, "manual_domain_transition ");
+ tomoyo_set_string(head, ptr->domainname->name);
+ } else if (head->r.print_transition_related_only) {
+ return true;
+ } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
+ struct tomoyo_path2_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pp2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name1);
+ tomoyo_print_name_union(head, &ptr->name2);
+ } else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) {
+ struct tomoyo_path_number_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pn2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ tomoyo_print_number_union(head, &ptr->number);
+ } else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) {
+ struct tomoyo_mkdev_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pnnn2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ tomoyo_print_number_union(head, &ptr->mode);
+ tomoyo_print_number_union(head, &ptr->major);
+ tomoyo_print_number_union(head, &ptr->minor);
+ } else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+ struct tomoyo_inet_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network inet ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_set_space(head);
+ if (ptr->address.group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->address.group->group_name
+ ->name);
+ } else {
+ char buf[128];
+
+ tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+ tomoyo_io_printf(head, "%s", buf);
+ }
+ tomoyo_print_number_union(head, &ptr->port);
+ } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+ struct tomoyo_unix_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network unix ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
+ struct tomoyo_mount_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "file mount");
+ tomoyo_print_name_union(head, &ptr->dev_name);
+ tomoyo_print_name_union(head, &ptr->dir_name);
+ tomoyo_print_name_union(head, &ptr->fs_type);
+ tomoyo_print_number_union(head, &ptr->flags);
+ } else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+ struct tomoyo_env_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "misc env ");
+ tomoyo_set_string(head, ptr->env->name);
+ }
+ if (acl->cond) {
+ head->r.print_cond_part = true;
+ head->r.cond_step = 0;
+ if (!tomoyo_flush(head))
+ return false;
+print_cond_part:
+ if (!tomoyo_print_condition(head, acl->cond))
+ return false;
+ head->r.print_cond_part = false;
+ } else {
+ tomoyo_set_lf(head);
+ }
+ return true;
+}
+
+/**
+ * tomoyo_read_domain2 - Read domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @list: Pointer to "struct list_head".
+ *
+ * Caller holds tomoyo_read_lock().
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
+ struct list_head *list)
+{
+ list_for_each_cookie(head->r.acl, list) {
+ struct tomoyo_acl_info *ptr =
+ list_entry(head->r.acl, typeof(*ptr), list);
+
+ if (!tomoyo_print_entry(head, ptr))
+ return false;
+ }
+ head->r.acl = NULL;
+ return true;
+}
+
+/**
+ * tomoyo_read_domain - Read domain policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
+{
+ if (head->r.eof)
+ return;
+ list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
+ struct tomoyo_domain_info *domain =
+ list_entry(head->r.domain, typeof(*domain), list);
+ u8 i;
+
+ switch (head->r.step) {
+ case 0:
+ if (domain->is_deleted &&
+ !head->r.print_this_domain_only)
+ continue;
+ /* Print domainname and flags. */
+ tomoyo_set_string(head, domain->domainname->name);
+ tomoyo_set_lf(head);
+ tomoyo_io_printf(head, "use_profile %u\n",
+ domain->profile);
+ for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++)
+ if (domain->flags[i])
+ tomoyo_set_string(head, tomoyo_dif[i]);
+ head->r.index = 0;
+ head->r.step++;
+ fallthrough;
+ case 1:
+ while (head->r.index < TOMOYO_MAX_ACL_GROUPS) {
+ i = head->r.index++;
+ if (!test_bit(i, domain->group))
+ continue;
+ tomoyo_io_printf(head, "use_group %u\n", i);
+ if (!tomoyo_flush(head))
+ return;
+ }
+ head->r.index = 0;
+ head->r.step++;
+ tomoyo_set_lf(head);
+ fallthrough;
+ case 2:
+ if (!tomoyo_read_domain2(head, &domain->acl_info_list))
+ return;
+ head->r.step++;
+ if (!tomoyo_set_lf(head))
+ return;
+ fallthrough;
+ case 3:
+ head->r.step = 0;
+ if (head->r.print_this_domain_only)
+ goto done;
+ }
+ }
+ done:
+ head->r.eof = true;
+}
+
+/**
+ * tomoyo_write_pid - Specify PID to obtain domainname.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0.
+ */
+static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
+{
+ head->r.eof = false;
+ return 0;
+}
+
+/**
+ * tomoyo_read_pid - Get domainname of the specified PID.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns the domainname which the specified PID belongs to on success,
+ * an empty string otherwise.
+ * The PID is specified via tomoyo_write_pid() so that the user can obtain
+ * it through the read()/write() interface rather than the sysctl() interface.
+ */
+static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
+{
+ char *buf = head->write_buf;
+ bool global_pid = false;
+ unsigned int pid;
+ struct task_struct *p;
+ struct tomoyo_domain_info *domain = NULL;
+
+ /* Accessing write_buf is safe because head->io_sem is held. */
+ if (!buf) {
+ head->r.eof = true;
+ return; /* Do nothing if open(O_RDONLY). */
+ }
+ if (head->r.w_pos || head->r.eof)
+ return;
+ head->r.eof = true;
+ if (tomoyo_str_starts(&buf, "global-pid "))
+ global_pid = true;
+ if (kstrtouint(buf, 10, &pid))
+ return;
+ rcu_read_lock();
+ if (global_pid)
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ else
+ p = find_task_by_vpid(pid);
+ if (p)
+ domain = tomoyo_task(p)->domain_info;
+ rcu_read_unlock();
+ if (!domain)
+ return;
+ tomoyo_io_printf(head, "%u %u ", pid, domain->profile);
+ tomoyo_set_string(head, domain->domainname->name);
+}
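+
+/*
+ * Illustrative sketch (the domainname of PID 1 is an assumption): writing
+ * "global-pid 1" (or just "1") to
+ * /sys/kernel/security/tomoyo/.process_status and reading back yields
+ * something like
+ *
+ *   1 0 <kernel> /sbin/init
+ *
+ * i.e. "<pid> <profile> <domainname>" as printed above.
+ */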
+
+/* String table for domain transition control keywords. */
+static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
+ [TOMOYO_TRANSITION_CONTROL_NO_RESET] = "no_reset_domain ",
+ [TOMOYO_TRANSITION_CONTROL_RESET] = "reset_domain ",
+ [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ",
+ [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain ",
+ [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain ",
+ [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain ",
+};
+
+/* String table for grouping keywords. */
+static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
+ [TOMOYO_PATH_GROUP] = "path_group ",
+ [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_ADDRESS_GROUP] = "address_group ",
+};
+
+/**
+ * tomoyo_write_exception - Write exception policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
+{
+ const bool is_delete = head->w.is_delete;
+ struct tomoyo_acl_param param = {
+ .ns = head->w.ns,
+ .is_delete = is_delete,
+ .data = head->write_buf,
+ };
+ u8 i;
+
+ if (tomoyo_str_starts(&param.data, "aggregator "))
+ return tomoyo_write_aggregator(&param);
+ for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++)
+ if (tomoyo_str_starts(&param.data, tomoyo_transition_type[i]))
+ return tomoyo_write_transition_control(&param, i);
+ for (i = 0; i < TOMOYO_MAX_GROUP; i++)
+ if (tomoyo_str_starts(&param.data, tomoyo_group_name[i]))
+ return tomoyo_write_group(&param, i);
+ if (tomoyo_str_starts(&param.data, "acl_group ")) {
+ unsigned int group;
+ char *data;
+
+ group = simple_strtoul(param.data, &data, 10);
+ if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ')
+ return tomoyo_write_domain2
+ (head->w.ns, &head->w.ns->acl_group[group],
+ data, is_delete);
+ }
+ return -EINVAL;
+}
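+
+/*
+ * Illustrative sketch (the concrete paths and group name are assumptions,
+ * not taken from this file): exception policy lines dispatched above
+ * include
+ *
+ *   aggregator proc:/self/exe /proc/self/exe
+ *   initialize_domain /usr/sbin/sshd
+ *   path_group GROUP1 /etc/\*
+ *   acl_group 0 file read /etc/ld.so.cache
+ *
+ * The aggregator line also appears in the built-in policy near the bottom
+ * of this file.
+ */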
+
+/**
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx: Index number.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
+{
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ struct list_head *list = &ns->group_list[idx];
+
+ list_for_each_cookie(head->r.group, list) {
+ struct tomoyo_group *group =
+ list_entry(head->r.group, typeof(*group), head.list);
+
+ list_for_each_cookie(head->r.acl, &group->member_list) {
+ struct tomoyo_acl_head *ptr =
+ list_entry(head->r.acl, typeof(*ptr), list);
+
+ if (ptr->is_deleted)
+ continue;
+ if (!tomoyo_flush(head))
+ return false;
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, tomoyo_group_name[idx]);
+ tomoyo_set_string(head, group->group_name->name);
+ if (idx == TOMOYO_PATH_GROUP) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, container_of
+ (ptr, struct tomoyo_path_group,
+ head)->member_name->name);
+ } else if (idx == TOMOYO_NUMBER_GROUP) {
+ tomoyo_print_number_union(head, &container_of
+ (ptr,
+ struct tomoyo_number_group,
+ head)->number);
+ } else if (idx == TOMOYO_ADDRESS_GROUP) {
+ char buffer[128];
+ struct tomoyo_address_group *member =
+ container_of(ptr, typeof(*member),
+ head);
+
+ tomoyo_print_ip(buffer, sizeof(buffer),
+ &member->address);
+ tomoyo_io_printf(head, " %s", buffer);
+ }
+ tomoyo_set_lf(head);
+ }
+ head->r.acl = NULL;
+ }
+ head->r.group = NULL;
+ return true;
+}
+
+/**
+ * tomoyo_read_policy - Read "struct tomoyo_..._entry" list.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx: Index number.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
+{
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ struct list_head *list = &ns->policy_list[idx];
+
+ list_for_each_cookie(head->r.acl, list) {
+ struct tomoyo_acl_head *acl =
+ container_of(head->r.acl, typeof(*acl), list);
+ if (acl->is_deleted)
+ continue;
+ if (!tomoyo_flush(head))
+ return false;
+ switch (idx) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ {
+ struct tomoyo_transition_control *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, tomoyo_transition_type
+ [ptr->type]);
+ tomoyo_set_string(head, ptr->program ?
+ ptr->program->name : "any");
+ tomoyo_set_string(head, " from ");
+ tomoyo_set_string(head, ptr->domainname ?
+ ptr->domainname->name :
+ "any");
+ }
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ {
+ struct tomoyo_aggregator *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, "aggregator ");
+ tomoyo_set_string(head,
+ ptr->original_name->name);
+ tomoyo_set_space(head);
+ tomoyo_set_string(head,
+ ptr->aggregated_name->name);
+ }
+ break;
+ default:
+ continue;
+ }
+ tomoyo_set_lf(head);
+ }
+ head->r.acl = NULL;
+ return true;
+}
+
+/**
+ * tomoyo_read_exception - Read exception policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
+{
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+
+ if (head->r.eof)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY &&
+ tomoyo_read_policy(head, head->r.step))
+ head->r.step++;
+ if (head->r.step < TOMOYO_MAX_POLICY)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP &&
+ tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY))
+ head->r.step++;
+ if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP
+ + TOMOYO_MAX_ACL_GROUPS) {
+ head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY
+ - TOMOYO_MAX_GROUP;
+ if (!tomoyo_read_domain2(head, &ns->acl_group
+ [head->r.acl_group_index]))
+ return;
+ head->r.step++;
+ }
+ head->r.eof = true;
+}
+
+/* Wait queue for kernel -> userspace notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait);
+/* Wait queue for userspace -> kernel notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait);
+
+/* Structure for query. */
+struct tomoyo_query {
+ struct list_head list;
+ struct tomoyo_domain_info *domain;
+ char *query;
+ size_t query_len;
+ unsigned int serial;
+ u8 timer;
+ u8 answer;
+ u8 retry;
+};
+
+/* The list for "struct tomoyo_query". */
+static LIST_HEAD(tomoyo_query_list);
+
+/* Lock for manipulating tomoyo_query_list. */
+static DEFINE_SPINLOCK(tomoyo_query_list_lock);
+
+/*
+ * Number of "struct file" referring /sys/kernel/security/tomoyo/query
+ * interface.
+ */
+static atomic_t tomoyo_query_observers = ATOMIC_INIT(0);
+
+/**
+ * tomoyo_truncate - Truncate a line.
+ *
+ * @str: String to truncate.
+ *
+ * Returns length of truncated @str.
+ */
+static int tomoyo_truncate(char *str)
+{
+ char *start = str;
+
+ while (*(unsigned char *) str > (unsigned char) ' ')
+ str++;
+ *str = '\0';
+ return strlen(start) + 1;
+}
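+
+/*
+ * Worked example (not part of the original source): for
+ * str == "/bin/cat /etc/fstab" the loop above stops at the space (the
+ * first byte not greater than ' '), writes '\0' there, and returns
+ * strlen("/bin/cat") + 1 == 9.
+ */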
+
+/**
+ * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
+ *
+ * @domain: Pointer to "struct tomoyo_domain_info".
+ * @header: Lines containing ACL.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
+{
+ char *buffer;
+ char *realpath = NULL;
+ char *argv0 = NULL;
+ char *symlink = NULL;
+ char *cp = strchr(header, '\n');
+ int len;
+
+ if (!cp)
+ return;
+ cp = strchr(cp + 1, '\n');
+ if (!cp)
+ return;
+ *cp++ = '\0';
+ len = strlen(cp) + 1;
+ /* strstr() will return NULL if ordering is wrong. */
+ if (*cp == 'f') {
+ argv0 = strstr(header, " argv[]={ \"");
+ if (argv0) {
+ argv0 += 10;
+ len += tomoyo_truncate(argv0) + 14;
+ }
+ realpath = strstr(header, " exec={ realpath=\"");
+ if (realpath) {
+ realpath += 8;
+ len += tomoyo_truncate(realpath) + 6;
+ }
+ symlink = strstr(header, " symlink.target=\"");
+ if (symlink)
+ len += tomoyo_truncate(symlink + 1) + 1;
+ }
+ buffer = kmalloc(len, GFP_NOFS);
+ if (!buffer)
+ return;
+ snprintf(buffer, len - 1, "%s", cp);
+ if (realpath)
+ tomoyo_addprintf(buffer, len, " exec.%s", realpath);
+ if (argv0)
+ tomoyo_addprintf(buffer, len, " exec.argv[0]=%s", argv0);
+ if (symlink)
+ tomoyo_addprintf(buffer, len, "%s", symlink);
+ tomoyo_normalize_line(buffer);
+ if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer,
+ false))
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ kfree(buffer);
+}
+
+/**
+ * tomoyo_supervisor - Ask for the supervisor's decision.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf()'s format string, followed by parameters.
+ *
+ * Returns 0 if the supervisor decided to permit the access request that
+ * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the
+ * supervisor decided to retry that request, 0 if it is not in enforcing
+ * mode, and -EPERM otherwise.
+ */
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+ int len;
+ static unsigned int tomoyo_serial;
+ struct tomoyo_query entry = { };
+ bool quota_exceeded = false;
+
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+ /* Write /sys/kernel/security/tomoyo/audit. */
+ va_start(args, fmt);
+ tomoyo_write_log2(r, len, fmt, args);
+ va_end(args);
+ /* Nothing more to do if granted. */
+ if (r->granted)
+ return 0;
+ if (r->mode)
+ tomoyo_update_stat(r->mode);
+ switch (r->mode) {
+ case TOMOYO_CONFIG_ENFORCING:
+ error = -EPERM;
+ if (atomic_read(&tomoyo_query_observers))
+ break;
+ goto out;
+ case TOMOYO_CONFIG_LEARNING:
+ error = 0;
+ /* Check max_learning_entry parameter. */
+ if (tomoyo_domain_quota_is_ok(r))
+ break;
+ fallthrough;
+ default:
+ return 0;
+ }
+ /* Get message. */
+ va_start(args, fmt);
+ entry.query = tomoyo_init_log(r, len, fmt, args);
+ va_end(args);
+ if (!entry.query)
+ goto out;
+ entry.query_len = strlen(entry.query) + 1;
+ if (!error) {
+ tomoyo_add_entry(r->domain, entry.query);
+ goto out;
+ }
+ len = tomoyo_round2(entry.query_len);
+ entry.domain = r->domain;
+ spin_lock(&tomoyo_query_list_lock);
+ if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] &&
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len
+ >= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) {
+ quota_exceeded = true;
+ } else {
+ entry.serial = tomoyo_serial++;
+ entry.retry = r->retry;
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len;
+ list_add_tail(&entry.list, &tomoyo_query_list);
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (quota_exceeded)
+ goto out;
+ /* Give 10 seconds for supervisor's opinion. */
+ while (entry.timer < 10) {
+ wake_up_all(&tomoyo_query_wait);
+ if (wait_event_interruptible_timeout
+ (tomoyo_answer_wait, entry.answer ||
+ !atomic_read(&tomoyo_query_observers), HZ))
+ break;
+ entry.timer++;
+ }
+ spin_lock(&tomoyo_query_list_lock);
+ list_del(&entry.list);
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len;
+ spin_unlock(&tomoyo_query_list_lock);
+ switch (entry.answer) {
+ case 3: /* Asked to retry by administrator. */
+ error = TOMOYO_RETRY_REQUEST;
+ r->retry++;
+ break;
+ case 1:
+ /* Granted by administrator. */
+ error = 0;
+ break;
+ default:
+ /* Timed out or rejected by administrator. */
+ break;
+ }
+out:
+ kfree(entry.query);
+ return error;
+}
+
+/**
+ * tomoyo_find_domain_by_qid - Get domain by query id.
+ *
+ * @serial: Query ID assigned by tomoyo_supervisor().
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ */
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial)
+{
+ struct tomoyo_query *ptr;
+ struct tomoyo_domain_info *domain = NULL;
+
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each_entry(ptr, &tomoyo_query_list, list) {
+ if (ptr->serial != serial)
+ continue;
+ domain = ptr->domain;
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ return domain;
+}
+
+/**
+ * tomoyo_poll_query - poll() for /sys/kernel/security/tomoyo/query.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table".
+ *
+ * Returns EPOLLIN | EPOLLRDNORM when ready to read, 0 otherwise.
+ *
+ * Waits for access requests which violated policy in enforcing mode.
+ */
+static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait)
+{
+ if (!list_empty(&tomoyo_query_list))
+ return EPOLLIN | EPOLLRDNORM;
+ poll_wait(file, &tomoyo_query_wait, wait);
+ if (!list_empty(&tomoyo_query_list))
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
+
+/**
+ * tomoyo_read_query - Read access requests which violated policy in enforcing mode.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ */
+static void tomoyo_read_query(struct tomoyo_io_buffer *head)
+{
+ struct list_head *tmp;
+ unsigned int pos = 0;
+ size_t len = 0;
+ char *buf;
+
+ if (head->r.w_pos)
+ return;
+ kfree(head->read_buf);
+ head->read_buf = NULL;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
+ if (pos++ != head->r.query_index)
+ continue;
+ len = ptr->query_len;
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (!len) {
+ head->r.query_index = 0;
+ return;
+ }
+ buf = kzalloc(len + 32, GFP_NOFS);
+ if (!buf)
+ return;
+ pos = 0;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
+ if (pos++ != head->r.query_index)
+ continue;
+ /*
+ * Some queries may be skipped because tomoyo_query_list can
+ * change while the lock is dropped, but that is acceptable.
+ */
+ if (len == ptr->query_len)
+ snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial,
+ ptr->retry, ptr->query);
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (buf[0]) {
+ head->read_buf = buf;
+ head->r.w[head->r.w_pos++] = buf;
+ head->r.query_index++;
+ } else {
+ kfree(buf);
+ }
+}
+
+/**
+ * tomoyo_write_answer - Write the supervisor's decision.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ struct list_head *tmp;
+ unsigned int serial;
+ unsigned int answer;
+
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
+ ptr->timer = 0;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (sscanf(data, "A%u=%u", &serial, &answer) != 2)
+ return -EINVAL;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
+ if (ptr->serial != serial)
+ continue;
+ ptr->answer = answer;
+ /* Remove from tomoyo_query_list. */
+ if (ptr->answer)
+ list_del_init(&ptr->list);
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ return 0;
+}
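+
+/*
+ * Illustrative sketch of the round trip (derived from tomoyo_supervisor(),
+ * tomoyo_read_query() and tomoyo_write_answer() above; the serial number
+ * is made up): a pending request is read from
+ * /sys/kernel/security/tomoyo/query as
+ *
+ *   Q42-0
+ *   <access request in domain policy syntax>
+ *
+ * and a supervisor daemon replies by writing "A42=1" to grant, "A42=3" to
+ * make the kernel retry, or any other value (or nothing, letting the
+ * 10 second timer expire) to reject with -EPERM.
+ */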
+
+/**
+ * tomoyo_read_version - Get version.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns version information.
+ */
+static void tomoyo_read_version(struct tomoyo_io_buffer *head)
+{
+ if (!head->r.eof) {
+ tomoyo_io_printf(head, "2.6.0");
+ head->r.eof = true;
+ }
+}
+
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = {
+ [TOMOYO_STAT_POLICY_UPDATES] = "update:",
+ [TOMOYO_STAT_POLICY_LEARNING] = "violation in learning mode:",
+ [TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:",
+ [TOMOYO_STAT_POLICY_ENFORCING] = "violation in enforcing mode:",
+};
+
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
+ [TOMOYO_MEMORY_POLICY] = "policy:",
+ [TOMOYO_MEMORY_AUDIT] = "audit log:",
+ [TOMOYO_MEMORY_QUERY] = "query message:",
+};
+
+/* Counter for number of updates. */
+static atomic_t tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
+/* Timestamp counter for last updated. */
+static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
+
+/**
+ * tomoyo_update_stat - Update statistic counters.
+ *
+ * @index: Index for policy type.
+ *
+ * Returns nothing.
+ */
+void tomoyo_update_stat(const u8 index)
+{
+ atomic_inc(&tomoyo_stat_updated[index]);
+ tomoyo_stat_modified[index] = ktime_get_real_seconds();
+}
+
+/**
+ * tomoyo_read_stat - Read statistic data.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
+{
+ u8 i;
+ unsigned int total = 0;
+
+ if (head->r.eof)
+ return;
+ for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
+ tomoyo_io_printf(head, "Policy %-30s %10u",
+ tomoyo_policy_headers[i],
+ atomic_read(&tomoyo_stat_updated[i]));
+ if (tomoyo_stat_modified[i]) {
+ struct tomoyo_time stamp;
+
+ tomoyo_convert_time(tomoyo_stat_modified[i], &stamp);
+ tomoyo_io_printf(head, " (Last: %04u/%02u/%02u %02u:%02u:%02u)",
+ stamp.year, stamp.month, stamp.day,
+ stamp.hour, stamp.min, stamp.sec);
+ }
+ tomoyo_set_lf(head);
+ }
+ for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) {
+ unsigned int used = tomoyo_memory_used[i];
+
+ total += used;
+ tomoyo_io_printf(head, "Memory used by %-22s %10u",
+ tomoyo_memory_headers[i], used);
+ used = tomoyo_memory_quota[i];
+ if (used)
+ tomoyo_io_printf(head, " (Quota: %10u)", used);
+ tomoyo_set_lf(head);
+ }
+ tomoyo_io_printf(head, "Total memory used: %10u\n",
+ total);
+ head->r.eof = true;
+}
+
+/**
+ * tomoyo_write_stat - Set memory quota.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0.
+ */
+static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ u8 i;
+
+ if (tomoyo_str_starts(&data, "Memory used by "))
+ for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++)
+ if (tomoyo_str_starts(&data, tomoyo_memory_headers[i]))
+ sscanf(data, "%u", &tomoyo_memory_quota[i]);
+ return 0;
+}
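+
+/*
+ * Illustrative sketch (the number is made up): writing
+ *
+ *   Memory used by policy: 8388608
+ *
+ * to /sys/kernel/security/tomoyo/stat sets the quota for the "policy:"
+ * category parsed above; writing 0 clears the quota, since a quota of 0
+ * is treated as "no limit".
+ */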
+
+/**
+ * tomoyo_open_control - open() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @type: Type of interface.
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_open_control(const u8 type, struct file *file)
+{
+ struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);
+
+ if (!head)
+ return -ENOMEM;
+ mutex_init(&head->io_sem);
+ head->type = type;
+ switch (type) {
+ case TOMOYO_DOMAINPOLICY:
+ /* /sys/kernel/security/tomoyo/domain_policy */
+ head->write = tomoyo_write_domain;
+ head->read = tomoyo_read_domain;
+ break;
+ case TOMOYO_EXCEPTIONPOLICY:
+ /* /sys/kernel/security/tomoyo/exception_policy */
+ head->write = tomoyo_write_exception;
+ head->read = tomoyo_read_exception;
+ break;
+ case TOMOYO_AUDIT:
+ /* /sys/kernel/security/tomoyo/audit */
+ head->poll = tomoyo_poll_log;
+ head->read = tomoyo_read_log;
+ break;
+ case TOMOYO_PROCESS_STATUS:
+ /* /sys/kernel/security/tomoyo/.process_status */
+ head->write = tomoyo_write_pid;
+ head->read = tomoyo_read_pid;
+ break;
+ case TOMOYO_VERSION:
+ /* /sys/kernel/security/tomoyo/version */
+ head->read = tomoyo_read_version;
+ head->readbuf_size = 128;
+ break;
+ case TOMOYO_STAT:
+ /* /sys/kernel/security/tomoyo/stat */
+ head->write = tomoyo_write_stat;
+ head->read = tomoyo_read_stat;
+ head->readbuf_size = 1024;
+ break;
+ case TOMOYO_PROFILE:
+ /* /sys/kernel/security/tomoyo/profile */
+ head->write = tomoyo_write_profile;
+ head->read = tomoyo_read_profile;
+ break;
+ case TOMOYO_QUERY: /* /sys/kernel/security/tomoyo/query */
+ head->poll = tomoyo_poll_query;
+ head->write = tomoyo_write_answer;
+ head->read = tomoyo_read_query;
+ break;
+ case TOMOYO_MANAGER:
+ /* /sys/kernel/security/tomoyo/manager */
+ head->write = tomoyo_write_manager;
+ head->read = tomoyo_read_manager;
+ break;
+ }
+ if (!(file->f_mode & FMODE_READ)) {
+ /*
+ * No need to allocate read_buf since it is not opened
+ * for reading.
+ */
+ head->read = NULL;
+ head->poll = NULL;
+ } else if (!head->poll) {
+ /* Don't allocate read_buf for poll() access. */
+ if (!head->readbuf_size)
+ head->readbuf_size = 4096 * 2;
+ head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
+ if (!head->read_buf) {
+ kfree(head);
+ return -ENOMEM;
+ }
+ }
+ if (!(file->f_mode & FMODE_WRITE)) {
+ /*
+ * No need to allocate write_buf since it is not opened
+ * for writing.
+ */
+ head->write = NULL;
+ } else if (head->write) {
+ head->writebuf_size = 4096 * 2;
+ head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
+ if (!head->write_buf) {
+ kfree(head->read_buf);
+ kfree(head);
+ return -ENOMEM;
+ }
+ }
+ /*
+ * If the file is /sys/kernel/security/tomoyo/query, increment the
+ * observer counter.
+ * The observer counter is used by tomoyo_supervisor() to see if
+ * there is a process monitoring /sys/kernel/security/tomoyo/query.
+ */
+ if (type == TOMOYO_QUERY)
+ atomic_inc(&tomoyo_query_observers);
+ file->private_data = head;
+ tomoyo_notify_gc(head, true);
+ return 0;
+}
+
+/**
+ * tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
+ * EPOLLOUT | EPOLLWRNORM otherwise.
+ */
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
+{
+ struct tomoyo_io_buffer *head = file->private_data;
+
+ if (head->poll)
+ return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM;
+ return EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM;
+}
+
+/**
+ * tomoyo_set_namespace_cursor - Set namespace to read.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
+{
+ struct list_head *ns;
+
+ if (head->type != TOMOYO_EXCEPTIONPOLICY &&
+ head->type != TOMOYO_PROFILE)
+ return;
+ /*
+ * If this is the first read, or reading previous namespace finished
+ * and has more namespaces to read, update the namespace cursor.
+ */
+ ns = head->r.ns;
+ if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) {
+ /* Clearing is OK because tomoyo_flush() returned true. */
+ memset(&head->r, 0, sizeof(head->r));
+ head->r.ns = ns ? ns->next : tomoyo_namespace_list.next;
+ }
+}
+
+/**
+ * tomoyo_has_more_namespace - Check for unread namespaces.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if we have more entries to print, false otherwise.
+ */
+static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
+{
+ return (head->type == TOMOYO_EXCEPTIONPOLICY ||
+ head->type == TOMOYO_PROFILE) && head->r.eof &&
+ head->r.ns->next != &tomoyo_namespace_list;
+}
+
+/**
+ * tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @buffer: Pointer to buffer to write to.
+ * @buffer_len: Size of @buffer.
+ *
+ * Returns bytes read on success, negative value otherwise.
+ */
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+ const int buffer_len)
+{
+ int len;
+ int idx;
+
+ if (!head->read)
+ return -EINVAL;
+ if (mutex_lock_interruptible(&head->io_sem))
+ return -EINTR;
+ head->read_user_buf = buffer;
+ head->read_user_buf_avail = buffer_len;
+ idx = tomoyo_read_lock();
+ if (tomoyo_flush(head))
+ /* Call the policy handler. */
+ do {
+ tomoyo_set_namespace_cursor(head);
+ head->read(head);
+ } while (tomoyo_flush(head) &&
+ tomoyo_has_more_namespace(head));
+ tomoyo_read_unlock(idx);
+ len = head->read_user_buf - buffer;
+ mutex_unlock(&head->io_sem);
+ return len;
+}
+
+/**
+ * tomoyo_parse_policy - Parse a policy line.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @line: Line to parse.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
+{
+ /* Delete request? */
+ head->w.is_delete = !strncmp(line, "delete ", 7);
+ if (head->w.is_delete)
+ memmove(line, line + 7, strlen(line + 7) + 1);
+ /* Selecting namespace to update. */
+ if (head->type == TOMOYO_EXCEPTIONPOLICY ||
+ head->type == TOMOYO_PROFILE) {
+ if (*line == '<') {
+ char *cp = strchr(line, ' ');
+
+ if (cp) {
+ *cp++ = '\0';
+ head->w.ns = tomoyo_assign_namespace(line);
+ memmove(line, cp, strlen(cp) + 1);
+ } else
+ head->w.ns = NULL;
+ } else
+ head->w.ns = &tomoyo_kernel_namespace;
+ /* Don't allow updating if namespace is invalid. */
+ if (!head->w.ns)
+ return -ENOENT;
+ }
+ /* Do the update. */
+ return head->write(head);
+}
+
+/**
+ * tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @buffer: Pointer to buffer to read from.
+ * @buffer_len: Size of @buffer.
+ *
+ * Returns @buffer_len on success, negative value otherwise.
+ */
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ const char __user *buffer, const int buffer_len)
+{
+ int error = buffer_len;
+ size_t avail_len = buffer_len;
+ char *cp0 = head->write_buf;
+ int idx;
+
+ if (!head->write)
+ return -EINVAL;
+ if (mutex_lock_interruptible(&head->io_sem))
+ return -EINTR;
+ head->read_user_buf_avail = 0;
+ idx = tomoyo_read_lock();
+ /* Read a line and dispatch it to the policy handler. */
+ while (avail_len > 0) {
+ char c;
+
+ if (head->w.avail >= head->writebuf_size - 1) {
+ const int len = head->writebuf_size * 2;
+ char *cp = kzalloc(len, GFP_NOFS);
+
+ if (!cp) {
+ error = -ENOMEM;
+ break;
+ }
+ memmove(cp, cp0, head->w.avail);
+ kfree(cp0);
+ head->write_buf = cp;
+ cp0 = cp;
+ head->writebuf_size = len;
+ }
+ if (get_user(c, buffer)) {
+ error = -EFAULT;
+ break;
+ }
+ buffer++;
+ avail_len--;
+ cp0[head->w.avail++] = c;
+ if (c != '\n')
+ continue;
+ cp0[head->w.avail - 1] = '\0';
+ head->w.avail = 0;
+ tomoyo_normalize_line(cp0);
+ if (!strcmp(cp0, "reset")) {
+ head->w.ns = &tomoyo_kernel_namespace;
+ head->w.domain = NULL;
+ memset(&head->r, 0, sizeof(head->r));
+ continue;
+ }
+ /* Don't allow updating policies by non manager programs. */
+ switch (head->type) {
+ case TOMOYO_PROCESS_STATUS:
+ /* This does not write anything. */
+ break;
+ case TOMOYO_DOMAINPOLICY:
+ if (tomoyo_select_domain(head, cp0))
+ continue;
+ fallthrough;
+ case TOMOYO_EXCEPTIONPOLICY:
+ if (!strcmp(cp0, "select transition_only")) {
+ head->r.print_transition_related_only = true;
+ continue;
+ }
+ fallthrough;
+ default:
+ if (!tomoyo_manager()) {
+ error = -EPERM;
+ goto out;
+ }
+ }
+ switch (tomoyo_parse_policy(head, cp0)) {
+ case -EPERM:
+ error = -EPERM;
+ goto out;
+ case 0:
+ switch (head->type) {
+ case TOMOYO_DOMAINPOLICY:
+ case TOMOYO_EXCEPTIONPOLICY:
+ case TOMOYO_STAT:
+ case TOMOYO_PROFILE:
+ case TOMOYO_MANAGER:
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+out:
+ tomoyo_read_unlock(idx);
+ mutex_unlock(&head->io_sem);
+ return error;
+}
+
+/**
+ * tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ */
+void tomoyo_close_control(struct tomoyo_io_buffer *head)
+{
+ /*
+ * If the file is /sys/kernel/security/tomoyo/query , decrement the
+ * observer counter.
+ */
+ if (head->type == TOMOYO_QUERY &&
+ atomic_dec_and_test(&tomoyo_query_observers))
+ wake_up_all(&tomoyo_answer_wait);
+ tomoyo_notify_gc(head, false);
+}
+
+/**
+ * tomoyo_check_profile - Check all profiles currently assigned to domains are defined.
+ */
+void tomoyo_check_profile(void)
+{
+ struct tomoyo_domain_info *domain;
+ const int idx = tomoyo_read_lock();
+
+ tomoyo_policy_loaded = true;
+ pr_info("TOMOYO: 2.6.0\n");
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ const u8 profile = domain->profile;
+ struct tomoyo_policy_namespace *ns = domain->ns;
+
+ if (ns->profile_version == 20110903) {
+ pr_info_once("Converting profile version from %u to %u.\n",
+ 20110903, 20150505);
+ ns->profile_version = 20150505;
+ }
+ if (ns->profile_version != 20150505)
+ pr_err("Profile version %u is not supported.\n",
+ ns->profile_version);
+ else if (!ns->profile_ptr[profile])
+ pr_err("Profile %u (used by '%s') is not defined.\n",
+ profile, domain->domainname->name);
+ else
+ continue;
+ pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
+ pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
+ panic("STOP!");
+ }
+ tomoyo_read_unlock(idx);
+ pr_info("Mandatory Access Control activated.\n");
+}
+
+/**
+ * tomoyo_load_builtin_policy - Load built-in policy.
+ *
+ * Returns nothing.
+ */
+void __init tomoyo_load_builtin_policy(void)
+{
+#ifdef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
+ static char tomoyo_builtin_profile[] __initdata =
+ "PROFILE_VERSION=20150505\n"
+ "0-CONFIG={ mode=learning grant_log=no reject_log=yes }\n";
+ static char tomoyo_builtin_exception_policy[] __initdata =
+ "aggregator proc:/self/exe /proc/self/exe\n";
+ static char tomoyo_builtin_domain_policy[] __initdata = "";
+ static char tomoyo_builtin_manager[] __initdata = "";
+ static char tomoyo_builtin_stat[] __initdata = "";
+#else
+ /*
+ * This include file is manually created and contains built-in policy
+ * named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy",
+ * "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager",
+ * "tomoyo_builtin_stat" in the form of "static char [] __initdata".
+ */
+#include "builtin-policy.h"
+#endif
+ u8 i;
+ const int idx = tomoyo_read_lock();
+
+ for (i = 0; i < 5; i++) {
+ struct tomoyo_io_buffer head = { };
+ char *start = "";
+
+ switch (i) {
+ case 0:
+ start = tomoyo_builtin_profile;
+ head.type = TOMOYO_PROFILE;
+ head.write = tomoyo_write_profile;
+ break;
+ case 1:
+ start = tomoyo_builtin_exception_policy;
+ head.type = TOMOYO_EXCEPTIONPOLICY;
+ head.write = tomoyo_write_exception;
+ break;
+ case 2:
+ start = tomoyo_builtin_domain_policy;
+ head.type = TOMOYO_DOMAINPOLICY;
+ head.write = tomoyo_write_domain;
+ break;
+ case 3:
+ start = tomoyo_builtin_manager;
+ head.type = TOMOYO_MANAGER;
+ head.write = tomoyo_write_manager;
+ break;
+ case 4:
+ start = tomoyo_builtin_stat;
+ head.type = TOMOYO_STAT;
+ head.write = tomoyo_write_stat;
+ break;
+ }
+ while (1) {
+ char *end = strchr(start, '\n');
+
+ if (!end)
+ break;
+ *end = '\0';
+ tomoyo_normalize_line(start);
+ head.write_buf = start;
+ tomoyo_parse_policy(&head, start);
+ start = end + 1;
+ }
+ }
+ tomoyo_read_unlock(idx);
+#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ tomoyo_check_profile();
+#endif
+}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
new file mode 100644
index 000000000..ca285f362
--- /dev/null
+++ b/security/tomoyo/common.h
@@ -0,0 +1,1333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * security/tomoyo/common.h
+ *
+ * Header file for TOMOYO.
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#ifndef _SECURITY_TOMOYO_COMMON_H
+#define _SECURITY_TOMOYO_COMMON_H
+
+#define pr_fmt(fmt) fmt
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/list.h>
+#include <linux/cred.h>
+#include <linux/poll.h>
+#include <linux/binfmts.h>
+#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <linux/lsm_hooks.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+/********** Constants definitions. **********/
+
+/*
+ * TOMOYO uses this hash only when appending a string into the string
+ * table. Strings are appended very rarely, so we don't need a large
+ * (e.g. 64k) hash size; 256 buckets are sufficient.
+ */
+#define TOMOYO_HASH_BITS 8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+
+/*
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
+ */
+#define TOMOYO_SOCK_MAX 6
+
+#define TOMOYO_EXEC_TMPSIZE 4096
+
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
+/* Profile number is an integer between 0 and 255. */
+#define TOMOYO_MAX_PROFILES 256
+
+/* Group number is an integer between 0 and 255. */
+#define TOMOYO_MAX_ACL_GROUPS 256
+
+/* Index numbers for "struct tomoyo_condition". */
+enum tomoyo_conditions_index {
+ TOMOYO_TASK_UID, /* current_uid() */
+ TOMOYO_TASK_EUID, /* current_euid() */
+ TOMOYO_TASK_SUID, /* current_suid() */
+ TOMOYO_TASK_FSUID, /* current_fsuid() */
+ TOMOYO_TASK_GID, /* current_gid() */
+ TOMOYO_TASK_EGID, /* current_egid() */
+ TOMOYO_TASK_SGID, /* current_sgid() */
+ TOMOYO_TASK_FSGID, /* current_fsgid() */
+ TOMOYO_TASK_PID, /* sys_getpid() */
+ TOMOYO_TASK_PPID, /* sys_getppid() */
+ TOMOYO_EXEC_ARGC, /* "struct linux_binprm *"->argc */
+ TOMOYO_EXEC_ENVC, /* "struct linux_binprm *"->envc */
+ TOMOYO_TYPE_IS_SOCKET, /* S_IFSOCK */
+ TOMOYO_TYPE_IS_SYMLINK, /* S_IFLNK */
+ TOMOYO_TYPE_IS_FILE, /* S_IFREG */
+ TOMOYO_TYPE_IS_BLOCK_DEV, /* S_IFBLK */
+ TOMOYO_TYPE_IS_DIRECTORY, /* S_IFDIR */
+ TOMOYO_TYPE_IS_CHAR_DEV, /* S_IFCHR */
+ TOMOYO_TYPE_IS_FIFO, /* S_IFIFO */
+ TOMOYO_MODE_SETUID, /* S_ISUID */
+ TOMOYO_MODE_SETGID, /* S_ISGID */
+ TOMOYO_MODE_STICKY, /* S_ISVTX */
+ TOMOYO_MODE_OWNER_READ, /* S_IRUSR */
+ TOMOYO_MODE_OWNER_WRITE, /* S_IWUSR */
+ TOMOYO_MODE_OWNER_EXECUTE, /* S_IXUSR */
+ TOMOYO_MODE_GROUP_READ, /* S_IRGRP */
+ TOMOYO_MODE_GROUP_WRITE, /* S_IWGRP */
+ TOMOYO_MODE_GROUP_EXECUTE, /* S_IXGRP */
+ TOMOYO_MODE_OTHERS_READ, /* S_IROTH */
+ TOMOYO_MODE_OTHERS_WRITE, /* S_IWOTH */
+ TOMOYO_MODE_OTHERS_EXECUTE, /* S_IXOTH */
+ TOMOYO_EXEC_REALPATH,
+ TOMOYO_SYMLINK_TARGET,
+ TOMOYO_PATH1_UID,
+ TOMOYO_PATH1_GID,
+ TOMOYO_PATH1_INO,
+ TOMOYO_PATH1_MAJOR,
+ TOMOYO_PATH1_MINOR,
+ TOMOYO_PATH1_PERM,
+ TOMOYO_PATH1_TYPE,
+ TOMOYO_PATH1_DEV_MAJOR,
+ TOMOYO_PATH1_DEV_MINOR,
+ TOMOYO_PATH2_UID,
+ TOMOYO_PATH2_GID,
+ TOMOYO_PATH2_INO,
+ TOMOYO_PATH2_MAJOR,
+ TOMOYO_PATH2_MINOR,
+ TOMOYO_PATH2_PERM,
+ TOMOYO_PATH2_TYPE,
+ TOMOYO_PATH2_DEV_MAJOR,
+ TOMOYO_PATH2_DEV_MINOR,
+ TOMOYO_PATH1_PARENT_UID,
+ TOMOYO_PATH1_PARENT_GID,
+ TOMOYO_PATH1_PARENT_INO,
+ TOMOYO_PATH1_PARENT_PERM,
+ TOMOYO_PATH2_PARENT_UID,
+ TOMOYO_PATH2_PARENT_GID,
+ TOMOYO_PATH2_PARENT_INO,
+ TOMOYO_PATH2_PARENT_PERM,
+ TOMOYO_MAX_CONDITION_KEYWORD,
+ TOMOYO_NUMBER_UNION,
+ TOMOYO_NAME_UNION,
+ TOMOYO_ARGV_ENTRY,
+ TOMOYO_ENVP_ENTRY,
+};
+
+/* Index numbers for stat(). */
+enum tomoyo_path_stat_index {
+ /* Do not change this order. */
+ TOMOYO_PATH1,
+ TOMOYO_PATH1_PARENT,
+ TOMOYO_PATH2,
+ TOMOYO_PATH2_PARENT,
+ TOMOYO_MAX_PATH_STAT
+};
+
+/* Index numbers for operation mode. */
+enum tomoyo_mode_index {
+ TOMOYO_CONFIG_DISABLED,
+ TOMOYO_CONFIG_LEARNING,
+ TOMOYO_CONFIG_PERMISSIVE,
+ TOMOYO_CONFIG_ENFORCING,
+ TOMOYO_CONFIG_MAX_MODE,
+ TOMOYO_CONFIG_WANT_REJECT_LOG = 64,
+ TOMOYO_CONFIG_WANT_GRANT_LOG = 128,
+ TOMOYO_CONFIG_USE_DEFAULT = 255,
+};
+
+/* Index numbers for entry type. */
+enum tomoyo_policy_id {
+ TOMOYO_ID_GROUP,
+ TOMOYO_ID_ADDRESS_GROUP,
+ TOMOYO_ID_PATH_GROUP,
+ TOMOYO_ID_NUMBER_GROUP,
+ TOMOYO_ID_TRANSITION_CONTROL,
+ TOMOYO_ID_AGGREGATOR,
+ TOMOYO_ID_MANAGER,
+ TOMOYO_ID_CONDITION,
+ TOMOYO_ID_NAME,
+ TOMOYO_ID_ACL,
+ TOMOYO_ID_DOMAIN,
+ TOMOYO_MAX_POLICY
+};
+
+/* Index numbers for domain's attributes. */
+enum tomoyo_domain_info_flags_index {
+ /* Quota warning flag. */
+ TOMOYO_DIF_QUOTA_WARNED,
+ /*
+ * This domain was unable to create a new domain at
+ * tomoyo_find_next_domain() because the name of the domain to be
+ * created was too long or it could not allocate memory.
+ * More than one process continued execve() without domain transition.
+ */
+ TOMOYO_DIF_TRANSITION_FAILED,
+ TOMOYO_MAX_DOMAIN_INFO_FLAGS
+};
+
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+ /* Follow profile's configuration. */
+ TOMOYO_GRANTLOG_AUTO,
+ /* Do not generate grant log. */
+ TOMOYO_GRANTLOG_NO,
+ /* Generate grant_log. */
+ TOMOYO_GRANTLOG_YES,
+};
+
+/* Index numbers for group entries. */
+enum tomoyo_group_id {
+ TOMOYO_PATH_GROUP,
+ TOMOYO_NUMBER_GROUP,
+ TOMOYO_ADDRESS_GROUP,
+ TOMOYO_MAX_GROUP
+};
+
+/* Index numbers for type of numeric values. */
+enum tomoyo_value_type {
+ TOMOYO_VALUE_TYPE_INVALID,
+ TOMOYO_VALUE_TYPE_DECIMAL,
+ TOMOYO_VALUE_TYPE_OCTAL,
+ TOMOYO_VALUE_TYPE_HEXADECIMAL,
+};
+
+/* Index numbers for domain transition control keywords. */
+enum tomoyo_transition_type {
+ /* Do not change this order. */
+ TOMOYO_TRANSITION_CONTROL_NO_RESET,
+ TOMOYO_TRANSITION_CONTROL_RESET,
+ TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE,
+ TOMOYO_TRANSITION_CONTROL_INITIALIZE,
+ TOMOYO_TRANSITION_CONTROL_NO_KEEP,
+ TOMOYO_TRANSITION_CONTROL_KEEP,
+ TOMOYO_MAX_TRANSITION_TYPE
+};
+
+/* Index numbers for Access Controls. */
+enum tomoyo_acl_entry_type_index {
+ TOMOYO_TYPE_PATH_ACL,
+ TOMOYO_TYPE_PATH2_ACL,
+ TOMOYO_TYPE_PATH_NUMBER_ACL,
+ TOMOYO_TYPE_MKDEV_ACL,
+ TOMOYO_TYPE_MOUNT_ACL,
+ TOMOYO_TYPE_INET_ACL,
+ TOMOYO_TYPE_UNIX_ACL,
+ TOMOYO_TYPE_ENV_ACL,
+ TOMOYO_TYPE_MANUAL_TASK_ACL,
+};
+
+/* Index numbers for access controls with one pathname. */
+enum tomoyo_path_acl_index {
+ TOMOYO_TYPE_EXECUTE,
+ TOMOYO_TYPE_READ,
+ TOMOYO_TYPE_WRITE,
+ TOMOYO_TYPE_APPEND,
+ TOMOYO_TYPE_UNLINK,
+ TOMOYO_TYPE_GETATTR,
+ TOMOYO_TYPE_RMDIR,
+ TOMOYO_TYPE_TRUNCATE,
+ TOMOYO_TYPE_SYMLINK,
+ TOMOYO_TYPE_CHROOT,
+ TOMOYO_TYPE_UMOUNT,
+ TOMOYO_MAX_PATH_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_memory_stat_type {
+ TOMOYO_MEMORY_POLICY,
+ TOMOYO_MEMORY_AUDIT,
+ TOMOYO_MEMORY_QUERY,
+ TOMOYO_MAX_MEMORY_STAT
+};
+
+enum tomoyo_mkdev_acl_index {
+ TOMOYO_TYPE_MKBLOCK,
+ TOMOYO_TYPE_MKCHAR,
+ TOMOYO_MAX_MKDEV_OPERATION
+};
+
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+ TOMOYO_NETWORK_BIND, /* bind() operation. */
+ TOMOYO_NETWORK_LISTEN, /* listen() operation. */
+ TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+ TOMOYO_NETWORK_SEND, /* send() operation. */
+ TOMOYO_MAX_NETWORK_OPERATION
+};
+
+/* Index numbers for access controls with two pathnames. */
+enum tomoyo_path2_acl_index {
+ TOMOYO_TYPE_LINK,
+ TOMOYO_TYPE_RENAME,
+ TOMOYO_TYPE_PIVOT_ROOT,
+ TOMOYO_MAX_PATH2_OPERATION
+};
+
+/* Index numbers for access controls with one pathname and one number. */
+enum tomoyo_path_number_acl_index {
+ TOMOYO_TYPE_CREATE,
+ TOMOYO_TYPE_MKDIR,
+ TOMOYO_TYPE_MKFIFO,
+ TOMOYO_TYPE_MKSOCK,
+ TOMOYO_TYPE_IOCTL,
+ TOMOYO_TYPE_CHMOD,
+ TOMOYO_TYPE_CHOWN,
+ TOMOYO_TYPE_CHGRP,
+ TOMOYO_MAX_PATH_NUMBER_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/ interfaces. */
+enum tomoyo_securityfs_interface_index {
+ TOMOYO_DOMAINPOLICY,
+ TOMOYO_EXCEPTIONPOLICY,
+ TOMOYO_PROCESS_STATUS,
+ TOMOYO_STAT,
+ TOMOYO_AUDIT,
+ TOMOYO_VERSION,
+ TOMOYO_PROFILE,
+ TOMOYO_QUERY,
+ TOMOYO_MANAGER
+};
+
+/* Index numbers for special mount operations. */
+enum tomoyo_special_mount {
+ TOMOYO_MOUNT_BIND, /* mount --bind /source /dest */
+ TOMOYO_MOUNT_MOVE, /* mount --move /old /new */
+ TOMOYO_MOUNT_REMOUNT, /* mount -o remount /dir */
+ TOMOYO_MOUNT_MAKE_UNBINDABLE, /* mount --make-unbindable /dir */
+ TOMOYO_MOUNT_MAKE_PRIVATE, /* mount --make-private /dir */
+ TOMOYO_MOUNT_MAKE_SLAVE, /* mount --make-slave /dir */
+ TOMOYO_MOUNT_MAKE_SHARED, /* mount --make-shared /dir */
+ TOMOYO_MAX_SPECIAL_MOUNT
+};
+
+/* Index numbers for functionality. */
+enum tomoyo_mac_index {
+ TOMOYO_MAC_FILE_EXECUTE,
+ TOMOYO_MAC_FILE_OPEN,
+ TOMOYO_MAC_FILE_CREATE,
+ TOMOYO_MAC_FILE_UNLINK,
+ TOMOYO_MAC_FILE_GETATTR,
+ TOMOYO_MAC_FILE_MKDIR,
+ TOMOYO_MAC_FILE_RMDIR,
+ TOMOYO_MAC_FILE_MKFIFO,
+ TOMOYO_MAC_FILE_MKSOCK,
+ TOMOYO_MAC_FILE_TRUNCATE,
+ TOMOYO_MAC_FILE_SYMLINK,
+ TOMOYO_MAC_FILE_MKBLOCK,
+ TOMOYO_MAC_FILE_MKCHAR,
+ TOMOYO_MAC_FILE_LINK,
+ TOMOYO_MAC_FILE_RENAME,
+ TOMOYO_MAC_FILE_CHMOD,
+ TOMOYO_MAC_FILE_CHOWN,
+ TOMOYO_MAC_FILE_CHGRP,
+ TOMOYO_MAC_FILE_IOCTL,
+ TOMOYO_MAC_FILE_CHROOT,
+ TOMOYO_MAC_FILE_MOUNT,
+ TOMOYO_MAC_FILE_UMOUNT,
+ TOMOYO_MAC_FILE_PIVOT_ROOT,
+ TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ TOMOYO_MAC_ENVIRON,
+ TOMOYO_MAX_MAC_INDEX
+};
+
+/* Index numbers for category of functionality. */
+enum tomoyo_mac_category_index {
+ TOMOYO_MAC_CATEGORY_FILE,
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ TOMOYO_MAC_CATEGORY_MISC,
+ TOMOYO_MAX_MAC_CATEGORY_INDEX
+};
+
+/*
+ * Retry this request. Returned by tomoyo_supervisor() if a policy violation
+ * occurred in enforcing mode and the userspace daemon decided to retry.
+ *
+ * The value must be positive so that "retry" can be distinguished from
+ * "granted" (which is 0) and "rejected" (which is a negative value).
+ */
+#define TOMOYO_RETRY_REQUEST 1
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_policy_stat_type {
+ /* Do not change this order. */
+ TOMOYO_STAT_POLICY_UPDATES,
+ TOMOYO_STAT_POLICY_LEARNING, /* == TOMOYO_CONFIG_LEARNING */
+ TOMOYO_STAT_POLICY_PERMISSIVE, /* == TOMOYO_CONFIG_PERMISSIVE */
+ TOMOYO_STAT_POLICY_ENFORCING, /* == TOMOYO_CONFIG_ENFORCING */
+ TOMOYO_MAX_POLICY_STAT
+};
+
+/* Index numbers for profile's PREFERENCE values. */
+enum tomoyo_pref_index {
+ TOMOYO_PREF_MAX_AUDIT_LOG,
+ TOMOYO_PREF_MAX_LEARNING_ENTRY,
+ TOMOYO_MAX_PREF
+};
+
+/********** Structure definitions. **********/
+
+/* Common header for holding ACL entries. */
+struct tomoyo_acl_head {
+ struct list_head list;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+} __packed;
+
+/* Common header for shared entries. */
+struct tomoyo_shared_acl_head {
+ struct list_head list;
+ atomic_t users;
+} __packed;
+
+struct tomoyo_policy_namespace;
+
+/* Structure for request info. */
+struct tomoyo_request_info {
+ /*
+ * For holding parameters specific to operations which deal with
+ * files. NULL if the request does not deal with files.
+ */
+ struct tomoyo_obj_info *obj;
+ /*
+ * For holding parameters specific to execve() requests.
+ * NULL if the request does not deal with execve().
+ */
+ struct tomoyo_execve *ee;
+ struct tomoyo_domain_info *domain;
+ /* For holding parameters. */
+ union {
+ struct {
+ const struct tomoyo_path_info *filename;
+ /* For using wildcards at tomoyo_find_next_domain(). */
+ const struct tomoyo_path_info *matched_path;
+ /* One of values in "enum tomoyo_path_acl_index". */
+ u8 operation;
+ } path;
+ struct {
+ const struct tomoyo_path_info *filename1;
+ const struct tomoyo_path_info *filename2;
+ /* One of values in "enum tomoyo_path2_acl_index". */
+ u8 operation;
+ } path2;
+ struct {
+ const struct tomoyo_path_info *filename;
+ unsigned int mode;
+ unsigned int major;
+ unsigned int minor;
+ /* One of values in "enum tomoyo_mkdev_acl_index". */
+ u8 operation;
+ } mkdev;
+ struct {
+ const struct tomoyo_path_info *filename;
+ unsigned long number;
+ /*
+ * One of values in
+ * "enum tomoyo_path_number_acl_index".
+ */
+ u8 operation;
+ } path_number;
+ struct {
+ const struct tomoyo_path_info *name;
+ } environ;
+ struct {
+ const __be32 *address;
+ u16 port;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ bool is_ipv6;
+ } inet_network;
+ struct {
+ const struct tomoyo_path_info *address;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ } unix_network;
+ struct {
+ const struct tomoyo_path_info *type;
+ const struct tomoyo_path_info *dir;
+ const struct tomoyo_path_info *dev;
+ unsigned long flags;
+ int need_dev;
+ } mount;
+ struct {
+ const struct tomoyo_path_info *domainname;
+ } task;
+ } param;
+ struct tomoyo_acl_info *matched_acl;
+ u8 param_type;
+ bool granted;
+ u8 retry;
+ u8 profile;
+ u8 mode; /* One of values in "enum tomoyo_mode_index". */
+ u8 type;
+};
+
+/* Structure for holding a token. */
+struct tomoyo_path_info {
+ const char *name;
+ u32 hash; /* = full_name_hash(name, strlen(name)) */
+ u16 const_len; /* = tomoyo_const_part_length(name) */
+ bool is_dir; /* = tomoyo_strendswith(name, "/") */
+ bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
+};
+
+/* Structure for holding string data. */
+struct tomoyo_name {
+ struct tomoyo_shared_acl_head head;
+ struct tomoyo_path_info entry;
+};
+
+/* Structure for holding a word. */
+struct tomoyo_name_union {
+ /* Either @filename or @group is NULL. */
+ const struct tomoyo_path_info *filename;
+ struct tomoyo_group *group;
+};
+
+/* Structure for holding a number. */
+struct tomoyo_number_union {
+ unsigned long values[2];
+ struct tomoyo_group *group; /* Maybe NULL. */
+ /* One of values in "enum tomoyo_value_type". */
+ u8 value_type[2];
+};
+
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+ struct in6_addr ip[2]; /* Big endian. */
+ struct tomoyo_group *group; /* Pointer to address group. */
+ bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
+struct tomoyo_group {
+ struct tomoyo_shared_acl_head head;
+ const struct tomoyo_path_info *group_name;
+ struct list_head member_list;
+};
+
+/* Structure for "path_group" directive. */
+struct tomoyo_path_group {
+ struct tomoyo_acl_head head;
+ const struct tomoyo_path_info *member_name;
+};
+
+/* Structure for "number_group" directive. */
+struct tomoyo_number_group {
+ struct tomoyo_acl_head head;
+ struct tomoyo_number_union number;
+};
+
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+ struct tomoyo_acl_head head;
+ /* Structure for holding an IP address. */
+ struct tomoyo_ipaddr_union address;
+};
+
+/* Subset of "struct stat". Used by conditional ACL and audit logs. */
+struct tomoyo_mini_stat {
+ kuid_t uid;
+ kgid_t gid;
+ ino_t ino;
+ umode_t mode;
+ dev_t dev;
+ dev_t rdev;
+};
+
+/* Structure for dumping argv[] and envp[] of "struct linux_binprm". */
+struct tomoyo_page_dump {
+ struct page *page; /* Previously dumped page. */
+ char *data; /* Contents of "page". Size is PAGE_SIZE. */
+};
+
+/* Structure for attribute checks in addition to pathname checks. */
+struct tomoyo_obj_info {
+ /*
+ * True if tomoyo_get_attributes() was already called, false otherwise.
+ */
+ bool validate_done;
+ /* True if @stat[] is valid. */
+ bool stat_valid[TOMOYO_MAX_PATH_STAT];
+ /* First pathname. Initialized with { NULL, NULL } if no path. */
+ struct path path1;
+ /* Second pathname. Initialized with { NULL, NULL } if no path. */
+ struct path path2;
+ /*
+ * Information on @path1, @path1's parent directory, @path2, @path2's
+ * parent directory.
+ */
+ struct tomoyo_mini_stat stat[TOMOYO_MAX_PATH_STAT];
+ /*
+ * Content of symbolic link to be created. NULL for operations other
+ * than symlink().
+ */
+ struct tomoyo_path_info *symlink_target;
+};
+
+/* Structure for argv[]. */
+struct tomoyo_argv {
+ unsigned long index;
+ const struct tomoyo_path_info *value;
+ bool is_not;
+};
+
+/* Structure for envp[]. */
+struct tomoyo_envp {
+ const struct tomoyo_path_info *name;
+ const struct tomoyo_path_info *value;
+ bool is_not;
+};
+
+/* Structure for execve() operation. */
+struct tomoyo_execve {
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj;
+ struct linux_binprm *bprm;
+ const struct tomoyo_path_info *transition;
+ /* For dumping argv[] and envp[]. */
+ struct tomoyo_page_dump dump;
+ /* For temporary use. */
+ char *tmp; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+};
+
+/* Structure for entries which follows "struct tomoyo_condition". */
+struct tomoyo_condition_element {
+ /*
+ * Left-hand operand. A "struct tomoyo_argv" (for TOMOYO_ARGV_ENTRY)
+ * or a "struct tomoyo_envp" (for TOMOYO_ENVP_ENTRY) is attached to
+ * the tail of the array of this struct.
+ */
+ u8 left;
+ /*
+ * Right-hand operand. A "struct tomoyo_number_union" (for
+ * TOMOYO_NUMBER_UNION) or a "struct tomoyo_name_union" (for
+ * TOMOYO_NAME_UNION) is attached to the tail of the array of this
+ * struct.
+ */
+ u8 right;
+ /* Equation operator. True if equals or overlaps, false otherwise. */
+ bool equals;
+};
+
+/* Structure for optional arguments. */
+struct tomoyo_condition {
+ struct tomoyo_shared_acl_head head;
+ u32 size; /* Memory size allocated for this entry. */
+ u16 condc; /* Number of conditions in this struct. */
+ u16 numbers_count; /* Number of "struct tomoyo_number_union" values. */
+ u16 names_count; /* Number of "struct tomoyo_name_union" names. */
+ u16 argc; /* Number of "struct tomoyo_argv". */
+ u16 envc; /* Number of "struct tomoyo_envp". */
+ u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+ const struct tomoyo_path_info *transit; /* Maybe NULL. */
+ /*
+ * struct tomoyo_condition_element condition[condc];
+ * struct tomoyo_number_union values[numbers_count];
+ * struct tomoyo_name_union names[names_count];
+ * struct tomoyo_argv argv[argc];
+ * struct tomoyo_envp envp[envc];
+ */
+};
+
+/* Common header for individual entries. */
+struct tomoyo_acl_info {
+ struct list_head list;
+ struct tomoyo_condition *cond; /* Maybe NULL. */
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+ u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
+} __packed;
+
+/* Structure for domain information. */
+struct tomoyo_domain_info {
+ struct list_head list;
+ struct list_head acl_info_list;
+ /* Name of this domain. Never NULL. */
+ const struct tomoyo_path_info *domainname;
+ /* Namespace for this domain. Never NULL. */
+ struct tomoyo_policy_namespace *ns;
+ /* Group numbers to use. */
+ unsigned long group[TOMOYO_MAX_ACL_GROUPS / BITS_PER_LONG];
+ u8 profile; /* Profile number to use. */
+ bool is_deleted; /* Delete flag. */
+ bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+ atomic_t users; /* Number of referring tasks. */
+};
+
+/*
+ * Structure for "task manual_domain_transition" directive.
+ */
+struct tomoyo_task_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+ /* Pointer to domainname. */
+ const struct tomoyo_path_info *domainname;
+};
+
+/*
+ * Structure for "file execute", "file read", "file write", "file append",
+ * "file unlink", "file getattr", "file rmdir", "file truncate",
+ * "file symlink", "file chroot" and "file unmount" directive.
+ */
+struct tomoyo_path_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
+ u16 perm; /* Bitmask of values in "enum tomoyo_path_acl_index". */
+ struct tomoyo_name_union name;
+};
+
+/*
+ * Structure for "file create", "file mkdir", "file mkfifo", "file mksock",
+ * "file ioctl", "file chmod", "file chown" and "file chgrp" directive.
+ */
+struct tomoyo_path_number_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */
+ /* Bitmask of values in "enum tomoyo_path_number_acl_index". */
+ u8 perm;
+ struct tomoyo_name_union name;
+ struct tomoyo_number_union number;
+};
+
+/* Structure for "file mkblock" and "file mkchar" directive. */
+struct tomoyo_mkdev_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */
+ u8 perm; /* Bitmask of values in "enum tomoyo_mkdev_acl_index". */
+ struct tomoyo_name_union name;
+ struct tomoyo_number_union mode;
+ struct tomoyo_number_union major;
+ struct tomoyo_number_union minor;
+};
+
+/*
+ * Structure for "file rename", "file link" and "file pivot_root" directive.
+ */
+struct tomoyo_path2_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
+ u8 perm; /* Bitmask of values in "enum tomoyo_path2_acl_index". */
+ struct tomoyo_name_union name1;
+ struct tomoyo_name_union name2;
+};
+
+/* Structure for "file mount" directive. */
+struct tomoyo_mount_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */
+ struct tomoyo_name_union dev_name;
+ struct tomoyo_name_union dir_name;
+ struct tomoyo_name_union fs_type;
+ struct tomoyo_number_union flags;
+};
+
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */
+ const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_ipaddr_union address;
+ struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_name_union name;
+};
+
+/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
+struct tomoyo_acl_param {
+ char *data;
+ struct list_head *list;
+ struct tomoyo_policy_namespace *ns;
+ bool is_delete;
+};
+
+#define TOMOYO_MAX_IO_READ_QUEUE 64
+
+/*
+ * Structure for reading/writing policy via /sys/kernel/security/tomoyo
+ * interfaces.
+ */
+struct tomoyo_io_buffer {
+ void (*read)(struct tomoyo_io_buffer *head);
+ int (*write)(struct tomoyo_io_buffer *head);
+ __poll_t (*poll)(struct file *file, poll_table *wait);
+ /* Exclusive lock for this structure. */
+ struct mutex io_sem;
+ char __user *read_user_buf;
+ size_t read_user_buf_avail;
+ struct {
+ struct list_head *ns;
+ struct list_head *domain;
+ struct list_head *group;
+ struct list_head *acl;
+ size_t avail;
+ unsigned int step;
+ unsigned int query_index;
+ u16 index;
+ u16 cond_index;
+ u8 acl_group_index;
+ u8 cond_step;
+ u8 bit;
+ u8 w_pos;
+ bool eof;
+ bool print_this_domain_only;
+ bool print_transition_related_only;
+ bool print_cond_part;
+ const char *w[TOMOYO_MAX_IO_READ_QUEUE];
+ } r;
+ struct {
+ struct tomoyo_policy_namespace *ns;
+ /* The position currently writing to. */
+ struct tomoyo_domain_info *domain;
+ /* Bytes available for writing. */
+ size_t avail;
+ bool is_delete;
+ } w;
+ /* Buffer for reading. */
+ char *read_buf;
+ /* Size of read buffer. */
+ size_t readbuf_size;
+ /* Buffer for writing. */
+ char *write_buf;
+ /* Size of write buffer. */
+ size_t writebuf_size;
+ /* Type of this interface. */
+ enum tomoyo_securityfs_interface_index type;
+ /* Users counter protected by tomoyo_io_buffer_list_lock. */
+ u8 users;
+ /* List for telling GC not to kfree() elements. */
+ struct list_head list;
+};
+
+/*
+ * Structure for "initialize_domain"/"no_initialize_domain"/"keep_domain"/
+ * "no_keep_domain" keyword.
+ */
+struct tomoyo_transition_control {
+ struct tomoyo_acl_head head;
+ u8 type; /* One of values in "enum tomoyo_transition_type". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+ const struct tomoyo_path_info *domainname; /* Maybe NULL */
+ const struct tomoyo_path_info *program; /* Maybe NULL */
+};
+
+/* Structure for "aggregator" keyword. */
+struct tomoyo_aggregator {
+ struct tomoyo_acl_head head;
+ const struct tomoyo_path_info *original_name;
+ const struct tomoyo_path_info *aggregated_name;
+};
+
+/* Structure for policy manager. */
+struct tomoyo_manager {
+ struct tomoyo_acl_head head;
+ /* A path to program or a domainname. */
+ const struct tomoyo_path_info *manager;
+};
+
+struct tomoyo_preference {
+ unsigned int learning_max_entry;
+ bool enforcing_verbose;
+ bool learning_verbose;
+ bool permissive_verbose;
+};
+
+/* Structure for /sys/kernel/security/tomoyo/profile interface. */
+struct tomoyo_profile {
+ const struct tomoyo_path_info *comment;
+ struct tomoyo_preference *learning;
+ struct tomoyo_preference *permissive;
+ struct tomoyo_preference *enforcing;
+ struct tomoyo_preference preference;
+ u8 default_config;
+ u8 config[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+ unsigned int pref[TOMOYO_MAX_PREF];
+};
+
+/* Structure for representing YYYY/MM/DD hh/mm/ss. */
+struct tomoyo_time {
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 min;
+ u8 sec;
+};
+
+/* Structure for policy namespace. */
+struct tomoyo_policy_namespace {
+ /* Profile table. Memory is allocated as needed. */
+ struct tomoyo_profile *profile_ptr[TOMOYO_MAX_PROFILES];
+ /* List of "struct tomoyo_group". */
+ struct list_head group_list[TOMOYO_MAX_GROUP];
+ /* List of policy. */
+ struct list_head policy_list[TOMOYO_MAX_POLICY];
+ /* The global ACL referred by "use_group" keyword. */
+ struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
+ /* List for connecting to tomoyo_namespace_list list. */
+ struct list_head namespace_list;
+ /* Profile version. Currently only 20150505 is defined. */
+ unsigned int profile_version;
+ /* Name of this namespace (e.g. "<kernel>" or "</usr/sbin/httpd>"). */
+ const char *name;
+};
+
+/* Structure for "struct task_struct"->security. */
+struct tomoyo_task {
+ struct tomoyo_domain_info *domain_info;
+ struct tomoyo_domain_info *old_domain_info;
+};
+
+/********** Function prototypes. **********/
+
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group);
+bool tomoyo_compare_number_union(const unsigned long value,
+ const struct tomoyo_number_union *ptr);
+bool tomoyo_condition(struct tomoyo_request_info *r,
+ const struct tomoyo_condition *cond);
+bool tomoyo_correct_domain(const unsigned char *domainname);
+bool tomoyo_correct_path(const char *filename);
+bool tomoyo_correct_word(const char *string);
+bool tomoyo_domain_def(const unsigned char *buffer);
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r);
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+ struct tomoyo_page_dump *dump);
+bool tomoyo_memory_ok(void *ptr);
+bool tomoyo_number_matches_group(const unsigned long min,
+ const unsigned long max,
+ const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr);
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr);
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+ struct tomoyo_number_union *ptr);
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+ const struct tomoyo_path_info *pattern);
+bool tomoyo_permstr(const char *string, const char *keyword);
+bool tomoyo_str_starts(char **src, const char *find);
+char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args);
+char *tomoyo_read_token(struct tomoyo_acl_param *param);
+char *tomoyo_realpath_from_path(const struct path *path);
+char *tomoyo_realpath_nofollow(const char *pathname);
+const char *tomoyo_get_exe(void);
+const struct tomoyo_path_info *tomoyo_compare_name_union
+(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
+const struct tomoyo_path_info *tomoyo_get_name(const char *name);
+const struct tomoyo_path_info *tomoyo_path_matches_group
+(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+ const struct path *path, const int flag);
+void tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+ const u8 index);
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+ struct tomoyo_domain_info *domain,
+ const u8 index);
+int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
+ const unsigned int mode, unsigned int dev);
+int tomoyo_mount_permission(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags,
+ void *data_page);
+int tomoyo_open_control(const u8 type, struct file *file);
+int tomoyo_path2_perm(const u8 operation, const struct path *path1,
+ const struct path *path2);
+int tomoyo_path_number_perm(const u8 operation, const struct path *path,
+ unsigned long number);
+int tomoyo_path_perm(const u8 operation, const struct path *path,
+ const char *target);
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait);
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size);
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+ __printf(2, 3);
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)
+ (const struct tomoyo_acl_info *,
+ const struct tomoyo_acl_info *),
+ bool (*merge_duplicate)
+ (struct tomoyo_acl_info *, struct tomoyo_acl_info *,
+ const bool));
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)
+ (const struct tomoyo_acl_head *,
+ const struct tomoyo_acl_head *));
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
+int tomoyo_write_file(struct tomoyo_acl_param *param);
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+ const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+ const int buffer_len);
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ const char __user *buffer, const int buffer_len);
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param);
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+ const bool transit);
+struct tomoyo_domain_info *tomoyo_domain(void);
+struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname);
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+ const u8 idx);
+struct tomoyo_policy_namespace *tomoyo_assign_namespace
+(const char *domainname);
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+ const u8 profile);
+unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
+ const u8 index);
+u8 tomoyo_parse_ulong(unsigned long *result, char **str);
+void *tomoyo_commit_ok(void *data, const unsigned int size);
+void __init tomoyo_load_builtin_policy(void);
+void __init tomoyo_mm_init(void);
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+ bool (*check_entry)(struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *));
+void tomoyo_check_profile(void);
+void tomoyo_convert_time(time64_t time, struct tomoyo_time *stamp);
+void tomoyo_del_condition(struct list_head *element);
+void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
+void tomoyo_load_policy(const char *filename);
+void tomoyo_normalize_line(unsigned char *buffer);
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr);
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+ const unsigned long value, const u8 type);
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr);
+void tomoyo_read_log(struct tomoyo_io_buffer *head);
+void tomoyo_update_stat(const u8 index);
+void tomoyo_warn_oom(const char *function);
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+ __printf(2, 3);
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args);
+
+/********** External variable declarations. **********/
+
+extern bool tomoyo_policy_loaded;
+extern int tomoyo_enabled;
+extern const char * const tomoyo_condition_keyword
+[TOMOYO_MAX_CONDITION_KEYWORD];
+extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
+extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
+extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
+extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
+extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
+extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION];
+extern struct list_head tomoyo_condition_list;
+extern struct list_head tomoyo_domain_list;
+extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+extern struct list_head tomoyo_namespace_list;
+extern struct mutex tomoyo_policy_lock;
+extern struct srcu_struct tomoyo_ss;
+extern struct tomoyo_domain_info tomoyo_kernel_domain;
+extern struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+extern struct lsm_blob_sizes tomoyo_blob_sizes;
+
+/********** Inlined functions. **********/
+
+/**
+ * tomoyo_read_lock - Take lock for protecting policy.
+ *
+ * Returns index number for tomoyo_read_unlock().
+ */
+static inline int tomoyo_read_lock(void)
+{
+ return srcu_read_lock(&tomoyo_ss);
+}
+
+/**
+ * tomoyo_read_unlock - Release lock for protecting policy.
+ *
+ * @idx: Index number returned by tomoyo_read_lock().
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_read_unlock(int idx)
+{
+ srcu_read_unlock(&tomoyo_ss, idx);
+}
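+
+/*
+ * Minimal usage sketch (the pattern used throughout this module,
+ * e.g. in tomoyo_load_builtin_policy()):
+ *
+ *   const int idx = tomoyo_read_lock();
+ *   ... walk policy lists under SRCU protection ...
+ *   tomoyo_read_unlock(idx);
+ */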
+
+/**
+ * tomoyo_sys_getppid - Copy of getppid().
+ *
+ * Returns parent process's PID.
+ *
+ * Alpha does not have getppid() defined. To be able to build this module on
+ * Alpha, I have to copy getppid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getppid(void)
+{
+ pid_t pid;
+
+ rcu_read_lock();
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
+ rcu_read_unlock();
+ return pid;
+}
+
+/**
+ * tomoyo_sys_getpid - Copy of getpid().
+ *
+ * Returns current thread's PID.
+ *
+ * Alpha does not have getpid() defined. To be able to build this module on
+ * Alpha, I have to copy getpid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getpid(void)
+{
+ return task_tgid_vnr(current);
+}
+
+/**
+ * tomoyo_pathcmp - strcmp() for "struct tomoyo_path_info" structure.
+ *
+ * @a: Pointer to "struct tomoyo_path_info".
+ * @b: Pointer to "struct tomoyo_path_info".
+ *
+ * Returns true if @a != @b, false otherwise (strcmp()-style).
+ */
+static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
+ const struct tomoyo_path_info *b)
+{
+ return a->hash != b->hash || strcmp(a->name, b->name);
+}
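+
+/*
+ * Note that, like strcmp(), a false result means "equal", so callers
+ * test for a match with: if (!tomoyo_pathcmp(a, b)) { ... }
+ */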
+
+/**
+ * tomoyo_put_name - Drop reference on "struct tomoyo_name".
+ *
+ * @name: Pointer to "struct tomoyo_path_info". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
+{
+ if (name) {
+ struct tomoyo_name *ptr =
+ container_of(name, typeof(*ptr), entry);
+ atomic_dec(&ptr->head.users);
+ }
+}
+
+/**
+ * tomoyo_put_condition - Drop reference on "struct tomoyo_condition".
+ *
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_condition(struct tomoyo_condition *cond)
+{
+ if (cond)
+ atomic_dec(&cond->head.users);
+}
+
+/**
+ * tomoyo_put_group - Drop reference on "struct tomoyo_group".
+ *
+ * @group: Pointer to "struct tomoyo_group". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_group(struct tomoyo_group *group)
+{
+ if (group)
+ atomic_dec(&group->head.users);
+}
+
+/**
+ * tomoyo_task - Get "struct tomoyo_task" for specified thread.
+ *
+ * @task: Pointer to "struct task_struct".
+ *
+ * Returns pointer to "struct tomoyo_task" for specified thread.
+ */
+static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
+{
+ return task->security + tomoyo_blob_sizes.lbs_task;
+}
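+
+/*
+ * The per-task TOMOYO data lives inside the LSM framework's shared
+ * security blob: tomoyo_blob_sizes.lbs_task is the byte offset that
+ * the framework reserved for TOMOYO within task->security.
+ */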
+
+/**
+ * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_name_union".
+ * @b: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_name_union
+(const struct tomoyo_name_union *a, const struct tomoyo_name_union *b)
+{
+ return a->filename == b->filename && a->group == b->group;
+}
+
+/**
+ * tomoyo_same_number_union - Check for duplicated "struct tomoyo_number_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_number_union".
+ * @b: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_number_union
+(const struct tomoyo_number_union *a, const struct tomoyo_number_union *b)
+{
+ return a->values[0] == b->values[0] && a->values[1] == b->values[1] &&
+ a->group == b->group && a->value_type[0] == b->value_type[0] &&
+ a->value_type[1] == b->value_type[1];
+}
+
+/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+ return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+ a->is_ipv6 == b->is_ipv6;
+}
+
+/**
+ * tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" for current thread.
+ */
+static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void)
+{
+ return tomoyo_domain()->ns;
+}
+
+#if defined(CONFIG_SLOB)
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns @size.
+ *
+ * Since SLOB does not round up, this function simply returns @size.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+ return size;
+}
+
+#else
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns rounded size.
+ *
+ * Strictly speaking, SLAB may be able to allocate (e.g.) 96 bytes instead of
+ * (e.g.) 128 bytes.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+#if PAGE_SIZE == 4096
+ size_t bsize = 32;
+#else
+ size_t bsize = 64;
+#endif
+ if (!size)
+ return 0;
+ while (size > bsize)
+ bsize <<= 1;
+ return bsize;
+}
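+
+/*
+ * For example, with 4096-byte pages tomoyo_round2(100) returns 128
+ * and tomoyo_round2(32) returns 32: a size is rounded up to the next
+ * power of 2, starting from the minimum block size of 32 bytes.
+ */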
+
+#endif
+
+/**
+ * list_for_each_cookie - iterate over a list with cookie.
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_cookie(pos, head) \
+ if (!pos) \
+ pos = srcu_dereference((head)->next, &tomoyo_ss); \
+ for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss))
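+
+/*
+ * Resumable-traversal sketch (the policy readers in common.c use this
+ * pattern with e.g. head->r.domain and tomoyo_domain_list):
+ *
+ *   list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
+ *     ...
+ *   }
+ *
+ * Since @pos doubles as the cookie, an interrupted read restarts from
+ * where the previous call stopped instead of from the list head.
+ */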
+
+#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
new file mode 100644
index 000000000..f8bcc083b
--- /dev/null
+++ b/security/tomoyo/condition.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/condition.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* List of "struct tomoyo_condition". */
+LIST_HEAD(tomoyo_condition_list);
+
+/**
+ * tomoyo_argv - Check argv[] in "struct linux_binprm".
+ *
+ * @index: Index number of @arg_ptr.
+ * @arg_ptr: Contents of argv[@index].
+ * @argc: Length of @argv.
+ * @argv: Pointer to "struct tomoyo_argv".
+ * @checked: Set to true if @argv[@index] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_argv(const unsigned int index, const char *arg_ptr,
+ const int argc, const struct tomoyo_argv *argv,
+ u8 *checked)
+{
+ int i;
+ struct tomoyo_path_info arg;
+
+ arg.name = arg_ptr;
+ for (i = 0; i < argc; argv++, checked++, i++) {
+ bool result;
+
+ if (index != argv->index)
+ continue;
+ *checked = 1;
+ tomoyo_fill_path_info(&arg);
+ result = tomoyo_path_matches_pattern(&arg, argv->value);
+ if (argv->is_not)
+ result = !result;
+ if (!result)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tomoyo_envp - Check envp[] in "struct linux_binprm".
+ *
+ * @env_name: The name of environment variable.
+ * @env_value: The value of environment variable.
+ * @envc: Length of @envp.
+ * @envp: Pointer to "struct tomoyo_envp".
+ * @checked: Set to true if @envp[@env_name] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_envp(const char *env_name, const char *env_value,
+ const int envc, const struct tomoyo_envp *envp,
+ u8 *checked)
+{
+ int i;
+ struct tomoyo_path_info name;
+ struct tomoyo_path_info value;
+
+ name.name = env_name;
+ tomoyo_fill_path_info(&name);
+ value.name = env_value;
+ tomoyo_fill_path_info(&value);
+ for (i = 0; i < envc; envp++, checked++, i++) {
+ bool result;
+
+ if (!tomoyo_path_matches_pattern(&name, envp->name))
+ continue;
+ *checked = 1;
+ if (envp->value) {
+ result = tomoyo_path_matches_pattern(&value,
+ envp->value);
+ if (envp->is_not)
+ result = !result;
+ } else {
+ result = true;
+ if (!envp->is_not)
+ result = !result;
+ }
+ if (!result)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tomoyo_scan_bprm - Scan "struct linux_binprm".
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ * @argc: Length of @argv.
+ * @argv: Pointer to "struct tomoyo_argv".
+ * @envc: Length of @envp.
+ * @envp: Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
+ const u16 argc, const struct tomoyo_argv *argv,
+ const u16 envc, const struct tomoyo_envp *envp)
+{
+ struct linux_binprm *bprm = ee->bprm;
+ struct tomoyo_page_dump *dump = &ee->dump;
+ char *arg_ptr = ee->tmp;
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ bool result = true;
+ u8 local_checked[32];
+ u8 *checked;
+
+ if (argc + envc <= sizeof(local_checked)) {
+ checked = local_checked;
+ memset(local_checked, 0, sizeof(local_checked));
+ } else {
+ checked = kzalloc(argc + envc, GFP_NOFS);
+ if (!checked)
+ return false;
+ }
+ while (argv_count || envp_count) {
+ if (!tomoyo_dump_page(bprm, pos, dump)) {
+ result = false;
+ goto out;
+ }
+ pos += PAGE_SIZE - offset;
+ while (offset < PAGE_SIZE) {
+ /* Read. */
+ const char *kaddr = dump->data;
+ const unsigned char c = kaddr[offset++];
+
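+ /*
+ * Escape the byte TOMOYO-style: '\\' is doubled,
+ * printable ASCII is copied verbatim, and anything
+ * else becomes a three-digit octal escape (e.g.
+ * '\n' turns into "\012").
+ */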
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++] =
+ ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ /* Check. */
+ if (argv_count) {
+ if (!tomoyo_argv(bprm->argc - argv_count,
+ arg_ptr, argc, argv,
+ checked)) {
+ result = false;
+ break;
+ }
+ argv_count--;
+ } else if (envp_count) {
+ char *cp = strchr(arg_ptr, '=');
+
+ if (cp) {
+ *cp = '\0';
+ if (!tomoyo_envp(arg_ptr, cp + 1,
+ envc, envp,
+ checked + argc)) {
+ result = false;
+ break;
+ }
+ }
+ envp_count--;
+ } else {
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ if (!result)
+ break;
+ }
+out:
+ if (result) {
+ int i;
+
+ /* Check not-yet-checked entries. */
+ for (i = 0; i < argc; i++) {
+ if (checked[i])
+ continue;
+ /*
+ * Return true only if all unchecked indexes in
+ * bprm->argv[] are not matched.
+ */
+ if (argv[i].is_not)
+ continue;
+ result = false;
+ break;
+ }
+ for (i = 0; i < envc; envp++, i++) {
+ if (checked[argc + i])
+ continue;
+ /*
+ * Return true only if all unchecked environ variables
+ * in bprm->envp[] are either undefined or not matched.
+ */
+ if ((!envp->value && !envp->is_not) ||
+ (envp->value && envp->is_not))
+ continue;
+ result = false;
+ break;
+ }
+ }
+ if (checked != local_checked)
+ kfree(checked);
+ return result;
+}
+
+/**
+ * tomoyo_scan_exec_realpath - Check "exec.realpath" parameter of "struct tomoyo_condition".
+ *
+ * @file: Pointer to "struct file".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ * @match: True if "exec.realpath=", false if "exec.realpath!=".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_exec_realpath(struct file *file,
+ const struct tomoyo_name_union *ptr,
+ const bool match)
+{
+ bool result;
+ struct tomoyo_path_info exe;
+
+ if (!file)
+ return false;
+ exe.name = tomoyo_realpath_from_path(&file->f_path);
+ if (!exe.name)
+ return false;
+ tomoyo_fill_path_info(&exe);
+ result = tomoyo_compare_name_union(&exe, ptr);
+ kfree(exe.name);
+ return result == match;
+}
+
+/**
+ * tomoyo_get_dqword - tomoyo_get_name() for a quoted string.
+ *
+ * @start: String to save.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+static const struct tomoyo_path_info *tomoyo_get_dqword(char *start)
+{
+ char *cp = start + strlen(start) - 1;
+
+ if (cp == start || *start++ != '"' || *cp != '"')
+ return NULL;
+ *cp = '\0';
+ if (*start && !tomoyo_correct_word(start))
+ return NULL;
+ return tomoyo_get_name(start);
+}
+
+/**
+ * tomoyo_parse_name_union_quoted - Parse a quoted word.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr)
+{
+ char *filename = param->data;
+
+ if (*filename == '@')
+ return tomoyo_parse_name_union(param, ptr);
+ ptr->filename = tomoyo_get_dqword(filename);
+ return ptr->filename != NULL;
+}
+
+/**
+ * tomoyo_parse_argv - Parse an argv[] condition part.
+ *
+ * @left: Left-hand value.
+ * @right: Right-hand value.
+ * @argv: Pointer to "struct tomoyo_argv".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_argv(char *left, char *right,
+ struct tomoyo_argv *argv)
+{
+ if (tomoyo_parse_ulong(&argv->index, &left) !=
+ TOMOYO_VALUE_TYPE_DECIMAL || *left++ != ']' || *left)
+ return false;
+ argv->value = tomoyo_get_dqword(right);
+ return argv->value != NULL;
+}
+
+/**
+ * tomoyo_parse_envp - Parse an envp[] condition part.
+ *
+ * @left: Left-hand value.
+ * @right: Right-hand value.
+ * @envp: Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_envp(char *left, char *right,
+ struct tomoyo_envp *envp)
+{
+ const struct tomoyo_path_info *name;
+ const struct tomoyo_path_info *value;
+ char *cp = left + strlen(left) - 1;
+
+ if (*cp-- != ']' || *cp != '"')
+ goto out;
+ *cp = '\0';
+ if (!tomoyo_correct_word(left))
+ goto out;
+ name = tomoyo_get_name(left);
+ if (!name)
+ goto out;
+ if (!strcmp(right, "NULL")) {
+ value = NULL;
+ } else {
+ value = tomoyo_get_dqword(right);
+ if (!value) {
+ tomoyo_put_name(name);
+ goto out;
+ }
+ }
+ envp->name = name;
+ envp->value = value;
+ return true;
+out:
+ return false;
+}
+
+/**
+ * tomoyo_same_condition - Check for duplicated "struct tomoyo_condition" entry.
+ *
+ * @a: Pointer to "struct tomoyo_condition".
+ * @b: Pointer to "struct tomoyo_condition".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
+ const struct tomoyo_condition *b)
+{
+ return a->size == b->size && a->condc == b->condc &&
+ a->numbers_count == b->numbers_count &&
+ a->names_count == b->names_count &&
+ a->argc == b->argc && a->envc == b->envc &&
+ a->grant_log == b->grant_log && a->transit == b->transit &&
+ !memcmp(a + 1, b + 1, a->size - sizeof(*a));
+}
+
+/**
+ * tomoyo_condition_type - Get condition type.
+ *
+ * @word: Keyword string.
+ *
+ * Returns one of values in "enum tomoyo_conditions_index" on success,
+ * TOMOYO_MAX_CONDITION_KEYWORD otherwise.
+ */
+static u8 tomoyo_condition_type(const char *word)
+{
+ u8 i;
+
+ for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) {
+ if (!strcmp(word, tomoyo_condition_keyword[i]))
+ break;
+ }
+ return i;
+}
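+
+/*
+ * For example, tomoyo_condition_type("task.uid") yields
+ * TOMOYO_TASK_UID, while an unrecognized word yields
+ * TOMOYO_MAX_CONDITION_KEYWORD, which tomoyo_get_condition() then
+ * treats as a numeric operand.
+ */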
+
+/* Define this to enable debug mode. */
+/* #define DEBUG_CONDITION */
+
+#ifdef DEBUG_CONDITION
+#define dprintk printk
+#else
+#define dprintk(...) do { } while (0)
+#endif
+
+/**
+ * tomoyo_commit_condition - Commit "struct tomoyo_condition".
+ *
+ * @entry: Pointer to "struct tomoyo_condition".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ *
+ * This function merges duplicated entries. It returns NULL if @entry is not
+ * a duplicate but the memory quota for policy has been exceeded.
+ */
+static struct tomoyo_condition *tomoyo_commit_condition
+(struct tomoyo_condition *entry)
+{
+ struct tomoyo_condition *ptr;
+ bool found = false;
+
+ if (mutex_lock_interruptible(&tomoyo_policy_lock)) {
+ dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+ ptr = NULL;
+ found = true;
+ goto out;
+ }
+ list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+ if (!tomoyo_same_condition(ptr, entry) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ /* Same entry found. Share this entry. */
+ atomic_inc(&ptr->head.users);
+ found = true;
+ break;
+ }
+ if (!found) {
+ if (tomoyo_memory_ok(entry)) {
+ atomic_set(&entry->head.users, 1);
+ list_add(&entry->head.list, &tomoyo_condition_list);
+ } else {
+ found = true;
+ ptr = NULL;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ if (found) {
+ tomoyo_del_condition(&entry->head.list);
+ kfree(entry);
+ entry = ptr;
+ }
+ return entry;
+}
+
+/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e: Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+ struct tomoyo_condition *e)
+{
+ char * const pos = param->data;
+ bool flag;
+
+ if (*pos == '<') {
+ e->transit = tomoyo_get_domainname(param);
+ goto done;
+ }
+ {
+ char *cp = strchr(pos, ' ');
+
+ if (cp)
+ *cp = '\0';
+ flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+ !strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+ !strcmp(pos, "child") || !strcmp(pos, "parent");
+ if (cp)
+ *cp = ' ';
+ }
+ if (!flag)
+ return pos;
+ e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+ if (e->transit)
+ return param->data;
+ /*
+ * Return a bad read-only condition string that will let
+ * tomoyo_get_condition() return NULL.
+ */
+ return "/";
+}
+
+/**
+ * tomoyo_get_condition - Parse condition part.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ */
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_condition *entry = NULL;
+ struct tomoyo_condition_element *condp = NULL;
+ struct tomoyo_number_union *numbers_p = NULL;
+ struct tomoyo_name_union *names_p = NULL;
+ struct tomoyo_argv *argv = NULL;
+ struct tomoyo_envp *envp = NULL;
+ struct tomoyo_condition e = { };
+ char * const start_of_string =
+ tomoyo_get_transit_preference(param, &e);
+ char * const end_of_string = start_of_string + strlen(start_of_string);
+ char *pos;
+
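+ /*
+ * This parser runs twice over the same string: the first (dry run)
+ * pass, with entry == NULL, only counts how many elements of each
+ * kind are present; the trailing arrays are then allocated as one
+ * block, the separators consumed by the dry run are restored, and
+ * the second pass fills the arrays in.
+ */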
+rerun:
+ pos = start_of_string;
+ while (1) {
+ u8 left = -1;
+ u8 right = -1;
+ char *left_word = pos;
+ char *cp;
+ char *right_word;
+ bool is_not;
+
+ if (!*left_word)
+ break;
+ /*
+ * Since left-hand condition does not allow use of "path_group"
+ * or "number_group" and environment variable's names do not
+ * accept '=', it is guaranteed that the original line consists
+ * of one or more repetition of $left$operator$right blocks
+ * where "$left is free from '=' and ' '" and "$operator is
+ * either '=' or '!='" and "$right is free from ' '".
+ * Therefore, we can reconstruct the original line at the end
+ * of dry run even if we overwrite $operator with '\0'.
+ */
+ cp = strchr(pos, ' ');
+ if (cp) {
+ *cp = '\0'; /* Will restore later. */
+ pos = cp + 1;
+ } else {
+ pos = "";
+ }
+ right_word = strchr(left_word, '=');
+ if (!right_word || right_word == left_word)
+ goto out;
+ is_not = *(right_word - 1) == '!';
+ if (is_not)
+ *(right_word++ - 1) = '\0'; /* Will restore later. */
+ else if (*(right_word + 1) != '=')
+ *right_word++ = '\0'; /* Will restore later. */
+ else
+ goto out;
+ dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
+ is_not ? "!" : "", right_word);
+ if (!strcmp(left_word, "grant_log")) {
+ if (entry) {
+ if (is_not ||
+ entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+ goto out;
+ else if (!strcmp(right_word, "yes"))
+ entry->grant_log = TOMOYO_GRANTLOG_YES;
+ else if (!strcmp(right_word, "no"))
+ entry->grant_log = TOMOYO_GRANTLOG_NO;
+ else
+ goto out;
+ }
+ continue;
+ }
+ if (!strncmp(left_word, "exec.argv[", 10)) {
+ if (!argv) {
+ e.argc++;
+ e.condc++;
+ } else {
+ e.argc--;
+ e.condc--;
+ left = TOMOYO_ARGV_ENTRY;
+ argv->is_not = is_not;
+ if (!tomoyo_parse_argv(left_word + 10,
+ right_word, argv++))
+ goto out;
+ }
+ goto store_value;
+ }
+ if (!strncmp(left_word, "exec.envp[\"", 11)) {
+ if (!envp) {
+ e.envc++;
+ e.condc++;
+ } else {
+ e.envc--;
+ e.condc--;
+ left = TOMOYO_ENVP_ENTRY;
+ envp->is_not = is_not;
+ if (!tomoyo_parse_envp(left_word + 11,
+ right_word, envp++))
+ goto out;
+ }
+ goto store_value;
+ }
+ left = tomoyo_condition_type(left_word);
+ dprintk(KERN_WARNING "%u: <%s> left=%u\n", __LINE__, left_word,
+ left);
+ if (left == TOMOYO_MAX_CONDITION_KEYWORD) {
+ if (!numbers_p) {
+ e.numbers_count++;
+ } else {
+ e.numbers_count--;
+ left = TOMOYO_NUMBER_UNION;
+ param->data = left_word;
+ if (*left_word == '@' ||
+ !tomoyo_parse_number_union(param,
+ numbers_p++))
+ goto out;
+ }
+ }
+ if (!condp)
+ e.condc++;
+ else
+ e.condc--;
+ if (left == TOMOYO_EXEC_REALPATH ||
+ left == TOMOYO_SYMLINK_TARGET) {
+ if (!names_p) {
+ e.names_count++;
+ } else {
+ e.names_count--;
+ right = TOMOYO_NAME_UNION;
+ param->data = right_word;
+ if (!tomoyo_parse_name_union_quoted(param,
+ names_p++))
+ goto out;
+ }
+ goto store_value;
+ }
+ right = tomoyo_condition_type(right_word);
+ if (right == TOMOYO_MAX_CONDITION_KEYWORD) {
+ if (!numbers_p) {
+ e.numbers_count++;
+ } else {
+ e.numbers_count--;
+ right = TOMOYO_NUMBER_UNION;
+ param->data = right_word;
+ if (!tomoyo_parse_number_union(param,
+ numbers_p++))
+ goto out;
+ }
+ }
+store_value:
+ if (!condp) {
+ dprintk(KERN_WARNING "%u: dry_run left=%u right=%u match=%u\n",
+ __LINE__, left, right, !is_not);
+ continue;
+ }
+ condp->left = left;
+ condp->right = right;
+ condp->equals = !is_not;
+ dprintk(KERN_WARNING "%u: left=%u right=%u match=%u\n",
+ __LINE__, condp->left, condp->right,
+ condp->equals);
+ condp++;
+ }
+ dprintk(KERN_INFO "%u: cond=%u numbers=%u names=%u ac=%u ec=%u\n",
+ __LINE__, e.condc, e.numbers_count, e.names_count, e.argc,
+ e.envc);
+ if (entry) {
+ BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc |
+ e.condc);
+ return tomoyo_commit_condition(entry);
+ }
+ e.size = sizeof(*entry)
+ + e.condc * sizeof(struct tomoyo_condition_element)
+ + e.numbers_count * sizeof(struct tomoyo_number_union)
+ + e.names_count * sizeof(struct tomoyo_name_union)
+ + e.argc * sizeof(struct tomoyo_argv)
+ + e.envc * sizeof(struct tomoyo_envp);
+ entry = kzalloc(e.size, GFP_NOFS);
+ if (!entry)
+ goto out2;
+ *entry = e;
+ e.transit = NULL;
+ condp = (struct tomoyo_condition_element *) (entry + 1);
+ numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
+ names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
+ argv = (struct tomoyo_argv *) (names_p + e.names_count);
+ envp = (struct tomoyo_envp *) (argv + e.argc);
+ {
+ bool flag = false;
+
+ for (pos = start_of_string; pos < end_of_string; pos++) {
+ if (*pos)
+ continue;
+ if (flag) /* Restore " ". */
+ *pos = ' ';
+ else if (*(pos + 1) == '=') /* Restore "!=". */
+ *pos = '!';
+ else /* Restore "=". */
+ *pos = '=';
+ flag = !flag;
+ }
+ }
+ goto rerun;
+out:
+ dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+ if (entry) {
+ tomoyo_del_condition(&entry->head.list);
+ kfree(entry);
+ }
+out2:
+ tomoyo_put_name(e.transit);
+ return NULL;
+}
+
+/**
+ * tomoyo_get_attributes - Revalidate "struct inode".
+ *
+ * @obj: Pointer to "struct tomoyo_obj_info".
+ *
+ * Returns nothing.
+ */
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
+{
+ u8 i;
+ struct dentry *dentry = NULL;
+
+ for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+ struct inode *inode;
+
+ switch (i) {
+ case TOMOYO_PATH1:
+ dentry = obj->path1.dentry;
+ if (!dentry)
+ continue;
+ break;
+ case TOMOYO_PATH2:
+ dentry = obj->path2.dentry;
+ if (!dentry)
+ continue;
+ break;
+ default:
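+ /*
+ * Odd indexes (the *_PARENT entries) reuse the
+ * dentry captured for the preceding even index and
+ * step up to its parent.
+ */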
+ if (!dentry)
+ continue;
+ dentry = dget_parent(dentry);
+ break;
+ }
+ inode = d_backing_inode(dentry);
+ if (inode) {
+ struct tomoyo_mini_stat *stat = &obj->stat[i];
+
+ stat->uid = inode->i_uid;
+ stat->gid = inode->i_gid;
+ stat->ino = inode->i_ino;
+ stat->mode = inode->i_mode;
+ stat->dev = inode->i_sb->s_dev;
+ stat->rdev = inode->i_rdev;
+ obj->stat_valid[i] = true;
+ }
+ if (i & 1) /* TOMOYO_PATH1_PARENT or TOMOYO_PATH2_PARENT */
+ dput(dentry);
+ }
+}
+
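
The (i & 1) test above relies on the stat slots being laid out in path/parent pairs, so that odd slots are the parent dentries obtained via dget_parent() and needing dput(). A small stand-alone sketch of that assumed indexing convention, with simplified enum names:

    #include <stdio.h>

    /* Hypothetical mirror of the TOMOYO_MAX_PATH_STAT slot convention:
     * even slots hold a path, the next odd slot holds its parent. */
    enum { PATH1, PATH1_PARENT, PATH2, PATH2_PARENT, MAX_PATH_STAT };

    int main(void)
    {
    	int i;

    	for (i = 0; i < MAX_PATH_STAT; i++)
    		printf("slot %d: %s\n", i,
    		       (i & 1) ? "parent (needs dput())" : "path itself");
    	return 0;
    }
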
+/**
+ * tomoyo_condition - Check condition part.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_condition(struct tomoyo_request_info *r,
+ const struct tomoyo_condition *cond)
+{
+ u32 i;
+ unsigned long min_v[2] = { 0, 0 };
+ unsigned long max_v[2] = { 0, 0 };
+ const struct tomoyo_condition_element *condp;
+ const struct tomoyo_number_union *numbers_p;
+ const struct tomoyo_name_union *names_p;
+ const struct tomoyo_argv *argv;
+ const struct tomoyo_envp *envp;
+ struct tomoyo_obj_info *obj;
+ u16 condc;
+ u16 argc;
+ u16 envc;
+ struct linux_binprm *bprm = NULL;
+
+ if (!cond)
+ return true;
+ condc = cond->condc;
+ argc = cond->argc;
+ envc = cond->envc;
+ obj = r->obj;
+ if (r->ee)
+ bprm = r->ee->bprm;
+ if (!bprm && (argc || envc))
+ return false;
+ condp = (struct tomoyo_condition_element *) (cond + 1);
+ numbers_p = (const struct tomoyo_number_union *) (condp + condc);
+ names_p = (const struct tomoyo_name_union *)
+ (numbers_p + cond->numbers_count);
+ argv = (const struct tomoyo_argv *) (names_p + cond->names_count);
+ envp = (const struct tomoyo_envp *) (argv + argc);
+ for (i = 0; i < condc; i++) {
+ const bool match = condp->equals;
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+ bool is_bitop[2] = { false, false };
+ u8 j;
+
+ condp++;
+ /* Check argv[] and envp[] later. */
+ if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY)
+ continue;
+ /* Check string expressions. */
+ if (right == TOMOYO_NAME_UNION) {
+ const struct tomoyo_name_union *ptr = names_p++;
+ struct tomoyo_path_info *symlink;
+ struct tomoyo_execve *ee;
+ struct file *file;
+
+ switch (left) {
+ case TOMOYO_SYMLINK_TARGET:
+ symlink = obj ? obj->symlink_target : NULL;
+ if (!symlink ||
+ !tomoyo_compare_name_union(symlink, ptr)
+ == match)
+ goto out;
+ break;
+ case TOMOYO_EXEC_REALPATH:
+ ee = r->ee;
+ file = ee ? ee->bprm->file : NULL;
+ if (!tomoyo_scan_exec_realpath(file, ptr,
+ match))
+ goto out;
+ break;
+ }
+ continue;
+ }
+ /* Check numeric or bit-op expressions. */
+ for (j = 0; j < 2; j++) {
+ const u8 index = j ? right : left;
+ unsigned long value = 0;
+
+ switch (index) {
+ case TOMOYO_TASK_UID:
+ value = from_kuid(&init_user_ns, current_uid());
+ break;
+ case TOMOYO_TASK_EUID:
+ value = from_kuid(&init_user_ns, current_euid());
+ break;
+ case TOMOYO_TASK_SUID:
+ value = from_kuid(&init_user_ns, current_suid());
+ break;
+ case TOMOYO_TASK_FSUID:
+ value = from_kuid(&init_user_ns, current_fsuid());
+ break;
+ case TOMOYO_TASK_GID:
+ value = from_kgid(&init_user_ns, current_gid());
+ break;
+ case TOMOYO_TASK_EGID:
+ value = from_kgid(&init_user_ns, current_egid());
+ break;
+ case TOMOYO_TASK_SGID:
+ value = from_kgid(&init_user_ns, current_sgid());
+ break;
+ case TOMOYO_TASK_FSGID:
+ value = from_kgid(&init_user_ns, current_fsgid());
+ break;
+ case TOMOYO_TASK_PID:
+ value = tomoyo_sys_getpid();
+ break;
+ case TOMOYO_TASK_PPID:
+ value = tomoyo_sys_getppid();
+ break;
+ case TOMOYO_TYPE_IS_SOCKET:
+ value = S_IFSOCK;
+ break;
+ case TOMOYO_TYPE_IS_SYMLINK:
+ value = S_IFLNK;
+ break;
+ case TOMOYO_TYPE_IS_FILE:
+ value = S_IFREG;
+ break;
+ case TOMOYO_TYPE_IS_BLOCK_DEV:
+ value = S_IFBLK;
+ break;
+ case TOMOYO_TYPE_IS_DIRECTORY:
+ value = S_IFDIR;
+ break;
+ case TOMOYO_TYPE_IS_CHAR_DEV:
+ value = S_IFCHR;
+ break;
+ case TOMOYO_TYPE_IS_FIFO:
+ value = S_IFIFO;
+ break;
+ case TOMOYO_MODE_SETUID:
+ value = S_ISUID;
+ break;
+ case TOMOYO_MODE_SETGID:
+ value = S_ISGID;
+ break;
+ case TOMOYO_MODE_STICKY:
+ value = S_ISVTX;
+ break;
+ case TOMOYO_MODE_OWNER_READ:
+ value = 0400;
+ break;
+ case TOMOYO_MODE_OWNER_WRITE:
+ value = 0200;
+ break;
+ case TOMOYO_MODE_OWNER_EXECUTE:
+ value = 0100;
+ break;
+ case TOMOYO_MODE_GROUP_READ:
+ value = 0040;
+ break;
+ case TOMOYO_MODE_GROUP_WRITE:
+ value = 0020;
+ break;
+ case TOMOYO_MODE_GROUP_EXECUTE:
+ value = 0010;
+ break;
+ case TOMOYO_MODE_OTHERS_READ:
+ value = 0004;
+ break;
+ case TOMOYO_MODE_OTHERS_WRITE:
+ value = 0002;
+ break;
+ case TOMOYO_MODE_OTHERS_EXECUTE:
+ value = 0001;
+ break;
+ case TOMOYO_EXEC_ARGC:
+ if (!bprm)
+ goto out;
+ value = bprm->argc;
+ break;
+ case TOMOYO_EXEC_ENVC:
+ if (!bprm)
+ goto out;
+ value = bprm->envc;
+ break;
+ case TOMOYO_NUMBER_UNION:
+ /* Fetch values later. */
+ break;
+ default:
+ if (!obj)
+ goto out;
+ if (!obj->validate_done) {
+ tomoyo_get_attributes(obj);
+ obj->validate_done = true;
+ }
+ {
+ u8 stat_index;
+ struct tomoyo_mini_stat *stat;
+
+ switch (index) {
+ case TOMOYO_PATH1_UID:
+ case TOMOYO_PATH1_GID:
+ case TOMOYO_PATH1_INO:
+ case TOMOYO_PATH1_MAJOR:
+ case TOMOYO_PATH1_MINOR:
+ case TOMOYO_PATH1_TYPE:
+ case TOMOYO_PATH1_DEV_MAJOR:
+ case TOMOYO_PATH1_DEV_MINOR:
+ case TOMOYO_PATH1_PERM:
+ stat_index = TOMOYO_PATH1;
+ break;
+ case TOMOYO_PATH2_UID:
+ case TOMOYO_PATH2_GID:
+ case TOMOYO_PATH2_INO:
+ case TOMOYO_PATH2_MAJOR:
+ case TOMOYO_PATH2_MINOR:
+ case TOMOYO_PATH2_TYPE:
+ case TOMOYO_PATH2_DEV_MAJOR:
+ case TOMOYO_PATH2_DEV_MINOR:
+ case TOMOYO_PATH2_PERM:
+ stat_index = TOMOYO_PATH2;
+ break;
+ case TOMOYO_PATH1_PARENT_UID:
+ case TOMOYO_PATH1_PARENT_GID:
+ case TOMOYO_PATH1_PARENT_INO:
+ case TOMOYO_PATH1_PARENT_PERM:
+ stat_index =
+ TOMOYO_PATH1_PARENT;
+ break;
+ case TOMOYO_PATH2_PARENT_UID:
+ case TOMOYO_PATH2_PARENT_GID:
+ case TOMOYO_PATH2_PARENT_INO:
+ case TOMOYO_PATH2_PARENT_PERM:
+ stat_index =
+ TOMOYO_PATH2_PARENT;
+ break;
+ default:
+ goto out;
+ }
+ if (!obj->stat_valid[stat_index])
+ goto out;
+ stat = &obj->stat[stat_index];
+ switch (index) {
+ case TOMOYO_PATH1_UID:
+ case TOMOYO_PATH2_UID:
+ case TOMOYO_PATH1_PARENT_UID:
+ case TOMOYO_PATH2_PARENT_UID:
+ value = from_kuid(&init_user_ns, stat->uid);
+ break;
+ case TOMOYO_PATH1_GID:
+ case TOMOYO_PATH2_GID:
+ case TOMOYO_PATH1_PARENT_GID:
+ case TOMOYO_PATH2_PARENT_GID:
+ value = from_kgid(&init_user_ns, stat->gid);
+ break;
+ case TOMOYO_PATH1_INO:
+ case TOMOYO_PATH2_INO:
+ case TOMOYO_PATH1_PARENT_INO:
+ case TOMOYO_PATH2_PARENT_INO:
+ value = stat->ino;
+ break;
+ case TOMOYO_PATH1_MAJOR:
+ case TOMOYO_PATH2_MAJOR:
+ value = MAJOR(stat->dev);
+ break;
+ case TOMOYO_PATH1_MINOR:
+ case TOMOYO_PATH2_MINOR:
+ value = MINOR(stat->dev);
+ break;
+ case TOMOYO_PATH1_TYPE:
+ case TOMOYO_PATH2_TYPE:
+ value = stat->mode & S_IFMT;
+ break;
+ case TOMOYO_PATH1_DEV_MAJOR:
+ case TOMOYO_PATH2_DEV_MAJOR:
+ value = MAJOR(stat->rdev);
+ break;
+ case TOMOYO_PATH1_DEV_MINOR:
+ case TOMOYO_PATH2_DEV_MINOR:
+ value = MINOR(stat->rdev);
+ break;
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ value = stat->mode & S_IALLUGO;
+ break;
+ }
+ }
+ break;
+ }
+ max_v[j] = value;
+ min_v[j] = value;
+ switch (index) {
+ case TOMOYO_MODE_SETUID:
+ case TOMOYO_MODE_SETGID:
+ case TOMOYO_MODE_STICKY:
+ case TOMOYO_MODE_OWNER_READ:
+ case TOMOYO_MODE_OWNER_WRITE:
+ case TOMOYO_MODE_OWNER_EXECUTE:
+ case TOMOYO_MODE_GROUP_READ:
+ case TOMOYO_MODE_GROUP_WRITE:
+ case TOMOYO_MODE_GROUP_EXECUTE:
+ case TOMOYO_MODE_OTHERS_READ:
+ case TOMOYO_MODE_OTHERS_WRITE:
+ case TOMOYO_MODE_OTHERS_EXECUTE:
+ is_bitop[j] = true;
+ }
+ }
+ if (left == TOMOYO_NUMBER_UNION) {
+ /* Fetch values now. */
+ const struct tomoyo_number_union *ptr = numbers_p++;
+
+ min_v[0] = ptr->values[0];
+ max_v[0] = ptr->values[1];
+ }
+ if (right == TOMOYO_NUMBER_UNION) {
+ /* Fetch values now. */
+ const struct tomoyo_number_union *ptr = numbers_p++;
+
+ if (ptr->group) {
+ if (tomoyo_number_matches_group(min_v[0],
+ max_v[0],
+ ptr->group)
+ == match)
+ continue;
+ } else {
+ if ((min_v[0] <= ptr->values[1] &&
+ max_v[0] >= ptr->values[0]) == match)
+ continue;
+ }
+ goto out;
+ }
+ /*
+ * Bit operation is valid only when counterpart value
+ * represents permission.
+ */
+ if (is_bitop[0] && is_bitop[1]) {
+ goto out;
+ } else if (is_bitop[0]) {
+ switch (right) {
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ if (!(max_v[0] & max_v[1]) == !match)
+ continue;
+ }
+ goto out;
+ } else if (is_bitop[1]) {
+ switch (left) {
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ if (!(max_v[0] & max_v[1]) == !match)
+ continue;
+ }
+ goto out;
+ }
+ /* Normal value range comparison. */
+ if ((min_v[0] <= max_v[1] && max_v[0] >= min_v[1]) == match)
+ continue;
+out:
+ return false;
+ }
+ /* Check argv[] and envp[] now. */
+ if (r->ee && (argc || envc))
+ return tomoyo_scan_bprm(r->ee, argc, argv, envc, envp);
+ return true;
+}
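
Both operands of a numeric condition are reduced to the closed intervals [min_v[j], max_v[j]] (a single value becomes a degenerate interval), so the final comparison in tomoyo_condition() is a plain interval-overlap test. A stand-alone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool ranges_overlap(unsigned long min0, unsigned long max0,
    			       unsigned long min1, unsigned long max1)
    {
    	return min0 <= max1 && max0 >= min1;
    }

    int main(void)
    {
    	/* E.g. task.uid=5 against a policy range 0-10: the value is the
    	 * degenerate interval [5,5], the policy side is [0,10]. */
    	printf("5 vs [0,10]: %d\n", ranges_overlap(5, 5, 0, 10));
    	printf("5 vs [6,10]: %d\n", ranges_overlap(5, 5, 6, 10));
    	return 0;
    }
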
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
new file mode 100644
index 000000000..31af29f66
--- /dev/null
+++ b/security/tomoyo/domain.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/domain.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+#include <linux/binfmts.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+
+/* Variable definitions. */
+
+/* The initial domain. */
+struct tomoyo_domain_info tomoyo_kernel_domain;
+
+/**
+ * tomoyo_update_policy - Update an entry for exception policy.
+ *
+ * @new_entry: Pointer to "struct tomoyo_acl_info".
+ * @size: Size of @new_entry in bytes.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)(const struct tomoyo_acl_head
+ *,
+ const struct tomoyo_acl_head
+ *))
+{
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ struct tomoyo_acl_head *entry;
+ struct list_head *list = param->list;
+
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -ENOMEM;
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!check_duplicate(entry, new_entry))
+ continue;
+ entry->is_deleted = param->is_delete;
+ error = 0;
+ break;
+ }
+ if (error && !param->is_delete) {
+ entry = tomoyo_commit_ok(new_entry, size);
+ if (entry) {
+ list_add_tail_rcu(&entry->list, list);
+ error = 0;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ return error;
+}
+
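
tomoyo_update_policy() is a scan-then-append update: an equivalent entry, if present, merely has its is_deleted flag set to the request type, and a new entry is linked in only for an add that found no duplicate. A simplified user-space model (the entry structure and error values are stand-ins, and a singly linked list replaces the RCU list):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry {
    	struct entry *next;
    	bool is_deleted;
    	char name[32];
    };

    static struct entry *head;

    static int update(const char *name, bool is_delete)
    {
    	struct entry *e;

    	for (e = head; e; e = e->next) {
    		if (strcmp(e->name, name))
    			continue;
    		e->is_deleted = is_delete;	/* Reuse the existing entry. */
    		return 0;
    	}
    	if (is_delete)
    		return -2;			/* Stands in for -ENOENT. */
    	e = calloc(1, sizeof(*e));
    	if (!e)
    		return -1;			/* Stands in for -ENOMEM. */
    	strncpy(e->name, name, sizeof(e->name) - 1);
    	e->next = head;				/* The kernel appends with */
    	head = e;				/* list_add_tail_rcu(). */
    	return 0;
    }

    int main(void)
    {
    	printf("add:       %d\n", update("/bin/sh", false));
    	printf("add again: %d\n", update("/bin/sh", false));
    	printf("delete:    %d\n", update("/bin/sh", true));
    	return 0;
    }
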
+/**
+ * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ return a->type == b->type && a->cond == b->cond;
+}
+
+/**
+ * tomoyo_update_domain - Update an entry for domain policy.
+ *
+ * @new_entry: Pointer to "struct tomoyo_acl_info".
+ * @size: Size of @new_entry in bytes.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
+ * @merge_duplicate: Callback function to merge duplicated entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)(const struct tomoyo_acl_info
+ *,
+ const struct tomoyo_acl_info
+ *),
+ bool (*merge_duplicate)(struct tomoyo_acl_info *,
+ struct tomoyo_acl_info *,
+ const bool))
+{
+ const bool is_delete = param->is_delete;
+ int error = is_delete ? -ENOENT : -ENOMEM;
+ struct tomoyo_acl_info *entry;
+ struct list_head * const list = param->list;
+
+ if (param->data[0]) {
+ new_entry->cond = tomoyo_get_condition(param);
+ if (!new_entry->cond)
+ return -EINVAL;
+ /*
+		 * Domain transition preference is allowed only for
+		 * "file execute" entries.
+ */
+ if (new_entry->cond->transit &&
+ !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+ container_of(new_entry, struct tomoyo_path_acl, head)
+ ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+ goto out;
+ }
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!tomoyo_same_acl_head(entry, new_entry) ||
+ !check_duplicate(entry, new_entry))
+ continue;
+ if (merge_duplicate)
+ entry->is_deleted = merge_duplicate(entry, new_entry,
+ is_delete);
+ else
+ entry->is_deleted = is_delete;
+ error = 0;
+ break;
+ }
+ if (error && !is_delete) {
+ entry = tomoyo_commit_ok(new_entry, size);
+ if (entry) {
+ list_add_tail_rcu(&entry->list, list);
+ error = 0;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_condition(new_entry->cond);
+ return error;
+}
+
+/**
+ * tomoyo_check_acl - Do permission check.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @check_entry: Callback function to check type specific parameters.
+ *
+ * Returns nothing; the matching result is stored in @r->granted.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+ bool (*check_entry)(struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *))
+{
+ const struct tomoyo_domain_info *domain = r->domain;
+ struct tomoyo_acl_info *ptr;
+ const struct list_head *list = &domain->acl_info_list;
+ u16 i = 0;
+
+retry:
+ list_for_each_entry_rcu(ptr, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->is_deleted || ptr->type != r->param_type)
+ continue;
+ if (!check_entry(r, ptr))
+ continue;
+ if (!tomoyo_condition(r, ptr->cond))
+ continue;
+ r->matched_acl = ptr;
+ r->granted = true;
+ return;
+ }
+ for (; i < TOMOYO_MAX_ACL_GROUPS; i++) {
+ if (!test_bit(i, domain->group))
+ continue;
+ list = &domain->ns->acl_group[i++];
+ goto retry;
+ }
+ r->granted = false;
+}
+
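
The manual i++ ahead of the goto above is needed because the goto leaves the for statement before its increment expression runs; without it the same ACL group would be rescanned forever. A stand-alone sketch of the scan-domain-list-then-groups control flow, with group membership modelled as a plain bool array:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_GROUPS 4

    static bool scan(const char *which)
    {
    	printf("scanning %s\n", which);
    	return false;			/* Pretend nothing matched. */
    }

    int main(void)
    {
    	bool member[MAX_GROUPS] = { false, true, false, true };
    	char name[16];
    	unsigned int i = 0;
    	bool granted = scan("domain list");

    	while (!granted && i < MAX_GROUPS) {
    		if (!member[i]) {
    			i++;
    			continue;
    		}
    		snprintf(name, sizeof(name), "group %u", i);
    		i++;			/* Manual advance, as in the kernel loop. */
    		granted = scan(name);
    	}
    	printf("granted=%d\n", granted);
    	return 0;
    }
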
+/* The list for "struct tomoyo_domain_info". */
+LIST_HEAD(tomoyo_domain_list);
+
+/**
+ * tomoyo_last_word - Get last component of a domainname.
+ *
+ * @name: Domainname to check.
+ *
+ * Returns the last word of @name.
+ */
+static const char *tomoyo_last_word(const char *name)
+{
+ const char *cp = strrchr(name, ' ');
+
+ if (cp)
+ return cp + 1;
+ return name;
+}
+
+/**
+ * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_transition_control *p1 = container_of(a,
+ typeof(*p1),
+ head);
+ const struct tomoyo_transition_control *p2 = container_of(b,
+ typeof(*p2),
+ head);
+
+ return p1->type == p2->type && p1->is_last_name == p2->is_last_name
+ && p1->domainname == p2->domainname
+ && p1->program == p2->program;
+}
+
+/**
+ * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type: Type of this entry.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+ const u8 type)
+{
+ struct tomoyo_transition_control e = { .type = type };
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ char *program = param->data;
+ char *domainname = strstr(program, " from ");
+
+ if (domainname) {
+ *domainname = '\0';
+ domainname += 6;
+ } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP ||
+ type == TOMOYO_TRANSITION_CONTROL_KEEP) {
+ domainname = program;
+ program = NULL;
+ }
+ if (program && strcmp(program, "any")) {
+ if (!tomoyo_correct_path(program))
+ return -EINVAL;
+ e.program = tomoyo_get_name(program);
+ if (!e.program)
+ goto out;
+ }
+ if (domainname && strcmp(domainname, "any")) {
+ if (!tomoyo_correct_domain(domainname)) {
+ if (!tomoyo_correct_path(domainname))
+ goto out;
+ e.is_last_name = true;
+ }
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
+ goto out;
+ }
+ param->list = &param->ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_transition_control);
+out:
+ tomoyo_put_name(e.domainname);
+ tomoyo_put_name(e.program);
+ return error;
+}
+
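
The " from " splitting above edits the input buffer in place: the space before "from" becomes a terminator for the program part. A minimal stand-alone parse of the same syntax (the sample program and domainname are invented):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char line[] = "/usr/bin/ssh from <kernel> /usr/sbin/sshd";
    	char *program = line;
    	char *domainname = strstr(program, " from ");

    	if (domainname) {
    		*domainname = '\0';	/* Terminate the program part. */
    		domainname += 6;	/* Skip " from ". */
    	}
    	printf("program=<%s> domain=<%s>\n",
    	       program, domainname ? domainname : "any");
    	return 0;
    }
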
+/**
+ * tomoyo_scan_transition - Try to find specific domain transition type.
+ *
+ * @list: Pointer to "struct list_head".
+ * @domainname: The name of current domain.
+ * @program: The name of requested program.
+ * @last_name: The last component of @domainname.
+ * @type: One of values in "enum tomoyo_transition_type".
+ *
+ * Returns true if found one, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static inline bool tomoyo_scan_transition
+(const struct list_head *list, const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program, const char *last_name,
+ const enum tomoyo_transition_type type)
+{
+ const struct tomoyo_transition_control *ptr;
+
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted || ptr->type != type)
+ continue;
+ if (ptr->domainname) {
+ if (!ptr->is_last_name) {
+ if (ptr->domainname != domainname)
+ continue;
+ } else {
+ /*
+				 * Use direct strcmp() since this code
+				 * path is rarely used.
+ */
+ if (strcmp(ptr->domainname->name, last_name))
+ continue;
+ }
+ }
+ if (ptr->program && tomoyo_pathcmp(ptr->program, program))
+ continue;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_transition_type - Get domain transition type.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @domainname: The name of current domain.
+ * @program: The name of requested program.
+ *
+ * Returns TOMOYO_TRANSITION_CONTROL_RESET if executing @program causes
+ * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE
+ * if executing @program reinitializes domain transition within that
+ * namespace, TOMOYO_TRANSITION_CONTROL_KEEP if executing @program stays at
+ * @domainname, other values otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static enum tomoyo_transition_type tomoyo_transition_type
+(const struct tomoyo_policy_namespace *ns,
+ const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program)
+{
+ const char *last_name = tomoyo_last_word(domainname->name);
+ enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET;
+
+ while (type < TOMOYO_MAX_TRANSITION_TYPE) {
+ const struct list_head * const list =
+ &ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+
+ if (!tomoyo_scan_transition(list, domainname, program,
+ last_name, type)) {
+ type++;
+ continue;
+ }
+ if (type != TOMOYO_TRANSITION_CONTROL_NO_RESET &&
+ type != TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE)
+ break;
+ /*
+ * Do not check for reset_domain if no_reset_domain matched.
+ * Do not check for initialize_domain if no_initialize_domain
+ * matched.
+ */
+ type++;
+ type++;
+ }
+ return type;
+}
+
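
The double increment above only works if each "no_" keyword immediately precedes the keyword it suppresses in enum tomoyo_transition_type. A sketch under that ordering assumption, with simplified enum names:

    #include <stdio.h>

    enum ttype { NO_RESET, RESET, NO_INITIALIZE, INITIALIZE,
    	     NO_KEEP, KEEP, MAX_TYPE };

    static int matches(enum ttype t)
    {
    	return t == NO_INITIALIZE;	/* Pretend only no_initialize matched. */
    }

    int main(void)
    {
    	enum ttype type = NO_RESET;

    	while (type < MAX_TYPE) {
    		if (!matches(type)) {
    			type++;
    			continue;
    		}
    		if (type != NO_RESET && type != NO_INITIALIZE)
    			break;
    		type += 2;	/* Skip the rule this "no_" entry negates. */
    	}
    	printf("result=%d (MAX_TYPE=%d means no special transition)\n",
    	       type, MAX_TYPE);
    	return 0;
    }
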
+/**
+ * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return p1->original_name == p2->original_name &&
+ p1->aggregated_name == p2->aggregated_name;
+}
+
+/**
+ * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_aggregator e = { };
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ const char *original_name = tomoyo_read_token(param);
+ const char *aggregated_name = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(original_name) ||
+ !tomoyo_correct_path(aggregated_name))
+ return -EINVAL;
+ e.original_name = tomoyo_get_name(original_name);
+ e.aggregated_name = tomoyo_get_name(aggregated_name);
+ if (!e.original_name || !e.aggregated_name ||
+ e.aggregated_name->is_patterned) /* No patterns allowed. */
+ goto out;
+ param->list = &param->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_aggregator);
+out:
+ tomoyo_put_name(e.original_name);
+ tomoyo_put_name(e.aggregated_name);
+ return error;
+}
+
+/**
+ * tomoyo_find_namespace - Find specified namespace.
+ *
+ * @name: Name of namespace to find.
+ * @len: Length of @name.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" if found,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static struct tomoyo_policy_namespace *tomoyo_find_namespace
+(const char *name, const unsigned int len)
+{
+ struct tomoyo_policy_namespace *ns;
+
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ if (strncmp(name, ns->name, len) ||
+ (name[len] && name[len] != ' '))
+ continue;
+ return ns;
+ }
+ return NULL;
+}
+
+/**
+ * tomoyo_assign_namespace - Create a new namespace.
+ *
+ * @domainname: Name of namespace to create.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" on success,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname)
+{
+ struct tomoyo_policy_namespace *ptr;
+ struct tomoyo_policy_namespace *entry;
+ const char *cp = domainname;
+ unsigned int len = 0;
+
+ while (*cp && *cp++ != ' ')
+ len++;
+ ptr = tomoyo_find_namespace(domainname, len);
+ if (ptr)
+ return ptr;
+ if (len >= TOMOYO_EXEC_TMPSIZE - 10 || !tomoyo_domain_def(domainname))
+ return NULL;
+ entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS | __GFP_NOWARN);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ ptr = tomoyo_find_namespace(domainname, len);
+ if (!ptr && tomoyo_memory_ok(entry)) {
+ char *name = (char *) (entry + 1);
+
+ ptr = entry;
+ memmove(name, domainname, len);
+ name[len] = '\0';
+ entry->name = name;
+ tomoyo_init_policy_namespace(entry);
+ entry = NULL;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ kfree(entry);
+ return ptr;
+}
+
+/**
+ * tomoyo_namespace_jump - Check for namespace jump.
+ *
+ * @domainname: Name of domain.
+ *
+ * Returns true if namespace differs, false otherwise.
+ */
+static bool tomoyo_namespace_jump(const char *domainname)
+{
+ const char *namespace = tomoyo_current_namespace()->name;
+ const int len = strlen(namespace);
+
+ return strncmp(domainname, namespace, len) ||
+ (domainname[len] && domainname[len] != ' ');
+}
+
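
The check is a whole-token prefix comparison: the domainname must start with the namespace name, and the character right after it must end the token. A stand-alone sketch reusing the same strncmp() logic with invented names:

    #include <stdio.h>
    #include <string.h>

    static int namespace_jump(const char *domainname, const char *ns)
    {
    	const size_t len = strlen(ns);

    	return strncmp(domainname, ns, len) ||
    	       (domainname[len] && domainname[len] != ' ');
    }

    int main(void)
    {
    	printf("%d\n", namespace_jump("<kernel> /usr/sbin/sshd", "<kernel>"));
    	printf("%d\n", namespace_jump("<foo> /bin/sh", "<kernel>"));
    	return 0;
    }
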
+/**
+ * tomoyo_assign_domain - Create a domain or a namespace.
+ *
+ * @domainname: The name of domain.
+ * @transit: True if transit to domain found or created.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+ const bool transit)
+{
+ struct tomoyo_domain_info e = { };
+ struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname);
+ bool created = false;
+
+ if (entry) {
+ if (transit) {
+ /*
+			 * Since namespaces are created at runtime, profiles
+			 * may not have been created by the time the process
+			 * transitions to that domain. Do not perform domain
+			 * transition if the profile is not yet created.
+ */
+ if (tomoyo_policy_loaded &&
+ !entry->ns->profile_ptr[entry->profile])
+ return NULL;
+ }
+ return entry;
+ }
+ /* Requested domain does not exist. */
+ /* Don't create requested domain if domainname is invalid. */
+ if (strlen(domainname) >= TOMOYO_EXEC_TMPSIZE - 10 ||
+ !tomoyo_correct_domain(domainname))
+ return NULL;
+ /*
+	 * Since the definitions of profiles and acl_groups may differ across
+	 * namespaces, do not inherit "use_profile" and "use_group" settings
+	 * when automatically creating the requested domain upon transition.
+ */
+ if (transit && tomoyo_namespace_jump(domainname))
+ return NULL;
+ e.ns = tomoyo_assign_namespace(domainname);
+ if (!e.ns)
+ return NULL;
+ /*
+ * "use_profile" and "use_group" settings for automatically created
+ * domains are inherited from current domain. These are 0 for manually
+ * created domains.
+ */
+ if (transit) {
+ const struct tomoyo_domain_info *domain = tomoyo_domain();
+
+ e.profile = domain->profile;
+ memcpy(e.group, domain->group, sizeof(e.group));
+ }
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
+ return NULL;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ entry = tomoyo_find_domain(domainname);
+ if (!entry) {
+ entry = tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ INIT_LIST_HEAD(&entry->acl_info_list);
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+ created = true;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_name(e.domainname);
+ if (entry && transit) {
+ if (created) {
+ struct tomoyo_request_info r;
+ int i;
+
+ tomoyo_init_request_info(&r, entry,
+ TOMOYO_MAC_FILE_EXECUTE);
+ r.granted = false;
+ tomoyo_write_log(&r, "use_profile %u\n",
+ entry->profile);
+ for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
+ if (test_bit(i, entry->group))
+ tomoyo_write_log(&r, "use_group %u\n",
+ i);
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ }
+ }
+ return entry;
+}
+
+/**
+ * tomoyo_environ - Check permission for environment variable names.
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_environ(struct tomoyo_execve *ee)
+{
+ struct tomoyo_request_info *r = &ee->r;
+ struct linux_binprm *bprm = ee->bprm;
+ /* env_page.data is allocated by tomoyo_dump_page(). */
+ struct tomoyo_page_dump env_page = { };
+ char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ int error = -ENOMEM;
+
+ ee->r.type = TOMOYO_MAC_ENVIRON;
+ ee->r.profile = r->domain->profile;
+ ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+ TOMOYO_MAC_ENVIRON);
+ if (!r->mode || !envp_count)
+ return 0;
+ arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!arg_ptr)
+ goto out;
+ while (error == -ENOMEM) {
+ if (!tomoyo_dump_page(bprm, pos, &env_page))
+ goto out;
+ pos += PAGE_SIZE - offset;
+		/* Skip argv[] strings, which precede envp[]. */
+ while (argv_count && offset < PAGE_SIZE) {
+ if (!env_page.data[offset++])
+ argv_count--;
+ }
+ if (argv_count) {
+ offset = 0;
+ continue;
+ }
+ while (offset < PAGE_SIZE) {
+ const unsigned char c = env_page.data[offset++];
+
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '=') {
+ arg_ptr[arg_len++] = '\0';
+ } else if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++]
+ = ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ if (tomoyo_env_perm(r, arg_ptr)) {
+ error = -EPERM;
+ break;
+ }
+ if (!--envp_count) {
+ error = 0;
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ }
+out:
+ if (r->mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ kfree(env_page.data);
+ kfree(arg_ptr);
+ return error;
+}
+
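
tomoyo_environ() escapes each byte while copying strings out of the exec image: '\' is doubled, printable ASCII is copied verbatim, and everything else becomes a three-digit octal escape. A stand-alone sketch of just those rules; unlike the kernel loop, which stores a NUL at '=' and keeps scanning, it simply stops there, since only the variable name is matched against policy:

    #include <stdio.h>

    int main(void)
    {
    	const unsigned char env[] = "WEIRD\\\tNAME=1";
    	char out[64];
    	int len = 0;
    	const unsigned char *p;

    	for (p = env; *p && *p != '='; p++) {
    		const unsigned char c = *p;

    		if (c == '\\') {
    			out[len++] = '\\';
    			out[len++] = '\\';
    		} else if (c > ' ' && c < 127) {
    			out[len++] = c;
    		} else {
    			out[len++] = '\\';
    			out[len++] = (c >> 6) + '0';
    			out[len++] = ((c >> 3) & 7) + '0';
    			out[len++] = (c & 7) + '0';
    		}
    	}
    	out[len] = '\0';
    	printf("name as matched: %s\n", out);	/* WEIRD\\\011NAME */
    	return 0;
    }
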
+/**
+ * tomoyo_find_next_domain - Find a domain.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_find_next_domain(struct linux_binprm *bprm)
+{
+ struct tomoyo_domain_info *old_domain = tomoyo_domain();
+ struct tomoyo_domain_info *domain = NULL;
+ const char *original_name = bprm->filename;
+ int retval = -ENOMEM;
+ bool reject_on_transition_failure = false;
+ const struct tomoyo_path_info *candidate;
+ struct tomoyo_path_info exename;
+ struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
+
+ if (!ee)
+ return -ENOMEM;
+ ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!ee->tmp) {
+ kfree(ee);
+ return -ENOMEM;
+ }
+	/* ee->dump.data is allocated by tomoyo_dump_page(). */
+ tomoyo_init_request_info(&ee->r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ ee->r.ee = ee;
+ ee->bprm = bprm;
+ ee->r.obj = &ee->obj;
+ ee->obj.path1 = bprm->file->f_path;
+ /* Get symlink's pathname of program. */
+ retval = -ENOENT;
+ exename.name = tomoyo_realpath_nofollow(original_name);
+ if (!exename.name)
+ goto out;
+ tomoyo_fill_path_info(&exename);
+retry:
+ /* Check 'aggregator' directive. */
+ {
+ struct tomoyo_aggregator *ptr;
+ struct list_head *list =
+ &old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+
+ /* Check 'aggregator' directive. */
+ candidate = &exename;
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted ||
+ !tomoyo_path_matches_pattern(&exename,
+ ptr->original_name))
+ continue;
+ candidate = ptr->aggregated_name;
+ break;
+ }
+ }
+
+ /* Check execute permission. */
+ retval = tomoyo_execute_permission(&ee->r, candidate);
+ if (retval == TOMOYO_RETRY_REQUEST)
+ goto retry;
+ if (retval < 0)
+ goto out;
+ /*
+ * To be able to specify domainnames with wildcards, use the
+ * pathname specified in the policy (which may contain
+ * wildcard) rather than the pathname passed to execve()
+ * (which never contains wildcard).
+ */
+ if (ee->r.param.path.matched_path)
+ candidate = ee->r.param.path.matched_path;
+
+ /*
+ * Check for domain transition preference if "file execute" matched.
+	 * If a preference is given, make execve() fail if the domain
+	 * transition has failed, because a domain transition preference
+	 * should be used with the destination domain defined.
+ */
+ if (ee->transition) {
+ const char *domainname = ee->transition->name;
+
+ reject_on_transition_failure = true;
+ if (!strcmp(domainname, "keep"))
+ goto force_keep_domain;
+ if (!strcmp(domainname, "child"))
+ goto force_child_domain;
+ if (!strcmp(domainname, "reset"))
+ goto force_reset_domain;
+ if (!strcmp(domainname, "initialize"))
+ goto force_initialize_domain;
+ if (!strcmp(domainname, "parent")) {
+ char *cp;
+
+ strncpy(ee->tmp, old_domain->domainname->name,
+ TOMOYO_EXEC_TMPSIZE - 1);
+ cp = strrchr(ee->tmp, ' ');
+ if (cp)
+ *cp = '\0';
+ } else if (*domainname == '<')
+ strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ else
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, domainname);
+ goto force_jump_domain;
+ }
+ /*
+ * No domain transition preference specified.
+ * Calculate domain to transit to.
+ */
+ switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
+ candidate)) {
+ case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
+ /* Transit to the root of specified namespace. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+ candidate->name);
+ /*
+ * Make execve() fail if domain transition across namespaces
+ * has failed.
+ */
+ reject_on_transition_failure = true;
+ break;
+ case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
+ /* Transit to the child of current namespace's root. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->ns->name, candidate->name);
+ break;
+ case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
+ /* Keep current domain. */
+ domain = old_domain;
+ break;
+ default:
+ if (old_domain == &tomoyo_kernel_domain &&
+ !tomoyo_policy_loaded) {
+ /*
+			 * There is no need to transit from the kernel domain
+			 * before starting /sbin/init. But do transit from the
+			 * kernel domain when executing initializers, because
+			 * they might start before /sbin/init.
+ */
+ domain = old_domain;
+ break;
+ }
+force_child_domain:
+ /* Normal domain transition. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, candidate->name);
+ break;
+ }
+force_jump_domain:
+ if (!domain)
+ domain = tomoyo_assign_domain(ee->tmp, true);
+ if (domain)
+ retval = 0;
+ else if (reject_on_transition_failure) {
+ pr_warn("ERROR: Domain '%s' not ready.\n", ee->tmp);
+ retval = -ENOMEM;
+ } else if (ee->r.mode == TOMOYO_CONFIG_ENFORCING)
+ retval = -ENOMEM;
+ else {
+ retval = 0;
+ if (!old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED]) {
+ old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED] = true;
+ ee->r.granted = false;
+ tomoyo_write_log(&ee->r, "%s", tomoyo_dif
+ [TOMOYO_DIF_TRANSITION_FAILED]);
+ pr_warn("ERROR: Domain '%s' not defined.\n", ee->tmp);
+ }
+ }
+ out:
+ if (!domain)
+ domain = old_domain;
+ /* Update reference count on "struct tomoyo_domain_info". */
+ {
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ s->old_domain_info = s->domain_info;
+ s->domain_info = domain;
+ atomic_inc(&domain->users);
+ }
+ kfree(exename.name);
+ if (!retval) {
+ ee->r.domain = domain;
+ retval = tomoyo_environ(ee);
+ }
+ kfree(ee->tmp);
+ kfree(ee->dump.data);
+ kfree(ee);
+ return retval;
+}
+
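
The transition targets built above are plain string compositions over domainnames. A sketch with invented names showing the three spellings produced by the child, initialize and reset cases:

    #include <stdio.h>

    int main(void)
    {
    	const char *ns = "<kernel>";
    	const char *old_domain = "<kernel> /usr/sbin/sshd";
    	const char *candidate = "/bin/bash";
    	char tmp[256];

    	snprintf(tmp, sizeof(tmp), "%s %s", old_domain, candidate);
    	printf("child:      %s\n", tmp);
    	snprintf(tmp, sizeof(tmp), "%s %s", ns, candidate);
    	printf("initialize: %s\n", tmp);
    	snprintf(tmp, sizeof(tmp), "<%s>", candidate);
    	printf("reset:      %s\n", tmp);
    	return 0;
    }
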
+/**
+ * tomoyo_dump_page - Dump a page to buffer.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @pos: Location to dump.
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+ struct tomoyo_page_dump *dump)
+{
+ struct page *page;
+#ifdef CONFIG_MMU
+ int ret;
+#endif
+
+ /* dump->data is released by tomoyo_find_next_domain(). */
+ if (!dump->data) {
+ dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
+ if (!dump->data)
+ return false;
+ }
+	/* Same as get_arg_page(bprm, pos, 0) in fs/exec.c */
+#ifdef CONFIG_MMU
+ /*
+ * This is called at execve() time in order to dig around
+	 * in the argv/environment of the new process
+ * (represented by bprm).
+ */
+ mmap_read_lock(bprm->mm);
+ ret = get_user_pages_remote(bprm->mm, pos, 1,
+ FOLL_FORCE, &page, NULL, NULL);
+ mmap_read_unlock(bprm->mm);
+ if (ret <= 0)
+ return false;
+#else
+ page = bprm->page[pos / PAGE_SIZE];
+#endif
+ if (page != dump->page) {
+ const unsigned int offset = pos % PAGE_SIZE;
+ /*
+ * Maybe kmap()/kunmap() should be used here.
+ * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
+ * So do I.
+ */
+ char *kaddr = kmap_atomic(page);
+
+ dump->page = page;
+ memcpy(dump->data + offset, kaddr + offset,
+ PAGE_SIZE - offset);
+ kunmap_atomic(kaddr);
+ }
+	/* Same as put_arg_page(page) in fs/exec.c */
+#ifdef CONFIG_MMU
+ put_page(page);
+#endif
+ return true;
+}
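
tomoyo_dump_page() works in one-page windows: the flat address pos splits into a page index and an in-page offset, and only the tail of the page from that offset onward is copied into the one-page buffer. A sketch of the arithmetic (PG stands in for PAGE_SIZE):

    #include <stdio.h>

    #define PG 4096UL

    int main(void)
    {
    	unsigned long pos = 3 * PG + 100;

    	printf("page=%lu offset=%lu copy=%lu bytes\n",
    	       pos / PG, pos % PG, PG - pos % PG);
    	return 0;
    }
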
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 000000000..7f0a471f1
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_env_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "misc env %s\n",
+ r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+ struct tomoyo_path_info environ;
+ int error;
+
+ if (!env || !*env)
+ return 0;
+ environ.name = env;
+ tomoyo_fill_path_info(&environ);
+ r->param_type = TOMOYO_TYPE_ENV_ACL;
+ r->param.environ.name = &environ;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_env_acl);
+ error = tomoyo_audit_env_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+ int error = -ENOMEM;
+ const char *data = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(data) || strchr(data, '='))
+ return -EINVAL;
+ e.env = tomoyo_get_name(data);
+ if (!e.env)
+ return error;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_env_acl, NULL);
+ tomoyo_put_name(e.env);
+ return error;
+}
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+ if (tomoyo_str_starts(&param->data, "env "))
+ return tomoyo_write_env(param);
+ return -EINVAL;
+}
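
tomoyo_write_misc() dispatches on a keyword prefix that is consumed from the parameter buffer. A user-space sketch of the cursor-advancing prefix test it relies on (str_starts mirrors the behaviour assumed of tomoyo_str_starts()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool str_starts(char **src, const char *find)
    {
    	const size_t len = strlen(find);

    	if (strncmp(*src, find, len))
    		return false;
    	*src += len;	/* Advance past the keyword on a match. */
    	return true;
    }

    int main(void)
    {
    	char line[] = "env LD_PRELOAD";
    	char *data = line;

    	if (str_starts(&data, "env "))
    		printf("env name to match: %s\n", data);
    	return 0;
    }
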
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
new file mode 100644
index 000000000..8f3b90b6e
--- /dev/null
+++ b/security/tomoyo/file.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/file.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/*
+ * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index".
+ */
+static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE,
+ [TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_WRITE] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_APPEND] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_UNLINK] = TOMOYO_MAC_FILE_UNLINK,
+ [TOMOYO_TYPE_GETATTR] = TOMOYO_MAC_FILE_GETATTR,
+ [TOMOYO_TYPE_RMDIR] = TOMOYO_MAC_FILE_RMDIR,
+ [TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE,
+ [TOMOYO_TYPE_SYMLINK] = TOMOYO_MAC_FILE_SYMLINK,
+ [TOMOYO_TYPE_CHROOT] = TOMOYO_MAC_FILE_CHROOT,
+ [TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT,
+};
+
+/*
+ * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = {
+ [TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK,
+ [TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR,
+};
+
+/*
+ * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = {
+ [TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK,
+ [TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME,
+ [TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT,
+};
+
+/*
+ * Mapping table from "enum tomoyo_path_number_acl_index" to
+ * "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = {
+ [TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE,
+ [TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR,
+ [TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO,
+ [TOMOYO_TYPE_MKSOCK] = TOMOYO_MAC_FILE_MKSOCK,
+ [TOMOYO_TYPE_IOCTL] = TOMOYO_MAC_FILE_IOCTL,
+ [TOMOYO_TYPE_CHMOD] = TOMOYO_MAC_FILE_CHMOD,
+ [TOMOYO_TYPE_CHOWN] = TOMOYO_MAC_FILE_CHOWN,
+ [TOMOYO_TYPE_CHGRP] = TOMOYO_MAC_FILE_CHGRP,
+};
+
+/**
+ * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union".
+ *
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr)
+{
+ tomoyo_put_group(ptr->group);
+ tomoyo_put_name(ptr->filename);
+}
+
+/**
+ * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not.
+ *
+ * @name: Pointer to "struct tomoyo_path_info".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise.
+ */
+const struct tomoyo_path_info *
+tomoyo_compare_name_union(const struct tomoyo_path_info *name,
+ const struct tomoyo_name_union *ptr)
+{
+ if (ptr->group)
+ return tomoyo_path_matches_group(name, ptr->group);
+ if (tomoyo_path_matches_pattern(name, ptr->filename))
+ return ptr->filename;
+ return NULL;
+}
+
+/**
+ * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union".
+ *
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr)
+{
+ tomoyo_put_group(ptr->group);
+}
+
+/**
+ * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not.
+ *
+ * @value: Number to check.
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true if @value matches @ptr, false otherwise.
+ */
+bool tomoyo_compare_number_union(const unsigned long value,
+ const struct tomoyo_number_union *ptr)
+{
+ if (ptr->group)
+ return tomoyo_number_matches_group(value, value, ptr->group);
+ return value >= ptr->values[0] && value <= ptr->values[1];
+}
+
+/**
+ * tomoyo_add_slash - Add trailing '/' if needed.
+ *
+ * @buf: Pointer to "struct tomoyo_path_info".
+ *
+ * Returns nothing.
+ *
+ * @buf must be generated by tomoyo_encode() because this function does not
+ * allocate memory for adding '/'.
+ */
+static void tomoyo_add_slash(struct tomoyo_path_info *buf)
+{
+ if (buf->is_dir)
+ return;
+ /*
+ * This is OK because tomoyo_encode() reserves space for appending "/".
+ */
+ strcat((char *) buf->name, "/");
+ tomoyo_fill_path_info(buf);
+}
+
+/**
+ * tomoyo_get_realpath - Get realpath.
+ *
+ * @buf: Pointer to "struct tomoyo_path_info".
+ * @path: Pointer to "struct path".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
+{
+ buf->name = tomoyo_realpath_from_path(path);
+ if (buf->name) {
+ tomoyo_fill_path_info(buf);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_audit_path_log - Audit path request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword
+ [r->param.path.operation],
+ r->param.path.filename->name);
+}
+
+/**
+ * tomoyo_audit_path2_log - Audit path/path request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path2_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+ [tomoyo_pp2mac[r->param.path2.operation]],
+ r->param.path2.filename1->name,
+ r->param.path2.filename2->name);
+}
+
+/**
+ * tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n",
+ tomoyo_mac_keywords
+ [tomoyo_pnnn2mac[r->param.mkdev.operation]],
+ r->param.mkdev.filename->name,
+ r->param.mkdev.mode, r->param.mkdev.major,
+ r->param.mkdev.minor);
+}
+
+/**
+ * tomoyo_audit_path_number_log - Audit path/number request log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r)
+{
+ const u8 type = r->param.path_number.operation;
+ u8 radix;
+ char buffer[64];
+
+ switch (type) {
+ case TOMOYO_TYPE_CREATE:
+ case TOMOYO_TYPE_MKDIR:
+ case TOMOYO_TYPE_MKFIFO:
+ case TOMOYO_TYPE_MKSOCK:
+ case TOMOYO_TYPE_CHMOD:
+ radix = TOMOYO_VALUE_TYPE_OCTAL;
+ break;
+ case TOMOYO_TYPE_IOCTL:
+ radix = TOMOYO_VALUE_TYPE_HEXADECIMAL;
+ break;
+ default:
+ radix = TOMOYO_VALUE_TYPE_DECIMAL;
+ break;
+ }
+ tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number,
+ radix);
+ return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+ [tomoyo_pn2mac[type]],
+ r->param.path_number.filename->name, buffer);
+}
+
+/**
+ * tomoyo_check_path_acl - Check permission for path operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ *
+ * To be able to use wildcard for domain transition, this function sets
+ * matching entry on success. Since the caller holds tomoyo_read_lock(),
+ * it is safe to set matching entry.
+ */
+static bool tomoyo_check_path_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+
+ if (acl->perm & (1 << r->param.path.operation)) {
+ r->param.path.matched_path =
+ tomoyo_compare_name_union(r->param.path.filename,
+ &acl->name);
+ return r->param.path.matched_path != NULL;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_check_path_number_acl - Check permission for path number operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_path_number_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.path_number.operation)) &&
+ tomoyo_compare_number_union(r->param.path_number.number,
+ &acl->number) &&
+ tomoyo_compare_name_union(r->param.path_number.filename,
+ &acl->name);
+}
+
+/**
+ * tomoyo_check_path2_acl - Check permission for path/path operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_path2_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.path2.operation)) &&
+ tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1)
+ && tomoyo_compare_name_union(r->param.path2.filename2,
+ &acl->name2);
+}
+
+/**
+ * tomoyo_check_mkdev_acl - Check permission for path/number/number/number operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_mkdev_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.mkdev.operation)) &&
+ tomoyo_compare_number_union(r->param.mkdev.mode,
+ &acl->mode) &&
+ tomoyo_compare_number_union(r->param.mkdev.major,
+ &acl->major) &&
+ tomoyo_compare_number_union(r->param.mkdev.minor,
+ &acl->minor) &&
+ tomoyo_compare_name_union(r->param.mkdev.filename,
+ &acl->name);
+}
+
+/**
+ * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head)
+ ->perm;
+ u16 perm = READ_ONCE(*a_perm);
+ const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
+
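
tomoyo_merge_path_acl() above and its mkdev/path2/path_number siblings below all implement the same bitmask merge: deleting clears the requested permission bits, adding sets them, and a result of zero tells the caller the entry can be reclaimed. A stand-alone sketch (the bit assignments are hypothetical):

    #include <stdio.h>

    static unsigned short merge(unsigned short a, unsigned short b,
    			    int is_delete)
    {
    	return is_delete ? a & ~b : a | b;
    }

    int main(void)
    {
    	unsigned short perm = 0;

    	perm = merge(perm, 1 << 1, 0);	/* grant "read"  (bit 1) */
    	perm = merge(perm, 1 << 2, 0);	/* grant "write" (bit 2) */
    	perm = merge(perm, 1 << 1, 1);	/* revoke "read" */
    	printf("perm=0x%x empty=%d\n", perm, !perm);
    	return 0;
    }
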
+/**
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
+ *
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_path_acl(const u16 perm,
+ struct tomoyo_acl_param *param)
+{
+ struct tomoyo_path_acl e = {
+ .head.type = TOMOYO_TYPE_PATH_ACL,
+ .perm = perm
+ };
+ int error;
+
+ if (!tomoyo_parse_name_union(param, &e.name))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path_acl,
+ tomoyo_merge_path_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
+
+/**
+ * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return tomoyo_same_name_union(&p1->name, &p2->name) &&
+ tomoyo_same_number_union(&p1->mode, &p2->mode) &&
+ tomoyo_same_number_union(&p1->major, &p2->major) &&
+ tomoyo_same_number_union(&p1->minor, &p2->minor);
+}
+
+/**
+ * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl,
+ head)->perm;
+ u8 perm = READ_ONCE(*a_perm);
+ const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
+ ->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
+
+/**
+ * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list.
+ *
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_mkdev_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
+{
+ struct tomoyo_mkdev_acl e = {
+ .head.type = TOMOYO_TYPE_MKDEV_ACL,
+ .perm = perm
+ };
+ int error;
+
+ if (!tomoyo_parse_name_union(param, &e.name) ||
+ !tomoyo_parse_number_union(param, &e.mode) ||
+ !tomoyo_parse_number_union(param, &e.major) ||
+ !tomoyo_parse_number_union(param, &e.minor))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_mkdev_acl,
+ tomoyo_merge_mkdev_acl);
+ tomoyo_put_name_union(&e.name);
+ tomoyo_put_number_union(&e.mode);
+ tomoyo_put_number_union(&e.major);
+ tomoyo_put_number_union(&e.minor);
+ return error;
+}
+
+/**
+ * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return tomoyo_same_name_union(&p1->name1, &p2->name1) &&
+ tomoyo_same_name_union(&p1->name2, &p2->name2);
+}
+
+/**
+ * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head)
+ ->perm;
+ u8 perm = READ_ONCE(*a_perm);
+ const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
+
+/**
+ * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
+ *
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_path2_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
+{
+ struct tomoyo_path2_acl e = {
+ .head.type = TOMOYO_TYPE_PATH2_ACL,
+ .perm = perm
+ };
+ int error;
+
+ if (!tomoyo_parse_name_union(param, &e.name1) ||
+ !tomoyo_parse_name_union(param, &e.name2))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path2_acl,
+ tomoyo_merge_path2_acl);
+ tomoyo_put_name_union(&e.name1);
+ tomoyo_put_name_union(&e.name2);
+ return error;
+}
+
+/**
+ * tomoyo_path_permission - Check permission for single path operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @operation: Type of operation.
+ * @filename: Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+ const struct tomoyo_path_info *filename)
+{
+ int error;
+
+ r->type = tomoyo_p2mac[operation];
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ if (r->mode == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = operation;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ error = tomoyo_audit_path_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_execute_permission - Check permission for execute operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @filename: Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename)
+{
+ /*
+ * Unlike other permission checks, this check is done regardless of
+ * profile mode settings in order to check for domain transition
+ * preference.
+ */
+ r->type = TOMOYO_MAC_FILE_EXECUTE;
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+ r->matched_acl->cond->transit : NULL;
+ if (r->mode != TOMOYO_CONFIG_DISABLED)
+ return tomoyo_audit_path_log(r);
+ return 0;
+}
+
+/**
+ * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_path_number_acl *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_name_union(&p1->name, &p2->name) &&
+ tomoyo_same_number_union(&p1->number, &p2->number);
+}
+
+/**
+ * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl,
+ head)->perm;
+ u8 perm = READ_ONCE(*a_perm);
+ const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
+ ->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
+
+/**
+ * tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL.
+ *
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_update_path_number_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
+{
+ struct tomoyo_path_number_acl e = {
+ .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL,
+ .perm = perm
+ };
+ int error;
+
+ if (!tomoyo_parse_name_union(param, &e.name) ||
+ !tomoyo_parse_number_union(param, &e.number))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path_number_acl,
+ tomoyo_merge_path_number_acl);
+ tomoyo_put_name_union(&e.name);
+ tomoyo_put_number_union(&e.number);
+ return error;
+}
+
+/**
+ * tomoyo_path_number_perm - Check permission for "create", "mkdir", "mkfifo", "mksock", "ioctl", "chmod", "chown", "chgrp".
+ *
+ * @type: Type of operation.
+ * @path: Pointer to "struct path".
+ * @number: Number.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path_number_perm(const u8 type, const struct path *path,
+ unsigned long number)
+{
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
+ };
+ int error = -ENOMEM;
+ struct tomoyo_path_info buf;
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type])
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf, path))
+ goto out;
+ r.obj = &obj;
+ if (type == TOMOYO_TYPE_MKDIR)
+ tomoyo_add_slash(&buf);
+ r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL;
+ r.param.path_number.operation = type;
+ r.param.path_number.filename = &buf;
+ r.param.path_number.number = number;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_path_number_acl);
+ error = tomoyo_audit_path_number_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf.name);
+ out:
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ return error;
+}
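+
+/*
+ * For example (assuming TOMOYO 2.x policy syntax), a request matched
+ * here with type == TOMOYO_TYPE_CHMOD on /etc/resolv.conf and mode 0644
+ * corresponds to a policy line such as
+ *
+ *   file chmod /etc/resolv.conf 0644
+ *
+ * "create"/"mkdir"/"mkfifo"/"mksock" pass the creation mode and "ioctl"
+ * passes the command number through the same @number parameter.
+ */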
+
+/**
+ * tomoyo_check_open_permission - Check permission for "read" and "write".
+ *
+ * @domain: Pointer to "struct tomoyo_domain_info".
+ * @path: Pointer to "struct path".
+ * @flag: Flags for open().
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+ const struct path *path, const int flag)
+{
+ const u8 acc_mode = ACC_MODE(flag);
+ int error = 0;
+ struct tomoyo_path_info buf;
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
+ };
+ int idx;
+
+ buf.name = NULL;
+ r.mode = TOMOYO_CONFIG_DISABLED;
+ idx = tomoyo_read_lock();
+ if (acc_mode &&
+ tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN)
+ != TOMOYO_CONFIG_DISABLED) {
+ if (!tomoyo_get_realpath(&buf, path)) {
+ error = -ENOMEM;
+ goto out;
+ }
+ r.obj = &obj;
+ if (acc_mode & MAY_READ)
+ error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ,
+ &buf);
+ if (!error && (acc_mode & MAY_WRITE))
+ error = tomoyo_path_permission(&r, (flag & O_APPEND) ?
+ TOMOYO_TYPE_APPEND :
+ TOMOYO_TYPE_WRITE,
+ &buf);
+ }
+ out:
+ kfree(buf.name);
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ return error;
+}
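+
+/*
+ * ACC_MODE() maps the open() access-mode bits to a MAY_READ/MAY_WRITE
+ * mask, so the checks above decompose as, for example:
+ *
+ *   open(..., O_RDONLY)            -> TOMOYO_TYPE_READ
+ *   open(..., O_RDWR)              -> TOMOYO_TYPE_READ then TOMOYO_TYPE_WRITE
+ *   open(..., O_WRONLY | O_APPEND) -> TOMOYO_TYPE_APPEND
+ *
+ * so an append-only open is audited as "file append" rather than
+ * "file write".
+ */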
+
+/**
+ * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount".
+ *
+ * @operation: Type of operation.
+ * @path: Pointer to "struct path".
+ * @target: Symlink's target if @operation is TOMOYO_TYPE_SYMLINK,
+ * NULL otherwise.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target)
+{
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
+ };
+ int error;
+ struct tomoyo_path_info buf;
+ bool is_enforce;
+ struct tomoyo_path_info symlink_target;
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING);
+ error = -ENOMEM;
+ buf.name = NULL;
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf, path))
+ goto out;
+ r.obj = &obj;
+ switch (operation) {
+ case TOMOYO_TYPE_RMDIR:
+ case TOMOYO_TYPE_CHROOT:
+ tomoyo_add_slash(&buf);
+ break;
+ case TOMOYO_TYPE_SYMLINK:
+ symlink_target.name = tomoyo_encode(target);
+ if (!symlink_target.name)
+ goto out;
+ tomoyo_fill_path_info(&symlink_target);
+ obj.symlink_target = &symlink_target;
+ break;
+ }
+ error = tomoyo_path_permission(&r, operation, &buf);
+ if (operation == TOMOYO_TYPE_SYMLINK)
+ kfree(symlink_target.name);
+ out:
+ kfree(buf.name);
+ tomoyo_read_unlock(idx);
+ if (!is_enforce)
+ error = 0;
+ return error;
+}
+
+/**
+ * tomoyo_mkdev_perm - Check permission for "mkblock" and "mkchar".
+ *
+ * @operation: Type of operation. (TOMOYO_TYPE_MKCHAR or TOMOYO_TYPE_MKBLOCK)
+ * @path: Pointer to "struct path".
+ * @mode: Create mode.
+ * @dev: Device number.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
+ const unsigned int mode, unsigned int dev)
+{
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
+ };
+ int error = -ENOMEM;
+ struct tomoyo_path_info buf;
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ idx = tomoyo_read_lock();
+ error = -ENOMEM;
+ if (tomoyo_get_realpath(&buf, path)) {
+ r.obj = &obj;
+ dev = new_decode_dev(dev);
+ r.param_type = TOMOYO_TYPE_MKDEV_ACL;
+ r.param.mkdev.filename = &buf;
+ r.param.mkdev.operation = operation;
+ r.param.mkdev.mode = mode;
+ r.param.mkdev.major = MAJOR(dev);
+ r.param.mkdev.minor = MINOR(dev);
+ tomoyo_check_acl(&r, tomoyo_check_mkdev_acl);
+ error = tomoyo_audit_mkdev_log(&r);
+ kfree(buf.name);
+ }
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ return error;
+}
+
+/**
+ * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
+ *
+ * @operation: Type of operation.
+ * @path1: Pointer to "struct path".
+ * @path2: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_path2_perm(const u8 operation, const struct path *path1,
+ const struct path *path2)
+{
+ int error = -ENOMEM;
+ struct tomoyo_path_info buf1;
+ struct tomoyo_path_info buf2;
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = { .mnt = path1->mnt, .dentry = path1->dentry },
+ .path2 = { .mnt = path2->mnt, .dentry = path2->dentry }
+ };
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ buf1.name = NULL;
+ buf2.name = NULL;
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf1, path1) ||
+ !tomoyo_get_realpath(&buf2, path2))
+ goto out;
+ switch (operation) {
+ case TOMOYO_TYPE_RENAME:
+ case TOMOYO_TYPE_LINK:
+ if (!d_is_dir(path1->dentry))
+ break;
+ fallthrough;
+ case TOMOYO_TYPE_PIVOT_ROOT:
+ tomoyo_add_slash(&buf1);
+ tomoyo_add_slash(&buf2);
+ break;
+ }
+ r.obj = &obj;
+ r.param_type = TOMOYO_TYPE_PATH2_ACL;
+ r.param.path2.operation = operation;
+ r.param.path2.filename1 = &buf1;
+ r.param.path2.filename2 = &buf2;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_path2_acl);
+ error = tomoyo_audit_path2_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ out:
+ kfree(buf1.name);
+ kfree(buf2.name);
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ return error;
+}
+
+/**
+ * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
+ tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
+ tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
+ tomoyo_same_number_union(&p1->flags, &p2->flags);
+}
+
+/**
+ * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
+ int error;
+
+ if (!tomoyo_parse_name_union(param, &e.dev_name) ||
+ !tomoyo_parse_name_union(param, &e.dir_name) ||
+ !tomoyo_parse_name_union(param, &e.fs_type) ||
+ !tomoyo_parse_number_union(param, &e.flags))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_mount_acl, NULL);
+ tomoyo_put_name_union(&e.dev_name);
+ tomoyo_put_name_union(&e.dir_name);
+ tomoyo_put_name_union(&e.fs_type);
+ tomoyo_put_number_union(&e.flags);
+ return error;
+}
+
+/**
+ * tomoyo_write_file - Update file related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_file(struct tomoyo_acl_param *param)
+{
+ u16 perm = 0;
+ u8 type;
+ const char *operation = tomoyo_read_token(param);
+
+ for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pp2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path2_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pn2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path_number_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pnnn2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_mkdev_acl(perm, param);
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT]))
+ return tomoyo_update_mount_acl(param);
+ return -EINVAL;
+}
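+
+/*
+ * A sketch of how a policy line reaches this parser (assuming TOMOYO 2.x
+ * "file" directive syntax): given
+ *
+ *   file read/write /etc/fstab
+ *
+ * tomoyo_read_token() yields "read/write", the first loop sets both the
+ * TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE bits in @perm (tomoyo_permstr()
+ * matches each keyword inside the '/'-separated token), and
+ * tomoyo_update_path_acl() then parses "/etc/fstab" as the name union.
+ */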
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 000000000..026e29ea3
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/gc.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+ kfree(ptr);
+}
+
+/* The list for "struct tomoyo_io_buffer". */
+static LIST_HEAD(tomoyo_io_buffer_list);
+/* Lock for protecting tomoyo_io_buffer_list. */
+static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
+
+/**
+ * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
+{
+ struct tomoyo_io_buffer *head;
+ bool in_use = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
+ head->users++;
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ mutex_lock(&head->io_sem);
+ if (head->r.domain == element || head->r.group == element ||
+ head->r.acl == element || &head->w.domain->list == element)
+ in_use = true;
+ mutex_unlock(&head->io_sem);
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ head->users--;
+ if (in_use)
+ break;
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ return in_use;
+}
+
+/**
+ * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @string: String to check.
+ *
+ * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_name_used_by_io_buffer(const char *string)
+{
+ struct tomoyo_io_buffer *head;
+ const size_t size = strlen(string) + 1;
+ bool in_use = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
+ int i;
+
+ head->users++;
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ mutex_lock(&head->io_sem);
+ for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
+ const char *w = head->r.w[i];
+
+ if (w < string || w > string + size)
+ continue;
+ in_use = true;
+ break;
+ }
+ mutex_unlock(&head->io_sem);
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ head->users--;
+ if (in_use)
+ break;
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ return in_use;
+}
+
+/**
+ * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_transition_control(struct list_head *element)
+{
+ struct tomoyo_transition_control *ptr =
+ container_of(element, typeof(*ptr), head.list);
+
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+/**
+ * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_aggregator(struct list_head *element)
+{
+ struct tomoyo_aggregator *ptr =
+ container_of(element, typeof(*ptr), head.list);
+
+ tomoyo_put_name(ptr->original_name);
+ tomoyo_put_name(ptr->aggregated_name);
+}
+
+/**
+ * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_manager(struct list_head *element)
+{
+ struct tomoyo_manager *ptr =
+ container_of(element, typeof(*ptr), head.list);
+
+ tomoyo_put_name(ptr->manager);
+}
+
+/**
+ * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_del_acl(struct list_head *element)
+{
+ struct tomoyo_acl_info *acl =
+ container_of(element, typeof(*acl), list);
+
+ tomoyo_put_condition(acl->cond);
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ {
+ struct tomoyo_path_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ {
+ struct tomoyo_path2_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name1);
+ tomoyo_put_name_union(&entry->name2);
+ }
+ break;
+ case TOMOYO_TYPE_PATH_NUMBER_ACL:
+ {
+ struct tomoyo_path_number_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ tomoyo_put_number_union(&entry->number);
+ }
+ break;
+ case TOMOYO_TYPE_MKDEV_ACL:
+ {
+ struct tomoyo_mkdev_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ tomoyo_put_number_union(&entry->mode);
+ tomoyo_put_number_union(&entry->major);
+ tomoyo_put_number_union(&entry->minor);
+ }
+ break;
+ case TOMOYO_TYPE_MOUNT_ACL:
+ {
+ struct tomoyo_mount_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->dev_name);
+ tomoyo_put_name_union(&entry->dir_name);
+ tomoyo_put_name_union(&entry->fs_type);
+ tomoyo_put_number_union(&entry->flags);
+ }
+ break;
+ case TOMOYO_TYPE_ENV_ACL:
+ {
+ struct tomoyo_env_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->env);
+ }
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ {
+ struct tomoyo_inet_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_group(entry->address.group);
+ tomoyo_put_number_union(&entry->port);
+ }
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ {
+ struct tomoyo_unix_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ {
+ struct tomoyo_task_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->domainname);
+ }
+ break;
+ }
+}
+
+/**
+ * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_del_domain(struct list_head *element)
+{
+ struct tomoyo_domain_info *domain =
+ container_of(element, typeof(*domain), list);
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+
+ /*
+ * Since this domain is referenced from neither
+ * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+ * elements without checking for is_deleted flag.
+ */
+ list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
+ tomoyo_del_acl(&acl->list);
+ tomoyo_memory_free(acl);
+ }
+ tomoyo_put_name(domain->domainname);
+}
+
+/**
+ * tomoyo_del_condition - Delete members in "struct tomoyo_condition".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+void tomoyo_del_condition(struct list_head *element)
+{
+ struct tomoyo_condition *cond = container_of(element, typeof(*cond),
+ head.list);
+ const u16 condc = cond->condc;
+ const u16 numbers_count = cond->numbers_count;
+ const u16 names_count = cond->names_count;
+ const u16 argc = cond->argc;
+ const u16 envc = cond->envc;
+ unsigned int i;
+ const struct tomoyo_condition_element *condp
+ = (const struct tomoyo_condition_element *) (cond + 1);
+ struct tomoyo_number_union *numbers_p
+ = (struct tomoyo_number_union *) (condp + condc);
+ struct tomoyo_name_union *names_p
+ = (struct tomoyo_name_union *) (numbers_p + numbers_count);
+ const struct tomoyo_argv *argv
+ = (const struct tomoyo_argv *) (names_p + names_count);
+ const struct tomoyo_envp *envp
+ = (const struct tomoyo_envp *) (argv + argc);
+
+ for (i = 0; i < numbers_count; i++)
+ tomoyo_put_number_union(numbers_p++);
+ for (i = 0; i < names_count; i++)
+ tomoyo_put_name_union(names_p++);
+ for (i = 0; i < argc; argv++, i++)
+ tomoyo_put_name(argv->value);
+ for (i = 0; i < envc; envp++, i++) {
+ tomoyo_put_name(envp->name);
+ tomoyo_put_name(envp->value);
+ }
+}
+
+/**
+ * tomoyo_del_name - Delete members in "struct tomoyo_name".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_name(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_path_group(struct list_head *element)
+{
+ struct tomoyo_path_group *member =
+ container_of(element, typeof(*member), head.list);
+
+ tomoyo_put_name(member->member_name);
+}
+
+/**
+ * tomoyo_del_group - Delete "struct tomoyo_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_group(struct list_head *element)
+{
+ struct tomoyo_group *group =
+ container_of(element, typeof(*group), head.list);
+
+ tomoyo_put_name(group->group_name);
+}
+
+/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_number_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type: One of values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+ struct list_head *element)
+{
+ /*
+	 * __list_del_entry() guarantees that the list element is no longer
+	 * reachable from the list it was originally on (e.g.
+	 * tomoyo_domain_list), and synchronize_srcu() guarantees that the
+	 * list element is no longer referenced by syscall users.
+ */
+ __list_del_entry(element);
+ mutex_unlock(&tomoyo_policy_lock);
+ synchronize_srcu(&tomoyo_ss);
+ /*
+ * However, there are two users which may still be using the list
+ * element. We need to defer until both users forget this element.
+ *
+ * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+ * and "struct tomoyo_io_buffer"->w.domain forget this element.
+ */
+ if (tomoyo_struct_used_by_io_buffer(element))
+ goto reinject;
+ switch (type) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ tomoyo_del_transition_control(element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(element);
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ tomoyo_del_aggregator(element);
+ break;
+ case TOMOYO_ID_GROUP:
+ tomoyo_del_group(element);
+ break;
+ case TOMOYO_ID_PATH_GROUP:
+ tomoyo_del_path_group(element);
+ break;
+ case TOMOYO_ID_ADDRESS_GROUP:
+ tomoyo_del_address_group(element);
+ break;
+ case TOMOYO_ID_NUMBER_GROUP:
+ tomoyo_del_number_group(element);
+ break;
+ case TOMOYO_ID_CONDITION:
+ tomoyo_del_condition(element);
+ break;
+ case TOMOYO_ID_NAME:
+ /*
+ * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+ * forget this element.
+ */
+ if (tomoyo_name_used_by_io_buffer
+ (container_of(element, typeof(struct tomoyo_name),
+ head.list)->entry.name))
+ goto reinject;
+ tomoyo_del_name(element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ /*
+ * Don't kfree() until all "struct cred"->security forget this
+ * element.
+ */
+ if (atomic_read(&container_of
+ (element, typeof(struct tomoyo_domain_info),
+ list)->users))
+ goto reinject;
+ break;
+ case TOMOYO_MAX_POLICY:
+ break;
+ }
+ mutex_lock(&tomoyo_policy_lock);
+ if (type == TOMOYO_ID_DOMAIN)
+ tomoyo_del_domain(element);
+ tomoyo_memory_free(element);
+ return;
+reinject:
+ /*
+	 * We can safely reinject this element here because
+	 * (1) appending and removing list elements are both protected by
+	 *     the tomoyo_policy_lock mutex, and
+	 * (2) only this function removes list elements, and it is executed
+	 *     exclusively under the tomoyo_gc_mutex mutex.
+ */
+ mutex_lock(&tomoyo_policy_lock);
+ list_add_rcu(element, element->prev);
+}
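+
+/*
+ * The deferred-free protocol implemented above, as a sketch:
+ *
+ *   1. Unlink the element under tomoyo_policy_lock so that no new
+ *      reader can find it.
+ *   2. synchronize_srcu() waits for every in-flight tomoyo_read_lock()
+ *      section that might still hold a pointer to it.
+ *   3. If an open /sys/kernel/security/tomoyo/ file (io_buffer) or a
+ *      live "struct cred" still references the element, reinject it and
+ *      retry on a later GC pass instead of freeing it now.
+ */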
+
+/**
+ * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
+ *
+ * @id: One of values in "enum tomoyo_policy_id".
+ * @member_list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
+ struct list_head *member_list)
+{
+ struct tomoyo_acl_head *member;
+ struct tomoyo_acl_head *tmp;
+
+ list_for_each_entry_safe(member, tmp, member_list, list) {
+ if (!member->is_deleted)
+ continue;
+ member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(id, &member->list);
+ }
+}
+
+/**
+ * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
+ *
+ * @list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_acl(struct list_head *list)
+{
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+
+ list_for_each_entry_safe(acl, tmp, list, list) {
+ if (!acl->is_deleted)
+ continue;
+ acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
+ }
+}
+
+/**
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_entry(void)
+{
+ int i;
+ enum tomoyo_policy_id id;
+ struct tomoyo_policy_namespace *ns;
+
+ mutex_lock(&tomoyo_policy_lock);
+ {
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_domain_info *tmp;
+
+ list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+ list) {
+ tomoyo_collect_acl(&domain->acl_info_list);
+ if (!domain->is_deleted || atomic_read(&domain->users))
+ continue;
+ tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ for (id = 0; id < TOMOYO_MAX_POLICY; id++)
+ tomoyo_collect_member(id, &ns->policy_list[id]);
+ for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
+ tomoyo_collect_acl(&ns->acl_group[i]);
+ }
+ {
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+
+ list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+ list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
+ struct list_head *list = &ns->group_list[i];
+ struct tomoyo_group *group;
+ struct tomoyo_group *tmp;
+
+ switch (i) {
+ case 0:
+ id = TOMOYO_ID_PATH_GROUP;
+ break;
+ case 1:
+ id = TOMOYO_ID_NUMBER_GROUP;
+ break;
+ default:
+ id = TOMOYO_ID_ADDRESS_GROUP;
+ break;
+ }
+ list_for_each_entry_safe(group, tmp, list, head.list) {
+ tomoyo_collect_member(id, &group->member_list);
+ if (!list_empty(&group->member_list) ||
+ atomic_read(&group->head.users) > 0)
+ continue;
+ atomic_set(&group->head.users,
+ TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+ &group->head.list);
+ }
+ }
+ }
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct list_head *list = &tomoyo_name_list[i];
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+
+ list_for_each_entry_safe(ptr, tmp, list, list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+}
+
+/**
+ * tomoyo_gc_thread - Garbage collector thread function.
+ *
+ * @unused: Unused.
+ *
+ * Returns 0.
+ */
+static int tomoyo_gc_thread(void *unused)
+{
+ /* Garbage collector thread is exclusive. */
+ static DEFINE_MUTEX(tomoyo_gc_mutex);
+
+ if (!mutex_trylock(&tomoyo_gc_mutex))
+ goto out;
+ tomoyo_collect_entry();
+ {
+ struct tomoyo_io_buffer *head;
+ struct tomoyo_io_buffer *tmp;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
+ list) {
+ if (head->users)
+ continue;
+ list_del(&head->list);
+ kfree(head->read_buf);
+ kfree(head->write_buf);
+ kfree(head);
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ }
+ mutex_unlock(&tomoyo_gc_mutex);
+out:
+ /* This acts as do_exit(0). */
+ return 0;
+}
+
+/**
+ * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @is_register: True if register, false if unregister.
+ *
+ * Returns nothing.
+ */
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
+{
+ bool is_write = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ if (is_register) {
+ head->users = 1;
+ list_add(&head->list, &tomoyo_io_buffer_list);
+ } else {
+ is_write = head->write_buf != NULL;
+ if (!--head->users) {
+ list_del(&head->list);
+ kfree(head->read_buf);
+ kfree(head->write_buf);
+ kfree(head);
+ }
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ if (is_write)
+ kthread_run(tomoyo_gc_thread, NULL, "GC for TOMOYO");
+}
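+
+/*
+ * Note that the garbage collector is spawned only when a writer closes
+ * the interface (head->write_buf != NULL on unregister): read-only users
+ * cannot delete entries, so running "GC for TOMOYO" after their close
+ * would be wasted work.
+ */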
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
new file mode 100644
index 000000000..1cecdd797
--- /dev/null
+++ b/security/tomoyo/group.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/group.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include <linux/rculist.h>
+
+#include "common.h"
+
+/**
+ * tomoyo_same_path_group - Check for duplicated "struct tomoyo_path_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return container_of(a, struct tomoyo_path_group, head)->member_name ==
+ container_of(b, struct tomoyo_path_group, head)->member_name;
+}
+
+/**
+ * tomoyo_same_number_group - Check for duplicated "struct tomoyo_number_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return !memcmp(&container_of(a, struct tomoyo_number_group, head)
+ ->number,
+ &container_of(b, struct tomoyo_number_group, head)
+ ->number,
+ sizeof(container_of(a, struct tomoyo_number_group, head)
+ ->number));
+}
+
+/**
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type: Type of this group.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
+{
+ struct tomoyo_group *group = tomoyo_get_group(param, type);
+ int error = -EINVAL;
+
+ if (!group)
+ return -ENOMEM;
+ param->list = &group->member_list;
+ if (type == TOMOYO_PATH_GROUP) {
+ struct tomoyo_path_group e = { };
+
+ e.member_name = tomoyo_get_name(tomoyo_read_token(param));
+ if (!e.member_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_path_group);
+ tomoyo_put_name(e.member_name);
+ } else if (type == TOMOYO_NUMBER_GROUP) {
+ struct tomoyo_number_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_number_union(param, &e.number))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_number_group);
+ /*
+ * tomoyo_put_number_union() is not needed because
+ * param->data[0] != '@'.
+ */
+ } else {
+ struct tomoyo_address_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_address_group);
+ }
+out:
+ tomoyo_put_group(group);
+ return error;
+}
+
+/**
+ * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group.
+ *
+ * @pathname: Pathname to check.
+ * @group: Pointer to "struct tomoyo_path_group".
+ *
+ * Returns matched member's pathname if @pathname matches pathnames in @group,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+const struct tomoyo_path_info *
+tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_path_group *member;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (!tomoyo_path_matches_pattern(pathname, member->member_name))
+ continue;
+ return member->member_name;
+ }
+ return NULL;
+}
+
+/**
+ * tomoyo_number_matches_group - Check whether the given number matches members of the given number group.
+ *
+ * @min: Min number.
+ * @max: Max number.
+ * @group: Pointer to "struct tomoyo_number_group".
+ *
+ * Returns true if the range [@min, @max] overlaps a member of @group,
+ * false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_number_matches_group(const unsigned long min,
+ const unsigned long max,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_number_group *member;
+ bool matched = false;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (min > member->number.values[1] ||
+ max < member->number.values[0])
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
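+
+/*
+ * The test above is a closed-interval overlap check. For example, a
+ * member storing values[0] == 1024 and values[1] == 2048 matches a
+ * request with min == 2000 and max == 3000 (the ranges intersect), but
+ * not one with min == 100 and max == 1023.
+ */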
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group: Pointer to "struct tomoyo_address_group".
+ *
+ * Returns true if @address matches an address in @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_address_group *member;
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+ continue;
+ if (memcmp(&member->address.ip[0], address, size) > 0 ||
+ memcmp(address, &member->address.ip[1], size) > 0)
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
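+
+/*
+ * Comparing addresses with memcmp() works here because both IPv4 and
+ * IPv6 addresses are stored in network byte order (big endian), so a
+ * bytewise comparison orders them numerically; ip[0] and ip[1] hold the
+ * inclusive lower and upper bounds of a member's range.
+ */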
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
new file mode 100644
index 000000000..363b65be8
--- /dev/null
+++ b/security/tomoyo/load_policy.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/load_policy.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+
+/*
+ * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER)
+ */
+static const char *tomoyo_loader;
+
+/**
+ * tomoyo_loader_setup - Set policy loader.
+ *
+ * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init).
+ *
+ * Returns 1.
+ */
+static int __init tomoyo_loader_setup(char *str)
+{
+ tomoyo_loader = str;
+ return 1;
+}
+
+__setup("TOMOYO_loader=", tomoyo_loader_setup);
+
+/**
+ * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
+ *
+ * Returns true if /sbin/tomoyo-init exists, false otherwise.
+ */
+static bool tomoyo_policy_loader_exists(void)
+{
+ struct path path;
+
+ if (!tomoyo_loader)
+ tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER;
+ if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
+ pr_info("Not activating Mandatory Access Control as %s does not exist.\n",
+ tomoyo_loader);
+ return false;
+ }
+ path_put(&path);
+ return true;
+}
+
+/*
+ * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER)
+ */
+static const char *tomoyo_trigger;
+
+/**
+ * tomoyo_trigger_setup - Set trigger for activation.
+ *
+ * @str: Program to use as an activation trigger (e.g. /sbin/init).
+ *
+ * Returns 1.
+ */
+static int __init tomoyo_trigger_setup(char *str)
+{
+ tomoyo_trigger = str;
+ return 1;
+}
+
+__setup("TOMOYO_trigger=", tomoyo_trigger_setup);
+
+/**
+ * tomoyo_load_policy - Run external policy loader to load policy.
+ *
+ * @filename: The program about to start.
+ *
+ * This function checks whether @filename is /sbin/init, and if so
+ * invokes /sbin/tomoyo-init, waits for its termination, and then
+ * continues with the invocation of /sbin/init.
+ * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
+ * writes to /sys/kernel/security/tomoyo/ interfaces.
+ *
+ * Returns nothing.
+ */
+void tomoyo_load_policy(const char *filename)
+{
+ static bool done;
+ char *argv[2];
+ char *envp[3];
+
+ if (tomoyo_policy_loaded || done)
+ return;
+ if (!tomoyo_trigger)
+ tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER;
+ if (strcmp(filename, tomoyo_trigger))
+ return;
+ if (!tomoyo_policy_loader_exists())
+ return;
+ done = true;
+ pr_info("Calling %s to load policy. Please wait.\n", tomoyo_loader);
+ argv[0] = (char *) tomoyo_loader;
+ argv[1] = NULL;
+ envp[0] = "HOME=/";
+ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[2] = NULL;
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ tomoyo_check_profile();
+}
+
+#endif
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
new file mode 100644
index 000000000..1b570bde7
--- /dev/null
+++ b/security/tomoyo/memory.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/memory.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include "common.h"
+
+/**
+ * tomoyo_warn_oom - Print an out-of-memory warning message.
+ *
+ * @function: Function's name.
+ */
+void tomoyo_warn_oom(const char *function)
+{
+ /* Reduce error messages. */
+ static pid_t tomoyo_last_pid;
+ const pid_t pid = current->pid;
+
+ if (tomoyo_last_pid != pid) {
+ pr_warn("ERROR: Out of memory at %s.\n", function);
+ tomoyo_last_pid = pid;
+ }
+ if (!tomoyo_policy_loaded)
+ panic("MAC Initialization failed.\n");
+}
+
+/* Memory currently used by policy/audit log/query. */
+unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+/* Memory quota for "policy"/"audit log"/"query". */
+unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+
+/**
+ * tomoyo_memory_ok - Check memory quota.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns true if @ptr is not NULL and the quota is not exceeded,
+ * false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+bool tomoyo_memory_ok(void *ptr)
+{
+ if (ptr) {
+ const size_t s = ksize(ptr);
+
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
+ if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+ tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
+ return true;
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
+ }
+ tomoyo_warn_oom(__func__);
+ return false;
+}
+
+/**
+ * tomoyo_commit_ok - Allocate memory for a new entry and check memory quota.
+ *
+ * @data: Data to copy from.
+ * @size: Size in byte.
+ *
+ * Returns pointer to allocated memory on success, NULL otherwise.
+ * @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+void *tomoyo_commit_ok(void *data, const unsigned int size)
+{
+ void *ptr = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
+
+ if (tomoyo_memory_ok(ptr)) {
+ memmove(ptr, data, size);
+ memset(data, 0, size);
+ return ptr;
+ }
+ kfree(ptr);
+ return NULL;
+}
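+
+/*
+ * The usual calling pattern (a sketch; see tomoyo_get_group() below):
+ * the caller fills a stack-local template and tomoyo_commit_ok() moves
+ * it into quota-accounted heap memory:
+ *
+ *   struct tomoyo_group e = { };
+ *
+ *   e.group_name = tomoyo_get_name(group_name);
+ *   entry = tomoyo_commit_ok(&e, sizeof(e)); /* @e is zeroed on success. */
+ *
+ * Zeroing @data on success transfers ownership of any referenced names
+ * to the committed copy, so the caller's common cleanup path (e.g.
+ * tomoyo_put_name(e.group_name)) becomes a harmless no-op.
+ */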
+
+/**
+ * tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group".
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @idx: Index number.
+ *
+ * Returns pointer to "struct tomoyo_group" on success, NULL otherwise.
+ */
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+ const u8 idx)
+{
+ struct tomoyo_group e = { };
+ struct tomoyo_group *group = NULL;
+ struct list_head *list;
+ const char *group_name = tomoyo_read_token(param);
+ bool found = false;
+
+ if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP)
+ return NULL;
+ e.group_name = tomoyo_get_name(group_name);
+ if (!e.group_name)
+ return NULL;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ list = &param->ns->group_list[idx];
+ list_for_each_entry(group, list, head.list) {
+ if (e.group_name != group->group_name ||
+ atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ atomic_inc(&group->head.users);
+ found = true;
+ break;
+ }
+ if (!found) {
+ struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
+
+ if (entry) {
+ INIT_LIST_HEAD(&entry->member_list);
+ atomic_set(&entry->head.users, 1);
+ list_add_tail_rcu(&entry->head.list, list);
+ group = entry;
+ found = true;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_name(e.group_name);
+ return found ? group : NULL;
+}
+
+/*
+ * tomoyo_name_list is used for holding string data used by TOMOYO.
+ * Since the same string data is likely to be used multiple times (e.g.
+ * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
+ * "const struct tomoyo_path_info *".
+ */
+struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+
+/**
+ * tomoyo_get_name - Allocate permanent memory for string data.
+ *
+ * @name: The string to store into the permanent memory.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_name(const char *name)
+{
+ struct tomoyo_name *ptr;
+ unsigned int hash;
+ int len;
+ struct list_head *head;
+
+ if (!name)
+ return NULL;
+ len = strlen(name) + 1;
+ hash = full_name_hash(NULL, (const unsigned char *) name, len - 1);
+ head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return NULL;
+ list_for_each_entry(ptr, head, head.list) {
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ atomic_inc(&ptr->head.users);
+ goto out;
+ }
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS | __GFP_NOWARN);
+ if (tomoyo_memory_ok(ptr)) {
+ ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+ memmove((char *) ptr->entry.name, name, len);
+ atomic_set(&ptr->head.users, 1);
+ tomoyo_fill_path_info(&ptr->entry);
+ list_add_tail(&ptr->head.list, head);
+ } else {
+ kfree(ptr);
+ ptr = NULL;
+ }
+out:
+ mutex_unlock(&tomoyo_policy_lock);
+ return ptr ? &ptr->entry : NULL;
+}
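+
+/*
+ * Interning example: if two domains both reference "/lib/libc-2.5.so",
+ * they end up sharing a single "struct tomoyo_name" whose reference
+ * count (head.users) is 2. The string is hashed with full_name_hash()
+ * into one of TOMOYO_MAX_HASH buckets, and the entry is freed by the
+ * garbage collector only after every user has called tomoyo_put_name().
+ */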
+
+/* Initial namespace. */
+struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+
+/**
+ * tomoyo_mm_init - Initialize mm related code.
+ */
+void __init tomoyo_mm_init(void)
+{
+ int idx;
+
+ for (idx = 0; idx < TOMOYO_MAX_HASH; idx++)
+ INIT_LIST_HEAD(&tomoyo_name_list[idx]);
+ tomoyo_kernel_namespace.name = "<kernel>";
+ tomoyo_init_policy_namespace(&tomoyo_kernel_namespace);
+ tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace;
+ INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
+ tomoyo_kernel_domain.domainname = tomoyo_get_name("<kernel>");
+ list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
+}
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
new file mode 100644
index 000000000..2755971f5
--- /dev/null
+++ b/security/tomoyo/mount.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/mount.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include <uapi/linux/mount.h>
+#include "common.h"
+
+/* String table for special mount operations. */
+static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = {
+ [TOMOYO_MOUNT_BIND] = "--bind",
+ [TOMOYO_MOUNT_MOVE] = "--move",
+ [TOMOYO_MOUNT_REMOUNT] = "--remount",
+ [TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable",
+ [TOMOYO_MOUNT_MAKE_PRIVATE] = "--make-private",
+ [TOMOYO_MOUNT_MAKE_SLAVE] = "--make-slave",
+ [TOMOYO_MOUNT_MAKE_SHARED] = "--make-shared",
+};
+
+/**
+ * tomoyo_audit_mount_log - Audit mount log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n",
+ r->param.mount.dev->name,
+ r->param.mount.dir->name,
+ r->param.mount.type->name,
+ r->param.mount.flags);
+}
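+
+/*
+ * With the format string above, a request is reported (and can be
+ * pasted back verbatim as a policy line) as, for example:
+ *
+ *   file mount /dev/sda1 /mnt/ ext4 0x0
+ */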
+
+/**
+ * tomoyo_check_mount_acl - Check permission for mount operation, which takes three pathnames and one number.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_mount_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_compare_number_union(r->param.mount.flags,
+ &acl->flags) &&
+ tomoyo_compare_name_union(r->param.mount.type,
+ &acl->fs_type) &&
+ tomoyo_compare_name_union(r->param.mount.dir,
+ &acl->dir_name) &&
+ (!r->param.mount.need_dev ||
+ tomoyo_compare_name_union(r->param.mount.dev,
+ &acl->dev_name));
+}
+
+/**
+ * tomoyo_mount_acl - Check permission for mount() operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @dev_name: Name of device file. May be NULL.
+ * @dir: Pointer to "struct path".
+ * @type: Name of filesystem type.
+ * @flags: Mount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+ const char *dev_name,
+ const struct path *dir, const char *type,
+ unsigned long flags)
+{
+ struct tomoyo_obj_info obj = { };
+ struct path path;
+ struct file_system_type *fstype = NULL;
+ const char *requested_type = NULL;
+ const char *requested_dir_name = NULL;
+ const char *requested_dev_name = NULL;
+ struct tomoyo_path_info rtype;
+ struct tomoyo_path_info rdev;
+ struct tomoyo_path_info rdir;
+ int need_dev = 0;
+ int error = -ENOMEM;
+
+ r->obj = &obj;
+
+ /* Get fstype. */
+ requested_type = tomoyo_encode(type);
+ if (!requested_type)
+ goto out;
+ rtype.name = requested_type;
+ tomoyo_fill_path_info(&rtype);
+
+ /* Get mount point. */
+ obj.path2 = *dir;
+ requested_dir_name = tomoyo_realpath_from_path(dir);
+ if (!requested_dir_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ rdir.name = requested_dir_name;
+ tomoyo_fill_path_info(&rdir);
+
+ /* Compare fs name. */
+ if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) {
+ /* dev_name is ignored. */
+ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) {
+ /* dev_name is ignored. */
+ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
+ need_dev = -1; /* dev_name is a directory */
+ } else {
+ fstype = get_fs_type(type);
+ if (!fstype) {
+ error = -ENODEV;
+ goto out;
+ }
+ if (fstype->fs_flags & FS_REQUIRES_DEV)
+ /* dev_name is a block device file. */
+ need_dev = 1;
+ }
+ if (need_dev) {
+ /* Get mount point or device file. */
+ if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+ error = -ENOENT;
+ goto out;
+ }
+ obj.path1 = path;
+ requested_dev_name = tomoyo_realpath_from_path(&path);
+ if (!requested_dev_name) {
+ error = -ENOENT;
+ goto out;
+ }
+ } else {
+ /* Map dev_name to "<NULL>" if no dev_name given. */
+ if (!dev_name)
+ dev_name = "<NULL>";
+ requested_dev_name = tomoyo_encode(dev_name);
+ if (!requested_dev_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ }
+ rdev.name = requested_dev_name;
+ tomoyo_fill_path_info(&rdev);
+ r->param_type = TOMOYO_TYPE_MOUNT_ACL;
+ r->param.mount.need_dev = need_dev;
+ r->param.mount.dev = &rdev;
+ r->param.mount.dir = &rdir;
+ r->param.mount.type = &rtype;
+ r->param.mount.flags = flags;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_mount_acl);
+ error = tomoyo_audit_mount_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ out:
+ kfree(requested_dev_name);
+ kfree(requested_dir_name);
+ if (fstype)
+ put_filesystem(fstype);
+ kfree(requested_type);
+ /* Drop refcount obtained by kern_path(). */
+ if (obj.path1.dentry)
+ path_put(&obj.path1);
+ return error;
+}
+
+/**
+ * tomoyo_mount_permission - Check permission for mount() operation.
+ *
+ * @dev_name: Name of device file. May be NULL.
+ * @path: Pointer to "struct path".
+ * @type: Name of filesystem type. May be NULL.
+ * @flags: Mount options.
+ * @data_page: Optional data. May be NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_mount_permission(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags,
+ void *data_page)
+{
+ struct tomoyo_request_info r;
+ int error;
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+ flags &= ~MS_MGC_MSK;
+ if (flags & MS_REMOUNT) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT];
+ flags &= ~MS_REMOUNT;
+ } else if (flags & MS_BIND) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_BIND];
+ flags &= ~MS_BIND;
+ } else if (flags & MS_SHARED) {
+ if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
+ flags &= ~MS_SHARED;
+ } else if (flags & MS_PRIVATE) {
+ if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE];
+ flags &= ~MS_PRIVATE;
+ } else if (flags & MS_SLAVE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE];
+ flags &= ~MS_SLAVE;
+ } else if (flags & MS_UNBINDABLE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
+ flags &= ~MS_UNBINDABLE;
+ } else if (flags & MS_MOVE) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
+ flags &= ~MS_MOVE;
+ }
+ if (!type)
+ type = "<NULL>";
+ idx = tomoyo_read_lock();
+ error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
+ tomoyo_read_unlock(idx);
+ return error;
+}
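+
+/*
+ * Example of the flag canonicalisation above: a bind mount request,
+ *
+ *   mount("/olddir", "/newdir", NULL, MS_BIND, NULL);
+ *
+ * is checked as if the filesystem type were the pseudo keyword "--bind",
+ * so (assuming TOMOYO 2.x policy syntax) it can be granted with a line
+ * such as
+ *
+ *   file mount --bind /olddir/ /newdir/ 0x0
+ */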
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 000000000..8dc61335f
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+ __be16 port; /* In network byte order. */
+ const __be32 *address; /* In network byte order. */
+ bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+	u8 *addr; /* This may not be a '\0'-terminated string. */
+ unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+ u8 protocol;
+ u8 operation;
+ struct tomoyo_inet_addr_info inet;
+ struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+ [SOCK_STREAM] = "stream",
+ [SOCK_DGRAM] = "dgram",
+ [SOCK_RAW] = "raw",
+ [SOCK_SEQPACKET] = "seqpacket",
+ [0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+ [4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr)
+{
+ u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+ u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+ char *address = tomoyo_read_token(param);
+ const char *end;
+
+ if (!strchr(address, ':') &&
+ in4_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = false;
+ if (!*end)
+ ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+ else if (*end++ != '-' ||
+ in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ if (in6_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = true;
+ if (!*end)
+ memmove(max, min, sizeof(u16) * 8);
+ else if (*end++ != '-' ||
+ in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ return false;
+}
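+
+/*
+ * Accepted address tokens (a sketch of what the in4_pton()/in6_pton()
+ * calls above parse):
+ *
+ *   10.0.0.1             single IPv4 address (min == max)
+ *   10.0.0.1-10.0.0.255  inclusive IPv4 range
+ *   ::1                  single IPv6 address
+ *   ::1-::ff             inclusive IPv6 range
+ *
+ * The strchr(address, ':') test routes any token containing ':' to the
+ * IPv6 parser, since a dotted-quad IPv4 address never contains a colon.
+ */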
+
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to __be32.
+ * @max_ip: Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+ const __be32 *min_ip, const __be32 *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+ *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to "struct in6_addr".
+ * @max_ip: Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+ const struct in6_addr *min_ip,
+ const struct in6_addr *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+ !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf: Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr)
+{
+ if (ptr->is_ipv6)
+ tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+ else
+ tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+ &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ },
+ [SOCK_RAW] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ },
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ },
+ [SOCK_SEQPACKET] = {
+ [TOMOYO_NETWORK_BIND] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ },
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+ tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_inet_acl, head)->perm;
+ u8 perm = READ_ONCE(*a_perm);
+ const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_unix_acl, head)->perm;
+ u8 perm = READ_ONCE(*a_perm);
+ const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ WRITE_ONCE(*a_perm, perm);
+ return !perm;
+}
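A sketch of the merge semantics with concrete bits (operation indices from "enum tomoyo_network_acl_index"; the sequence is illustrative):

    u8 perm = 1 << TOMOYO_NETWORK_BIND;     /* existing entry grants bind  */
    perm |= 1 << TOMOYO_NETWORK_LISTEN;     /* add: entry now bind+listen  */
    perm &= ~(1 << TOMOYO_NETWORK_BIND);    /* delete: only listen remains */
    /* Once the last bit is cleared, !perm becomes true and the caller
     * can garbage-collect the now-empty ACL entry. */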
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+ int error = -EINVAL;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (param->data[0] == '@') {
+ param->data++;
+ e.address.group =
+ tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+ if (!e.address.group)
+ return -ENOMEM;
+ } else {
+ if (!tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ }
+ if (!tomoyo_parse_number_union(param, &e.port) ||
+ e.port.values[1] > 65535)
+ goto out;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_inet_acl,
+ tomoyo_merge_inet_acl);
+out:
+ tomoyo_put_group(e.address.group);
+ tomoyo_put_number_union(&e.port);
+ return error;
+}
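For illustration, domain-policy lines this parser accepts look roughly as follows (the protocol and operation keywords come from tomoyo_proto_keyword[] and tomoyo_socket_keyword[]; the addresses, group name and ports are hypothetical):

    network inet stream connect 127.0.0.1 80
    network inet dgram bind 10.0.0.0-10.255.255.255 1024-65535
    network inet raw send @trusted_hosts 255

The protocol and operation tokens select the ->perm bits, then either an '@'-prefixed address group or an address (or address range) follows, and finally a port number or range, capped at 65535.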
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+ int error;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ return -EINVAL;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_unix_acl,
+ tomoyo_merge_unix_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
+
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @family: Name of socket family ("inet" or "unix").
+ * @protocol: Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address: String representation of the address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+ const char *family, const u8 protocol,
+ const u8 operation, const char *address)
+{
+ return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+ tomoyo_proto_keyword[protocol],
+ tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+ char buf[128];
+ int len;
+ const __be32 *address = r->param.inet_network.address;
+
+ if (r->param.inet_network.is_ipv6)
+ tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+ address, (const struct in6_addr *) address);
+ else
+ tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+ len = strlen(buf);
+ snprintf(buf + len, sizeof(buf) - len, " %u",
+ r->param.inet_network.port);
+ return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+ r->param.inet_network.operation, buf);
+}
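Note the line assembled here and handed to tomoyo_supervisor(), e.g. "network inet stream connect 127.0.0.1 80": it follows the same grammar that tomoyo_write_inet_network() and tomoyo_write_unix_network() parse, so a rejected request printed to the audit/query interface can be pasted back into domain policy verbatim.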
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+ r->param.unix_network.operation,
+ r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_inet_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+ if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+ !tomoyo_compare_number_union(r->param.inet_network.port,
+ &acl->port))
+ return false;
+ if (acl->address.group)
+ return tomoyo_address_matches_group
+ (r->param.inet_network.is_ipv6,
+ r->param.inet_network.address, acl->address.group);
+ return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+ memcmp(&acl->address.ip[0],
+ r->param.inet_network.address, size) <= 0 &&
+ memcmp(r->param.inet_network.address,
+ &acl->address.ip[1], size) <= 0;
+}
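The range test above relies on memcmp() over network-byte-order bytes being an unsigned lexicographic comparison, which coincides with numeric ordering for both the 4-byte and 16-byte cases. A small userspace sketch of the same predicate (addresses are illustrative):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <string.h>

    /* Returns true when lo <= addr <= hi, compared byte-wise exactly
     * like tomoyo_check_inet_acl() does. */
    static bool in_range_v4(struct in_addr lo, struct in_addr hi,
                            struct in_addr addr)
    {
            return memcmp(&lo, &addr, 4) <= 0 &&
                   memcmp(&addr, &hi, 4) <= 0;
    }

    int main(void)
    {
            struct in_addr lo, hi, addr;

            inet_pton(AF_INET, "10.0.0.0", &lo);
            inet_pton(AF_INET, "10.0.0.255", &hi);
            inet_pton(AF_INET, "10.0.0.42", &addr);
            return !in_range_v4(lo, hi, addr);      /* exits 0: in range */
    }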
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_unix_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.unix_network.operation)) &&
+ tomoyo_compare_name_union(r->param.unix_network.address,
+ &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ r.param_type = TOMOYO_TYPE_INET_ACL;
+ r.param.inet_network.protocol = address->protocol;
+ r.param.inet_network.operation = address->operation;
+ r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+ r.param.inet_network.address = address->inet.address;
+ r.param.inet_network.port = ntohs(address->inet.port);
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+ error = tomoyo_audit_inet_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for an inet domain socket operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port: Port number.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+ const unsigned int addr_len,
+ const u16 port,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_inet_addr_info *i = &address->inet;
+
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return 0;
+ switch (addr->sa_family) {
+ case AF_INET6:
+ if (addr_len < SIN6_LEN_RFC2133)
+ goto skip;
+ i->is_ipv6 = true;
+ i->address = (__be32 *)
+ ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+ i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+ break;
+ case AF_INET:
+ if (addr_len < sizeof(struct sockaddr_in))
+ goto skip;
+ i->is_ipv6 = false;
+ i->address = (__be32 *)
+ &((struct sockaddr_in *) addr)->sin_addr;
+ i->port = ((struct sockaddr_in *) addr)->sin_port;
+ break;
+ default:
+ goto skip;
+ }
+ if (address->protocol == SOCK_RAW)
+ i->port = htons(port);
+ return tomoyo_inet_entry(address);
+skip:
+ return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ char *buf = address->unix0.addr;
+ int len = address->unix0.addr_len - sizeof(sa_family_t);
+
+ if (len <= 0) {
+ buf = "anonymous";
+ len = 9;
+ } else if (buf[0]) {
+ len = strnlen(buf, len);
+ }
+ buf = tomoyo_encode2(buf, len);
+ if (buf) {
+ struct tomoyo_path_info addr;
+
+ addr.name = buf;
+ tomoyo_fill_path_info(&addr);
+ r.param_type = TOMOYO_TYPE_UNIX_ACL;
+ r.param.unix_network.protocol = address->protocol;
+ r.param.unix_network.operation = address->operation;
+ r.param.unix_network.address = &addr;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+ error = tomoyo_audit_unix_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf);
+ } else
+ error = -ENOMEM;
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_unix_address - Check permission for a unix domain socket operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+ const unsigned int addr_len,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_unix_addr_info *u = &address->unix0;
+
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return 0;
+ if (addr->sa_family != AF_UNIX)
+ return 0;
+ u->addr = ((struct sockaddr_un *) addr)->sun_path;
+ u->addr_len = addr_len;
+ return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether the current thread is a kernel service.
+ *
+ * Returns true if the current thread is a kernel service (kthread), false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+ /* Kernel threads are exempt from TOMOYO's socket permission checks. */
+ return current->flags & PF_KTHREAD;
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+ u8 family;
+
+ if (tomoyo_kernel_service())
+ return 0;
+ family = sk->sk_family;
+ switch (family) {
+ case PF_INET:
+ case PF_INET6:
+ case PF_UNIX:
+ return family;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+ struct sockaddr_storage addr;
+ int addr_len;
+
+ if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+ return 0;
+ {
+ const int error = sock->ops->getname(sock, (struct sockaddr *)
+ &addr, 0);
+
+ if (error < 0)
+ return error;
+ addr_len = error;
+ }
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_LISTEN;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *) &addr,
+ addr_len, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+ 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ address.protocol = type;
+ switch (type) {
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ address.operation = TOMOYO_NETWORK_SEND;
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ address.operation = TOMOYO_NETWORK_CONNECT;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ case SOCK_SEQPACKET:
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_BIND;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!msg->msg_name || !family ||
+ (type != SOCK_DGRAM && type != SOCK_RAW))
+ return 0;
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_SEND;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *)
+ msg->msg_name,
+ msg->msg_namelen, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+ msg->msg_namelen,
+ sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/policy/exception_policy.conf.default b/security/tomoyo/policy/exception_policy.conf.default
new file mode 100644
index 000000000..2678df496
--- /dev/null
+++ b/security/tomoyo/policy/exception_policy.conf.default
@@ -0,0 +1,2 @@
+initialize_domain /sbin/modprobe from any
+initialize_domain /sbin/hotplug from any
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
new file mode 100644
index 000000000..1c483ee7f
--- /dev/null
+++ b/security/tomoyo/realpath.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/realpath.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/magic.h>
+#include <linux/proc_fs.h>
+
+/**
+ * tomoyo_encode2 - Encode a binary string to an ASCII string.
+ *
+ * @str: String in binary format.
+ * @str_len: Size of @str in bytes.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode2(const char *str, int str_len)
+{
+ int i;
+ int len = 0;
+ const char *p = str;
+ char *cp;
+ char *cp0;
+
+ if (!p)
+ return NULL;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
+ if (c == '\\')
+ len += 2;
+ else if (c > ' ' && c < 127)
+ len++;
+ else
+ len += 4;
+ }
+ len++;
+ /* Reserve space for appending "/". */
+ cp = kzalloc(len + 10, GFP_NOFS);
+ if (!cp)
+ return NULL;
+ cp0 = cp;
+ p = str;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
+ if (c == '\\') {
+ *cp++ = '\\';
+ *cp++ = '\\';
+ } else if (c > ' ' && c < 127) {
+ *cp++ = c;
+ } else {
+ *cp++ = '\\';
+ *cp++ = (c >> 6) + '0';
+ *cp++ = ((c >> 3) & 7) + '0';
+ *cp++ = (c & 7) + '0';
+ }
+ }
+ return cp0;
+}
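A userspace re-implementation of the same escaping rules, a close mirror of the loop above kept only to show the output format:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *encode(const char *p)
    {
            size_t i, n = strlen(p);
            char *out = malloc(4 * n + 1), *cp = out;   /* worst case: \ooo */

            if (!out)
                    return NULL;
            for (i = 0; i < n; i++) {
                    unsigned char c = p[i];

                    if (c == '\\') {                    /* '\' doubles      */
                            *cp++ = '\\';
                            *cp++ = '\\';
                    } else if (c > ' ' && c < 127) {    /* printable ASCII  */
                            *cp++ = c;
                    } else {                            /* \ooo octal       */
                            *cp++ = '\\';
                            *cp++ = (c >> 6) + '0';
                            *cp++ = ((c >> 3) & 7) + '0';
                            *cp++ = (c & 7) + '0';
                    }
            }
            *cp = '\0';
            return out;
    }

    int main(void)
    {
            char *s = encode("foo bar");

            printf("%s\n", s);  /* "foo\040bar": the space becomes octal 040 */
            free(s);
            return 0;
    }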
+
+/**
+ * tomoyo_encode - Encode a binary string to an ASCII string.
+ *
+ * @str: String in binary format.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode(const char *str)
+{
+ return str ? tomoyo_encode2(str, strlen(str)) : NULL;
+}
+
+/**
+ * tomoyo_get_absolute_path - Get the path of a dentry, ignoring the chroot'ed root.
+ *
+ * @path: Pointer to "struct path".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the pathname (a pointer into @buffer) on success, an error pointer
+ * otherwise.
+ *
+ * If the dentry is a directory, a trailing '/' is appended.
+ */
+static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer,
+ const int buflen)
+{
+ char *pos = ERR_PTR(-ENOMEM);
+
+ if (buflen >= 256) {
+ /* go to whatever namespace root we are under */
+ pos = d_absolute_path(path, buffer, buflen - 1);
+ if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+ struct inode *inode = d_backing_inode(path->dentry);
+
+ if (inode && S_ISDIR(inode->i_mode)) {
+ buffer[buflen - 2] = '/';
+ buffer[buflen - 1] = '\0';
+ }
+ }
+ }
+ return pos;
+}
+
+/**
+ * tomoyo_get_dentry_path - Get the path of a dentry.
+ *
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the pathname (a pointer into @buffer) on success, an error pointer
+ * otherwise.
+ *
+ * If the dentry is a directory, a trailing '/' is appended.
+ */
+static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer,
+ const int buflen)
+{
+ char *pos = ERR_PTR(-ENOMEM);
+
+ if (buflen >= 256) {
+ pos = dentry_path_raw(dentry, buffer, buflen - 1);
+ if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+ struct inode *inode = d_backing_inode(dentry);
+
+ if (inode && S_ISDIR(inode->i_mode)) {
+ buffer[buflen - 2] = '/';
+ buffer[buflen - 1] = '\0';
+ }
+ }
+ }
+ return pos;
+}
+
+/**
+ * tomoyo_get_local_path - Get the path of a dentry.
+ *
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
+ *
+ * Returns the pathname (a pointer into @buffer) on success, an error pointer
+ * otherwise.
+ */
+static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
+ const int buflen)
+{
+ struct super_block *sb = dentry->d_sb;
+ char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen);
+
+ if (IS_ERR(pos))
+ return pos;
+ /* Convert from $PID to self if $PID is current thread. */
+ if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') {
+ char *ep;
+ const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10);
+ struct pid_namespace *proc_pidns = proc_pid_ns(sb);
+
+ if (*ep == '/' && pid && pid ==
+ task_tgid_nr_ns(current, proc_pidns)) {
+ pos = ep - 5;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, "/self", 5);
+ }
+ goto prepend_filesystem_name;
+ }
+ /* Use filesystem name for unnamed devices. */
+ if (!MAJOR(sb->s_dev))
+ goto prepend_filesystem_name;
+ {
+ struct inode *inode = d_backing_inode(sb->s_root);
+
+ /*
+ * Use filesystem name if filesystem does not support rename()
+ * operation.
+ */
+ if (!inode->i_op->rename)
+ goto prepend_filesystem_name;
+ }
+ /* Prepend device name. */
+ {
+ char name[64];
+ int name_len;
+ const dev_t dev = sb->s_dev;
+
+ name[sizeof(name) - 1] = '\0';
+ snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev),
+ MINOR(dev));
+ name_len = strlen(name);
+ pos -= name_len;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, name, name_len);
+ return pos;
+ }
+ /* Prepend filesystem name. */
+prepend_filesystem_name:
+ {
+ const char *name = sb->s_type->name;
+ const int name_len = strlen(name);
+
+ pos -= name_len + 1;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, name, name_len);
+ pos[name_len] = ':';
+ }
+ return pos;
+out:
+ return ERR_PTR(-ENOMEM);
+}
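Putting the branches together, the names this function produces look like the following (hypothetical paths; the exact prefix depends on the filesystem and device):

    proc:/self/status        (procfs, with $PID rewritten to "self" for the caller)
    dev(8,1):/boot/vmlinuz   (named device whose filesystem supports rename())
    tmpfs:/run/lock/         (unnamed device or rename()-less filesystem)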
+
+/**
+ * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns the realpath of the given @path on success, NULL otherwise.
+ *
+ * If the dentry is a directory, a trailing '/' is appended.
+ * Characters out of 0x20 < c < 0x7F range are converted to
+ * \ooo style octal string.
+ * Character \ is converted to \\ string.
+ *
+ * This function uses kzalloc(), so the caller must call kfree()
+ * if this function didn't return NULL.
+ */
+char *tomoyo_realpath_from_path(const struct path *path)
+{
+ char *buf = NULL;
+ char *name = NULL;
+ unsigned int buf_len = PAGE_SIZE / 2;
+ struct dentry *dentry = path->dentry;
+ struct super_block *sb = dentry->d_sb;
+
+ while (1) {
+ char *pos;
+ struct inode *inode;
+
+ buf_len <<= 1;
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf)
+ break;
+ /* To make sure that pos is '\0' terminated. */
+ buf[buf_len - 1] = '\0';
+ /* For "pipe:[\$]" and "socket:[\$]". */
+ if (dentry->d_op && dentry->d_op->d_dname) {
+ pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
+ goto encode;
+ }
+ inode = d_backing_inode(sb->s_root);
+ /*
+ * Get local name for filesystems without rename() operation
+ */
+ if ((!inode->i_op->rename &&
+ !(sb->s_type->fs_flags & FS_REQUIRES_DEV)))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ /* Get absolute name for the rest. */
+ else {
+ pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
+encode:
+ if (IS_ERR(pos))
+ continue;
+ name = tomoyo_encode(pos);
+ break;
+ }
+ kfree(buf);
+ if (!name)
+ tomoyo_warn_oom(__func__);
+ return name;
+}
+
+/**
+ * tomoyo_realpath_nofollow - Get realpath of a pathname.
+ *
+ * @pathname: The pathname to solve.
+ *
+ * Returns the realpath of @pathname on success, NULL otherwise.
+ */
+char *tomoyo_realpath_nofollow(const char *pathname)
+{
+ struct path path;
+
+ if (pathname && kern_path(pathname, 0, &path) == 0) {
+ char *buf = tomoyo_realpath_from_path(&path);
+
+ path_put(&path);
+ return buf;
+ }
+ return NULL;
+}
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
new file mode 100644
index 000000000..a27057984
--- /dev/null
+++ b/security/tomoyo/securityfs_if.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/securityfs_if.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/security.h>
+#include "common.h"
+
+/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+
+ return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If the domain transition was permitted but failed, this function returns an
+ * error rather than terminating the current thread with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int error;
+
+ if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+ return -ENOMEM;
+ data = memdup_user_nul(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ tomoyo_normalize_line(data);
+ if (tomoyo_correct_domain(data)) {
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_path_info name;
+ struct tomoyo_request_info r;
+
+ name.name = data;
+ tomoyo_fill_path_info(&name);
+ /* Check "task manual_domain_transition" permission. */
+ tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+ r.param.task.domainname = &name;
+ tomoyo_check_acl(&r, tomoyo_check_task_acl);
+ if (!r.granted)
+ error = -EPERM;
+ else {
+ struct tomoyo_domain_info *new_domain =
+ tomoyo_assign_domain(data, true);
+ if (!new_domain) {
+ error = -ENOENT;
+ } else {
+ struct tomoyo_task *s = tomoyo_task(current);
+ struct tomoyo_domain_info *old_domain =
+ s->domain_info;
+
+ s->domain_info = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ error = 0;
+ }
+ }
+ tomoyo_read_unlock(idx);
+ } else
+ error = -EINVAL;
+ kfree(data);
+ return error ? error : count;
+}
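A hedged userspace sketch of driving this interface (the target domainname is hypothetical, and the write only succeeds when a matching "task manual_domain_transition" entry exists in the calling domain's policy):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *domain = "<kernel> /usr/sbin/sshd";  /* hypothetical */
            int fd = open("/sys/kernel/security/tomoyo/self_domain", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Returns the byte count on success; fails with EPERM if the
             * transition is not permitted by policy. */
            if (write(fd, domain, strlen(domain)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }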
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname which current thread belongs to.
+ * @count: Size of @buf.
+ * @ppos: Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *domain = tomoyo_domain()->domainname->name;
+ loff_t len = strlen(domain);
+ loff_t pos = *ppos;
+
+ if (pos >= len || !count)
+ return 0;
+ len -= pos;
+ if (count < len)
+ len = count;
+ if (copy_to_user(buf, domain + pos, len))
+ return -EFAULT;
+ *ppos += len;
+ return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+ .write = tomoyo_write_self,
+ .read = tomoyo_read_self,
+};
+
+/**
+ * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_open(struct inode *inode, struct file *file)
+{
+ const u8 key = (uintptr_t) file_inode(file)->i_private;
+
+ return tomoyo_open_control(key, file);
+}
+
+/**
+ * tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0.
+ */
+static int tomoyo_release(struct inode *inode, struct file *file)
+{
+ tomoyo_close_control(file->private_data);
+ return 0;
+}
+
+/**
+ * tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". May be NULL.
+ *
+ * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
+ * EPOLLOUT | EPOLLWRNORM otherwise.
+ */
+static __poll_t tomoyo_poll(struct file *file, poll_table *wait)
+{
+ return tomoyo_poll_control(file, wait);
+}
+
+/**
+ * tomoyo_read - read() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns bytes read on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return tomoyo_read_control(file->private_data, buf, count);
+}
+
+/**
+ * tomoyo_write - write() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ */
+static ssize_t tomoyo_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return tomoyo_write_control(file->private_data, buf, count);
+}
+
+/*
+ * tomoyo_operations is a "struct file_operations" which is used for handling
+ * /sys/kernel/security/tomoyo/ interface.
+ *
+ * Some files under /sys/kernel/security/tomoyo/ directory accept open(O_RDWR).
+ * See tomoyo_io_buffer for internals.
+ */
+static const struct file_operations tomoyo_operations = {
+ .open = tomoyo_open,
+ .release = tomoyo_release,
+ .poll = tomoyo_poll,
+ .read = tomoyo_read,
+ .write = tomoyo_write,
+ .llseek = noop_llseek,
+};
+
+/**
+ * tomoyo_create_entry - Create interface files under /sys/kernel/security/tomoyo/ directory.
+ *
+ * @name: The name of the interface file.
+ * @mode: The permission of the interface file.
+ * @parent: The parent directory.
+ * @key: Type of interface.
+ *
+ * Returns nothing.
+ */
+static void __init tomoyo_create_entry(const char *name, const umode_t mode,
+ struct dentry *parent, const u8 key)
+{
+ securityfs_create_file(name, mode, parent, (void *) (uintptr_t) key,
+ &tomoyo_operations);
+}
+
+/**
+ * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_initerface_init(void)
+{
+ struct tomoyo_domain_info *domain;
+ struct dentry *tomoyo_dir;
+
+ if (!tomoyo_enabled)
+ return 0;
+ domain = tomoyo_domain();
+ /* Don't create securityfs entries unless registered. */
+ if (domain != &tomoyo_kernel_domain)
+ return 0;
+
+ tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
+ tomoyo_create_entry("query", 0600, tomoyo_dir,
+ TOMOYO_QUERY);
+ tomoyo_create_entry("domain_policy", 0600, tomoyo_dir,
+ TOMOYO_DOMAINPOLICY);
+ tomoyo_create_entry("exception_policy", 0600, tomoyo_dir,
+ TOMOYO_EXCEPTIONPOLICY);
+ tomoyo_create_entry("audit", 0400, tomoyo_dir,
+ TOMOYO_AUDIT);
+ tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
+ TOMOYO_PROCESS_STATUS);
+ tomoyo_create_entry("stat", 0644, tomoyo_dir,
+ TOMOYO_STAT);
+ tomoyo_create_entry("profile", 0600, tomoyo_dir,
+ TOMOYO_PROFILE);
+ tomoyo_create_entry("manager", 0600, tomoyo_dir,
+ TOMOYO_MANAGER);
+ tomoyo_create_entry("version", 0400, tomoyo_dir,
+ TOMOYO_VERSION);
+ securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+ &tomoyo_self_operations);
+ tomoyo_load_builtin_policy();
+ return 0;
+}
+
+fs_initcall(tomoyo_initerface_init);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
new file mode 100644
index 000000000..fb599401b
--- /dev/null
+++ b/security/tomoyo/tomoyo.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/tomoyo.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/lsm_hooks.h>
+#include "common.h"
+
+/**
+ * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" for current thread.
+ */
+struct tomoyo_domain_info *tomoyo_domain(void)
+{
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ if (s->old_domain_info && !current->in_execve) {
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
+ }
+ return s->domain_info;
+}
+
+/**
+ * tomoyo_cred_prepare - Target for security_prepare_creds().
+ *
+ * @new: Pointer to "struct cred".
+ * @old: Pointer to "struct cred".
+ * @gfp: Memory allocation flags.
+ *
+ * Returns 0.
+ */
+static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
+ gfp_t gfp)
+{
+ /* Restore old_domain_info saved by previous execve() request. */
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ if (s->old_domain_info && !current->in_execve) {
+ atomic_dec(&s->domain_info->users);
+ s->domain_info = s->old_domain_info;
+ s->old_domain_info = NULL;
+ }
+ return 0;
+}
+
+/**
+ * tomoyo_bprm_committed_creds - Target for security_bprm_committed_creds().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ */
+static void tomoyo_bprm_committed_creds(struct linux_binprm *bprm)
+{
+ /* Clear old_domain_info saved by execve() request. */
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
+}
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+/**
+ * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0.
+ */
+static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
+ * for the first time.
+ */
+ if (!tomoyo_policy_loaded)
+ tomoyo_load_policy(bprm->filename);
+ return 0;
+}
+#endif
+
+/**
+ * tomoyo_bprm_check_security - Target for security_bprm_check().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
+{
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ /*
+ * Execute permission is checked against pathname passed to execve()
+ * using current domain.
+ */
+ if (!s->old_domain_info) {
+ const int idx = tomoyo_read_lock();
+ const int err = tomoyo_find_next_domain(bprm);
+
+ tomoyo_read_unlock(idx);
+ return err;
+ }
+ /*
+ * Read permission is checked against interpreters using next domain.
+ */
+ return tomoyo_check_open_permission(s->domain_info,
+ &bprm->file->f_path, O_RDONLY);
+}
+
+/**
+ * tomoyo_inode_getattr - Target for security_inode_getattr().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inode_getattr(const struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
+}
+
+/**
+ * tomoyo_path_truncate - Target for security_path_truncate().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_truncate(const struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL);
+}
+
+/**
+ * tomoyo_path_unlink - Target for security_path_unlink().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
+{
+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
+ return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
+}
+
+/**
+ * tomoyo_path_mkdir - Target for security_path_mkdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode: DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry,
+ umode_t mode)
+{
+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
+ return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
+ mode & S_IALLUGO);
+}
+
+/**
+ * tomoyo_path_rmdir - Target for security_path_rmdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
+{
+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
+ return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
+}
+
+/**
+ * tomoyo_path_symlink - Target for security_path_symlink().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @old_name: Symlink's content.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
+ const char *old_name)
+{
+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
+ return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
+}
+
+/**
+ * tomoyo_path_mknod - Target for security_path_mknod().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode: DAC permission mode.
+ * @dev: Device attributes.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
+ int type = TOMOYO_TYPE_CREATE;
+ const unsigned int perm = mode & S_IALLUGO;
+
+ switch (mode & S_IFMT) {
+ case S_IFCHR:
+ type = TOMOYO_TYPE_MKCHAR;
+ break;
+ case S_IFBLK:
+ type = TOMOYO_TYPE_MKBLOCK;
+ break;
+ default:
+ goto no_dev;
+ }
+ return tomoyo_mkdev_perm(type, &path, perm, dev);
+ no_dev:
+ switch (mode & S_IFMT) {
+ case S_IFIFO:
+ type = TOMOYO_TYPE_MKFIFO;
+ break;
+ case S_IFSOCK:
+ type = TOMOYO_TYPE_MKSOCK;
+ break;
+ }
+ return tomoyo_path_number_perm(type, &path, perm);
+}
+
+/**
+ * tomoyo_path_link - Target for security_path_link().
+ *
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_dir: Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
+ struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
+
+ return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
+}
+
+/**
+ * tomoyo_path_rename - Target for security_path_rename().
+ *
+ * @old_parent: Pointer to "struct path".
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_parent: Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ * @flags: Rename options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_rename(const struct path *old_parent,
+ struct dentry *old_dentry,
+ const struct path *new_parent,
+ struct dentry *new_dentry,
+ const unsigned int flags)
+{
+ struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
+ struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
+
+ if (flags & RENAME_EXCHANGE) {
+ const int err = tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path2,
+ &path1);
+
+ if (err)
+ return err;
+ }
+ return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
+}
+
+/**
+ * tomoyo_file_fcntl - Target for security_file_fcntl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd: Command for fcntl().
+ * @arg: Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)))
+ return 0;
+ return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path,
+ O_WRONLY | (arg & O_APPEND));
+}
+
+/**
+ * tomoyo_file_open - Target for security_file_open().
+ *
+ * @f: Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_open(struct file *f)
+{
+ /* Don't check read permission here if called from execve(). */
+ if (current->in_execve)
+ return 0;
+ return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
+ f->f_flags);
+}
+
+/**
+ * tomoyo_file_ioctl - Target for security_file_ioctl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd: Command for ioctl().
+ * @arg: Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd);
+}
+
+/**
+ * tomoyo_path_chmod - Target for security_path_chmod().
+ *
+ * @path: Pointer to "struct path".
+ * @mode: DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chmod(const struct path *path, umode_t mode)
+{
+ return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
+ mode & S_IALLUGO);
+}
+
+/**
+ * tomoyo_path_chown - Target for security_path_chown().
+ *
+ * @path: Pointer to "struct path".
+ * @uid: Owner ID.
+ * @gid: Group ID.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
+{
+ int error = 0;
+
+ if (uid_valid(uid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
+ from_kuid(&init_user_ns, uid));
+ if (!error && gid_valid(gid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
+ from_kgid(&init_user_ns, gid));
+ return error;
+}
+
+/**
+ * tomoyo_path_chroot - Target for security_path_chroot().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chroot(const struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL);
+}
+
+/**
+ * tomoyo_sb_mount - Target for security_sb_mount().
+ *
+ * @dev_name: Name of device file. May be NULL.
+ * @path: Pointer to "struct path".
+ * @type: Name of filesystem type. May be NULL.
+ * @flags: Mount options.
+ * @data: Optional data. May be NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ return tomoyo_mount_permission(dev_name, path, type, flags, data);
+}
+
+/**
+ * tomoyo_sb_umount - Target for security_sb_umount().
+ *
+ * @mnt: Pointer to "struct vfsmount".
+ * @flags: Unmount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
+
+ return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
+}
+
+/**
+ * tomoyo_sb_pivotroot - Target for security_sb_pivotroot().
+ *
+ * @old_path: Pointer to "struct path".
+ * @new_path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path)
+{
+ return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
+}
+
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock: Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
+struct lsm_blob_sizes tomoyo_blob_sizes __lsm_ro_after_init = {
+ .lbs_task = sizeof(struct tomoyo_task),
+};
+
+/**
+ * tomoyo_task_alloc - Target for security_task_alloc().
+ *
+ * @task: Pointer to "struct task_struct".
+ * @clone_flags: clone() flags.
+ *
+ * Returns 0.
+ */
+static int tomoyo_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ struct tomoyo_task *old = tomoyo_task(current);
+ struct tomoyo_task *new = tomoyo_task(task);
+
+ new->domain_info = old->domain_info;
+ atomic_inc(&new->domain_info->users);
+ new->old_domain_info = NULL;
+ return 0;
+}
+
+/**
+ * tomoyo_task_free - Target for security_task_free().
+ *
+ * @task: Pointer to "struct task_struct".
+ */
+static void tomoyo_task_free(struct task_struct *task)
+{
+ struct tomoyo_task *s = tomoyo_task(task);
+
+ if (s->domain_info) {
+ atomic_dec(&s->domain_info->users);
+ s->domain_info = NULL;
+ }
+ if (s->old_domain_info) {
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
+ }
+}
+
+/*
+ * tomoyo_hooks is an array of "struct security_hook_list" entries used for
+ * registering TOMOYO's LSM hooks.
+ */
+static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
+ LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
+ LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
+ LSM_HOOK_INIT(task_free, tomoyo_task_free),
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
+#endif
+ LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
+ LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
+ LSM_HOOK_INIT(file_open, tomoyo_file_open),
+ LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
+ LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
+ LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
+ LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
+ LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
+ LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
+ LSM_HOOK_INIT(path_link, tomoyo_path_link),
+ LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+ LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
+ LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
+ LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
+ LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
+ LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
+ LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
+};
+
+/* Lock for GC. */
+DEFINE_SRCU(tomoyo_ss);
+
+int tomoyo_enabled __lsm_ro_after_init = 1;
+
+/**
+ * tomoyo_init - Register TOMOYO Linux as a LSM module.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ /* register ourselves with the security framework */
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), "tomoyo");
+ pr_info("TOMOYO Linux initialized\n");
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ s->old_domain_info = NULL;
+ tomoyo_mm_init();
+
+ return 0;
+}
+
+DEFINE_LSM(tomoyo) = {
+ .name = "tomoyo",
+ .enabled = &tomoyo_enabled,
+ .flags = LSM_FLAG_LEGACY_MAJOR,
+ .blobs = &tomoyo_blob_sizes,
+ .init = tomoyo_init,
+};
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
new file mode 100644
index 000000000..6799b1122
--- /dev/null
+++ b/security/tomoyo/util.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/util.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include <linux/rculist.h>
+
+#include "common.h"
+
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
+
+/* Has /sbin/init started? */
+bool tomoyo_policy_loaded;
+
+/*
+ * Mapping table from "enum tomoyo_mac_index" to
+ * "enum tomoyo_mac_category_index".
+ */
+const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
+ /* CONFIG::file group */
+ [TOMOYO_MAC_FILE_EXECUTE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_OPEN] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CREATE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_UNLINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_GETATTR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKDIR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_RMDIR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKFIFO] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKSOCK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_TRUNCATE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_SYMLINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKBLOCK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKCHAR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_LINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_RENAME] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHMOD] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHOWN] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHGRP] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_IOCTL] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
+};
+
+/**
+ * tomoyo_convert_time - Convert time64_t to YYYY/MM/DD hh:mm:ss format.
+ *
+ * @time64: Seconds since 1970/01/01 00:00:00.
+ * @stamp: Pointer to "struct tomoyo_time".
+ *
+ * Returns nothing.
+ */
+void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
+{
+ struct tm tm;
+
+ time64_to_tm(time64, 0, &tm);
+ stamp->sec = tm.tm_sec;
+ stamp->min = tm.tm_min;
+ stamp->hour = tm.tm_hour;
+ stamp->day = tm.tm_mday;
+ stamp->month = tm.tm_mon + 1;
+ stamp->year = tm.tm_year + 1900;
+}
+
+/**
+ * tomoyo_permstr - Find permission keywords.
+ *
+ * @string: String representation for permissions in foo/bar/buz format.
+ * @keyword: Keyword to find from @string.
+ *
+ * Returns true if @keyword was found in @string, false otherwise.
+ *
+ * This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
+ */
+bool tomoyo_permstr(const char *string, const char *keyword)
+{
+ const char *cp = strstr(string, keyword);
+
+ if (cp)
+ return cp == string || *(cp - 1) == '/';
+ return false;
+}
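Worked example: tomoyo_permstr("bind/listen/connect", "listen") is true because the match is preceded by '/'. Searching for "isten" also finds a substring, but it fails the boundary test (*(cp - 1) is 'l', not '/'), so the result is false. The strncmp() assumption stated above is what lets the function skip a right-boundary check: no supported keyword is a proper prefix of another, so matching the left edge is enough.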
+
+/**
+ * tomoyo_read_token - Read a word from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a word on success, "" otherwise.
+ *
+ * To allow the caller to skip a NULL check, this function returns "" rather
+ * than NULL if there are no more words to read.
+ */
+char *tomoyo_read_token(struct tomoyo_acl_param *param)
+{
+ char *pos = param->data;
+ char *del = strchr(pos, ' ');
+
+ if (del)
+ *del++ = '\0';
+ else
+ del = pos + strlen(pos);
+ param->data = del;
+ return pos;
+}
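For example, with param->data pointing at "inet stream bind", three successive calls return "inet", "stream" and "bind" (NUL-terminating each in place), after which param->data rests on the trailing '\0' and every further call returns "".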
+
+static bool tomoyo_correct_path2(const char *filename, const size_t len);
+
+/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+
+ while (*pos) {
+ if (*pos++ != ' ' ||
+ tomoyo_correct_path2(pos, strchrnul(pos, ' ') - pos))
+ continue;
+ *(pos - 1) = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
+
+/**
+ * tomoyo_parse_ulong - Parse an "unsigned long" value.
+ *
+ * @result: Pointer to "unsigned long".
+ * @str: Pointer to string to parse.
+ *
+ * Returns one of values in "enum tomoyo_value_type".
+ *
+ * @str is updated to point to the first character after the value
+ * on success.
+ */
+u8 tomoyo_parse_ulong(unsigned long *result, char **str)
+{
+ const char *cp = *str;
+ char *ep;
+ int base = 10;
+
+ if (*cp == '0') {
+ char c = *(cp + 1);
+
+ if (c == 'x' || c == 'X') {
+ base = 16;
+ cp += 2;
+ } else if (c >= '0' && c <= '7') {
+ base = 8;
+ cp++;
+ }
+ }
+ *result = simple_strtoul(cp, &ep, base);
+ if (cp == ep)
+ return TOMOYO_VALUE_TYPE_INVALID;
+ *str = ep;
+ switch (base) {
+ case 16:
+ return TOMOYO_VALUE_TYPE_HEXADECIMAL;
+ case 8:
+ return TOMOYO_VALUE_TYPE_OCTAL;
+ default:
+ return TOMOYO_VALUE_TYPE_DECIMAL;
+ }
+}
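Worked examples of the base detection above: "0x1F" parses as hexadecimal 31, "017" as octal 15, and "17" as decimal 17. In each case *str is left pointing at the first character after the digits, which is how callers detect trailing garbage or a '-' range separator.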
+
+/**
+ * tomoyo_print_ulong - Print an "unsigned long" value.
+ *
+ * @buffer: Pointer to buffer.
+ * @buffer_len: Size of @buffer.
+ * @value: An "unsigned long" value.
+ * @type: Type of @value.
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+ const unsigned long value, const u8 type)
+{
+ if (type == TOMOYO_VALUE_TYPE_DECIMAL)
+ snprintf(buffer, buffer_len, "%lu", value);
+ else if (type == TOMOYO_VALUE_TYPE_OCTAL)
+ snprintf(buffer, buffer_len, "0%lo", value);
+ else if (type == TOMOYO_VALUE_TYPE_HEXADECIMAL)
+ snprintf(buffer, buffer_len, "0x%lX", value);
+ else
+ snprintf(buffer, buffer_len, "type(%u)", type);
+}
+
+/**
+ * tomoyo_parse_name_union - Parse a tomoyo_name_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr)
+{
+ char *filename;
+
+ if (param->data[0] == '@') {
+ param->data++;
+ ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP);
+ return ptr->group != NULL;
+ }
+ filename = tomoyo_read_token(param);
+ if (!tomoyo_correct_word(filename))
+ return false;
+ ptr->filename = tomoyo_get_name(filename);
+ return ptr->filename != NULL;
+}
+
+/**
+ * tomoyo_parse_number_union - Parse a tomoyo_number_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+ struct tomoyo_number_union *ptr)
+{
+ char *data;
+ u8 type;
+ unsigned long v;
+
+ memset(ptr, 0, sizeof(*ptr));
+ if (param->data[0] == '@') {
+ param->data++;
+ ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP);
+ return ptr->group != NULL;
+ }
+ data = tomoyo_read_token(param);
+ type = tomoyo_parse_ulong(&v, &data);
+ if (type == TOMOYO_VALUE_TYPE_INVALID)
+ return false;
+ ptr->values[0] = v;
+ ptr->value_type[0] = type;
+ if (!*data) {
+ ptr->values[1] = v;
+ ptr->value_type[1] = type;
+ return true;
+ }
+ if (*data++ != '-')
+ return false;
+ type = tomoyo_parse_ulong(&v, &data);
+ if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v)
+ return false;
+ ptr->values[1] = v;
+ ptr->value_type[1] = type;
+ return true;
+}
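
A number union is therefore either a group reference ("@name"), a single value stored as the degenerate range v-v, or an explicit "min-max" range whose bounds must not be reversed. A small userspace re-creation of just the range rule (hypothetical helper; base handling is delegated to strtoul()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool parse_range(const char *s, unsigned long out[2])
{
	char *ep;

	out[0] = strtoul(s, &ep, 0);
	if (ep == s)
		return false;
	if (!*ep) {			/* single value: degenerate range */
		out[1] = out[0];
		return true;
	}
	if (*ep++ != '-')
		return false;
	out[1] = strtoul(ep, &ep, 0);
	return !*ep && out[0] <= out[1];
}

int main(void)
{
	unsigned long r[2];

	printf("%d\n", parse_range("42", r));		/* 1: 42-42 */
	printf("%d\n", parse_range("0x10-0x20", r));	/* 1: 16-32 */
	printf("%d\n", parse_range("5-3", r));		/* 0: reversed range */
	return 0;
}
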
+
+/**
+ * tomoyo_byte_range - Check whether the string is a \ooo style octal value.
+ *
+ * @str: Pointer to the string.
+ *
+ * Returns true if @str is a \ooo style octal value, false otherwise.
+ *
+ * TOMOYO uses \ooo style representation for 0x01 - 0x20 and 0x7F - 0xFF.
+ * This function verifies that \ooo is in the valid range.
+ */
+static inline bool tomoyo_byte_range(const char *str)
+{
+ return *str >= '0' && *str++ <= '3' &&
+ *str >= '0' && *str++ <= '7' &&
+ *str >= '0' && *str <= '7';
+}
+
+/**
+ * tomoyo_alphabet_char - Check whether the character is alphabetic.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an alphabetic character, false otherwise.
+ */
+static inline bool tomoyo_alphabet_char(const char c)
+{
+ return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+
+/**
+ * tomoyo_make_byte - Make byte value from three octal characters.
+ *
+ * @c1: The first character.
+ * @c2: The second character.
+ * @c3: The third character.
+ *
+ * Returns byte value.
+ */
+static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3)
+{
+ return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
+}
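
Worked example: the escape "\101" decodes to ((1 << 6) + (0 << 3) + 1) = 65, i.e. 'A'. A standalone check (illustrative):

#include <stdio.h>

static unsigned char make_byte(unsigned char c1, unsigned char c2, unsigned char c3)
{
	return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
}

int main(void)
{
	printf("%c\n", make_byte('1', '0', '1'));	/* prints: A */
	return 0;
}
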
+
+/**
+ * tomoyo_valid - Check whether the character is a valid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is a valid character, false otherwise.
+ */
+static inline bool tomoyo_valid(const unsigned char c)
+{
+ return c > ' ' && c < 127;
+}
+
+/**
+ * tomoyo_invalid - Check whether the character is an invalid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an invalid character, false otherwise.
+ */
+static inline bool tomoyo_invalid(const unsigned char c)
+{
+ return c && (c <= ' ' || c >= 127);
+}
+
+/**
+ * tomoyo_str_starts - Check whether the given string starts with the given keyword.
+ *
+ * @src: Pointer to pointer to the string.
+ * @find: Pointer to the keyword.
+ *
+ * Returns true if @src starts with @find, false otherwise.
+ *
+ * The @src is updated to point to the first character after @find
+ * when @src starts with @find.
+ */
+bool tomoyo_str_starts(char **src, const char *find)
+{
+ const int len = strlen(find);
+ char *tmp = *src;
+
+ if (strncmp(tmp, find, len))
+ return false;
+ tmp += len;
+ *src = tmp;
+ return true;
+}
+
+/**
+ * tomoyo_normalize_line - Normalize a line of input.
+ *
+ * @buffer: The line to normalize.
+ *
+ * Leading and trailing whitespace is removed, and runs of whitespace
+ * are collapsed into a single space.
+ *
+ * Returns nothing.
+ */
+void tomoyo_normalize_line(unsigned char *buffer)
+{
+ unsigned char *sp = buffer;
+ unsigned char *dp = buffer;
+ bool first = true;
+
+ while (tomoyo_invalid(*sp))
+ sp++;
+ while (*sp) {
+ if (!first)
+ *dp++ = ' ';
+ first = false;
+ while (tomoyo_valid(*sp))
+ *dp++ = *sp++;
+ while (tomoyo_invalid(*sp))
+ sp++;
+ }
+ *dp = '\0';
+}
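
Because tomoyo_invalid() treats every nonzero byte at or below ' ' as whitespace, tabs collapse along with spaces. A before/after userspace copy of the logic (sketch only):

#include <stdbool.h>
#include <stdio.h>

static bool valid(unsigned char c)   { return c > ' ' && c < 127; }
static bool invalid(unsigned char c) { return c && !valid(c); }

static void normalize(unsigned char *buf)
{
	unsigned char *sp = buf, *dp = buf;
	bool first = true;

	while (invalid(*sp))
		sp++;			/* skip leading whitespace */
	while (*sp) {
		if (!first)
			*dp++ = ' ';	/* one separator between words */
		first = false;
		while (valid(*sp))
			*dp++ = *sp++;
		while (invalid(*sp))
			sp++;		/* swallow the run of whitespace */
	}
	*dp = '\0';
}

int main(void)
{
	unsigned char line[] = "  file \t read   /etc/fstab  ";

	normalize(line);
	printf("[%s]\n", (char *)line);	/* [file read /etc/fstab] */
	return 0;
}
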
+
+/**
+ * tomoyo_correct_word2 - Validate a string.
+ *
+ * @string: The string to check. May be non-'\0'-terminated.
+ * @len: Length of @string.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+static bool tomoyo_correct_word2(const char *string, size_t len)
+{
+ u8 recursion = 20;
+ const char *const start = string;
+ bool in_repetition = false;
+
+ if (!len)
+ goto out;
+ while (len--) {
+ unsigned char c = *string++;
+
+ if (c == '\\') {
+ if (!len--)
+ goto out;
+ c = *string++;
+ if (c >= '0' && c <= '3') {
+ unsigned char d;
+ unsigned char e;
+
+ if (!len-- || !len--)
+ goto out;
+ d = *string++;
+ e = *string++;
+ if (d < '0' || d > '7' || e < '0' || e > '7')
+ goto out;
+ c = tomoyo_make_byte(c, d, e);
+ if (c <= ' ' || c >= 127)
+ continue;
+ goto out;
+ }
+ switch (c) {
+ case '\\': /* "\\" */
+ case '+': /* "\+" */
+ case '?': /* "\?" */
+ case 'x': /* "\x" */
+ case 'a': /* "\a" */
+ case '-': /* "\-" */
+ continue;
+ }
+ if (!recursion--)
+ goto out;
+ switch (c) {
+ case '*': /* "\*" */
+ case '@': /* "\@" */
+ case '$': /* "\$" */
+ case 'X': /* "\X" */
+ case 'A': /* "\A" */
+ continue;
+ case '{': /* "/\{" */
+ if (string - 3 < start || *(string - 3) != '/')
+ goto out;
+ in_repetition = true;
+ continue;
+ case '}': /* "\}/" */
+ if (*string != '/')
+ goto out;
+ if (!in_repetition)
+ goto out;
+ in_repetition = false;
+ continue;
+ }
+ goto out;
+ } else if (in_repetition && c == '/') {
+ goto out;
+ } else if (c <= ' ' || c >= 127) {
+ goto out;
+ }
+ }
+ if (in_repetition)
+ goto out;
+ return true;
+ out:
+ return false;
+}
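
A few hand-checked inputs against these rules, shown as a C comment block (illustrative, not executable):

/*
 * "/bin/ls"      -> true   (plain printable path)
 * "/bin/\*"      -> true   (escaped wildcard)
 * "/bin/l s"     -> false  (raw space is rejected)
 * "/\{bin\}/ls"  -> true   ("\{" follows '/', "\}" precedes '/')
 * "\101"         -> false  (decodes to printable 'A'; must be literal)
 */
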
+
+/**
+ * tomoyo_correct_word - Validate a string.
+ *
+ * @string: The string to check.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_word(const char *string)
+{
+ return tomoyo_correct_word2(string, strlen(string));
+}
+
+/**
+ * tomoyo_correct_path2 - Check whether the given pathname follows the naming rules.
+ *
+ * @filename: The pathname to check.
+ * @len: Length of @filename.
+ *
+ * Returns true if @filename follows the naming rules, false otherwise.
+ */
+static bool tomoyo_correct_path2(const char *filename, const size_t len)
+{
+ const char *cp1 = memchr(filename, '/', len);
+ const char *cp2 = memchr(filename, '.', len);
+
+ return cp1 && (!cp2 || (cp1 < cp2)) && tomoyo_correct_word2(filename, len);
+}
+
+/**
+ * tomoyo_correct_path - Validate a pathname.
+ *
+ * @filename: The pathname to check.
+ *
+ * Check whether the given pathname follows the naming rules.
+ * Returns true if @filename follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_path(const char *filename)
+{
+ return tomoyo_correct_path2(filename, strlen(filename));
+}
+
+/**
+ * tomoyo_correct_domain - Check whether the given domainname follows the naming rules.
+ *
+ * @domainname: The domainname to check.
+ *
+ * Returns true if @domainname follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_domain(const unsigned char *domainname)
+{
+ if (!domainname || !tomoyo_domain_def(domainname))
+ return false;
+ domainname = strchr(domainname, ' ');
+ if (!domainname++)
+ return true;
+ while (1) {
+ const unsigned char *cp = strchr(domainname, ' ');
+
+ if (!cp)
+ break;
+ if (!tomoyo_correct_path2(domainname, cp - domainname))
+ return false;
+ domainname = cp + 1;
+ }
+ return tomoyo_correct_path(domainname);
+}
+
+/**
+ * tomoyo_domain_def - Check whether the given token can be a domainname.
+ *
+ * @buffer: The token to check.
+ *
+ * Returns true if @buffer possibly be a domainname, false otherwise.
+ */
+bool tomoyo_domain_def(const unsigned char *buffer)
+{
+ const unsigned char *cp;
+ int len;
+
+ if (*buffer != '<')
+ return false;
+ cp = strchr(buffer, ' ');
+ if (!cp)
+ len = strlen(buffer);
+ else
+ len = cp - buffer;
+ if (buffer[len - 1] != '>' ||
+ !tomoyo_correct_word2(buffer + 1, len - 2))
+ return false;
+ return true;
+}
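
The check looks only at the first space-delimited token: it must start with '<', end with '>', and carry a valid body. A simplified userspace sketch (it omits the tomoyo_correct_word2() body validation, so treat it as illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool domain_def(const char *buffer)
{
	const char *cp;
	size_t len;

	if (*buffer != '<')
		return false;
	cp = strchr(buffer, ' ');
	len = cp ? (size_t)(cp - buffer) : strlen(buffer);
	return len >= 2 && buffer[len - 1] == '>';	/* body check omitted */
}

int main(void)
{
	printf("%d\n", domain_def("<kernel>"));			/* 1 */
	printf("%d\n", domain_def("<kernel> /sbin/init"));	/* 1 */
	printf("%d\n", domain_def("kernel"));			/* 0 */
	return 0;
}
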
+
+/**
+ * tomoyo_find_domain - Find a domain by the given name.
+ *
+ * @domainname: The domainname to find.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+{
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_path_info name;
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!domain->is_deleted &&
+ !tomoyo_pathcmp(&name, domain->domainname))
+ return domain;
+ }
+ return NULL;
+}
+
+/**
+ * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
+ *
+ * @filename: The string to evaluate.
+ *
+ * Returns the initial length without a pattern in @filename.
+ */
+static int tomoyo_const_part_length(const char *filename)
+{
+ char c;
+ int len = 0;
+
+ if (!filename)
+ return 0;
+ while ((c = *filename++) != '\0') {
+ if (c != '\\') {
+ len++;
+ continue;
+ }
+ c = *filename++;
+ switch (c) {
+ case '\\': /* "\\" */
+ len += 2;
+ continue;
+ case '0': /* "\ooo" */
+ case '1':
+ case '2':
+ case '3':
+ c = *filename++;
+ if (c < '0' || c > '7')
+ break;
+ c = *filename++;
+ if (c < '0' || c > '7')
+ break;
+ len += 4;
+ continue;
+ }
+ break;
+ }
+ return len;
+}
+
+/**
+ * tomoyo_fill_path_info - Fill in "struct tomoyo_path_info" members.
+ *
+ * @ptr: Pointer to "struct tomoyo_path_info" to fill in.
+ *
+ * The caller sets "struct tomoyo_path_info"->name.
+ */
+void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
+{
+ const char *name = ptr->name;
+ const int len = strlen(name);
+
+ ptr->const_len = tomoyo_const_part_length(name);
+ ptr->is_dir = len && (name[len - 1] == '/');
+ ptr->is_patterned = (ptr->const_len < len);
+ ptr->hash = full_name_hash(NULL, name, len);
+}
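
Here const_len is the byte length of the literal prefix before the first pattern element, and is_patterned simply compares it with the full length. A userspace copy of tomoyo_const_part_length() with two samples (sketch):

#include <stdio.h>

static int const_part_length(const char *filename)
{
	char c;
	int len = 0;

	while ((c = *filename++) != '\0') {
		if (c != '\\') {
			len++;
			continue;
		}
		c = *filename++;
		switch (c) {
		case '\\':			/* "\\" is a literal byte */
			len += 2;
			continue;
		case '0': case '1': case '2': case '3':
			c = *filename++;	/* "\ooo" is a literal byte */
			if (c < '0' || c > '7')
				break;
			c = *filename++;
			if (c < '0' || c > '7')
				break;
			len += 4;
			continue;
		}
		break;				/* any other escape is a pattern */
	}
	return len;
}

int main(void)
{
	printf("%d\n", const_part_length("/etc/fstab")); /* 10: no pattern */
	printf("%d\n", const_part_length("/tmp/\\*"));   /* 5: const part "/tmp/" */
	return 0;
}
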
+
+/**
+ * tomoyo_file_matches_pattern2 - Pattern matching without '/' character and "\-" pattern.
+ *
+ * @filename: The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern: The start of pattern to compare.
+ * @pattern_end: The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern2(const char *filename,
+ const char *filename_end,
+ const char *pattern,
+ const char *pattern_end)
+{
+ while (filename < filename_end && pattern < pattern_end) {
+ char c;
+ int i;
+ int j;
+
+ if (*pattern != '\\') {
+ if (*filename++ != *pattern++)
+ return false;
+ continue;
+ }
+ c = *filename;
+ pattern++;
+ switch (*pattern) {
+ case '?':
+ if (c == '/') {
+ return false;
+ } else if (c == '\\') {
+ if (filename[1] == '\\')
+ filename++;
+ else if (tomoyo_byte_range(filename + 1))
+ filename += 3;
+ else
+ return false;
+ }
+ break;
+ case '\\':
+ if (c != '\\')
+ return false;
+ if (*++filename != '\\')
+ return false;
+ break;
+ case '+':
+ if (!isdigit(c))
+ return false;
+ break;
+ case 'x':
+ if (!isxdigit(c))
+ return false;
+ break;
+ case 'a':
+ if (!tomoyo_alphabet_char(c))
+ return false;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ if (c == '\\' && tomoyo_byte_range(filename + 1)
+ && strncmp(filename + 1, pattern, 3) == 0) {
+ filename += 3;
+ pattern += 2;
+ break;
+ }
+ return false; /* Not matched. */
+ case '*':
+ case '@':
+ for (i = 0; i <= filename_end - filename; i++) {
+ if (tomoyo_file_matches_pattern2(
+ filename + i, filename_end,
+ pattern + 1, pattern_end))
+ return true;
+ c = filename[i];
+ if (c == '.' && *pattern == '@')
+ break;
+ if (c != '\\')
+ continue;
+ if (filename[i + 1] == '\\')
+ i++;
+ else if (tomoyo_byte_range(filename + i + 1))
+ i += 3;
+ else
+ break; /* Bad pattern. */
+ }
+ return false; /* Not matched. */
+ default:
+ j = 0;
+ c = *pattern;
+ if (c == '$') {
+ while (isdigit(filename[j]))
+ j++;
+ } else if (c == 'X') {
+ while (isxdigit(filename[j]))
+ j++;
+ } else if (c == 'A') {
+ while (tomoyo_alphabet_char(filename[j]))
+ j++;
+ }
+ for (i = 1; i <= j; i++) {
+ if (tomoyo_file_matches_pattern2(
+ filename + i, filename_end,
+ pattern + 1, pattern_end))
+ return true;
+ }
+ return false; /* Not matched or bad pattern. */
+ }
+ filename++;
+ pattern++;
+ }
+ while (*pattern == '\\' &&
+ (*(pattern + 1) == '*' || *(pattern + 1) == '@'))
+ pattern += 2;
+ return filename == filename_end && pattern == pattern_end;
+}
+
+/**
+ * tomoyo_file_matches_pattern - Pattern matching without '/' character.
+ *
+ * @filename: The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern: The start of pattern to compare.
+ * @pattern_end: The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern(const char *filename,
+ const char *filename_end,
+ const char *pattern,
+ const char *pattern_end)
+{
+ const char *pattern_start = pattern;
+ bool first = true;
+ bool result;
+
+ while (pattern < pattern_end - 1) {
+ /* Split at "\-" pattern. */
+ if (*pattern++ != '\\' || *pattern++ != '-')
+ continue;
+ result = tomoyo_file_matches_pattern2(filename,
+ filename_end,
+ pattern_start,
+ pattern - 2);
+ if (first)
+ result = !result;
+ if (result)
+ return false;
+ first = false;
+ pattern_start = pattern;
+ }
+ result = tomoyo_file_matches_pattern2(filename, filename_end,
+ pattern_start, pattern_end);
+ return first ? result : !result;
+}
+
+/**
+ * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
+ *
+ * @f: The start of string to check.
+ * @p: The start of pattern to compare.
+ *
+ * Returns true if @f matches @p, false otherwise.
+ */
+static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
+{
+ const char *f_delimiter;
+ const char *p_delimiter;
+
+ while (*f && *p) {
+ f_delimiter = strchr(f, '/');
+ if (!f_delimiter)
+ f_delimiter = f + strlen(f);
+ p_delimiter = strchr(p, '/');
+ if (!p_delimiter)
+ p_delimiter = p + strlen(p);
+ if (*p == '\\' && *(p + 1) == '{')
+ goto recursive;
+ if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
+ p_delimiter))
+ return false;
+ f = f_delimiter;
+ if (*f)
+ f++;
+ p = p_delimiter;
+ if (*p)
+ p++;
+ }
+ /* Ignore trailing "\*" and "\@" in @pattern. */
+ while (*p == '\\' &&
+ (*(p + 1) == '*' || *(p + 1) == '@'))
+ p += 2;
+ return !*f && !*p;
+ recursive:
+ /*
+ * The "\{" pattern is permitted only after '/' character.
+ * This guarantees that below "*(p - 1)" is safe.
+ * Also, the "\}" pattern is permitted only before '/' character
+ * so that "\{" + "\}" pair will not break the "\-" operator.
+ */
+ if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
+ *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
+ return false; /* Bad pattern. */
+ do {
+ /* Compare current component with pattern. */
+ if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
+ p_delimiter - 2))
+ break;
+ /* Proceed to next component. */
+ f = f_delimiter;
+ if (!*f)
+ break;
+ f++;
+ /* Continue comparison. */
+ if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
+ return true;
+ f_delimiter = strchr(f, '/');
+ } while (f_delimiter);
+ return false; /* Not matched. */
+}
+
+/**
+ * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
+ *
+ * @filename: The filename to check.
+ * @pattern: The pattern to compare.
+ *
+ * Returns true if matches, false otherwise.
+ *
+ * The following patterns are available.
+ * \\ \ itself.
+ * \ooo Octal representation of a byte.
+ * \* Zero or more repetitions of characters other than '/'.
+ * \@ Zero or more repetitions of characters other than '/' or '.'.
+ * \? 1 byte character other than '/'.
+ * \$ One or more repetitions of decimal digits.
+ * \+ 1 decimal digit.
+ * \X One or more repetitions of hexadecimal digits.
+ * \x 1 hexadecimal digit.
+ * \A One or more repetitions of alphabet characters.
+ * \a 1 alphabet character.
+ *
+ * \- Subtraction operator.
+ *
+ * /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
+ * /dir/dir/dir/ ).
+ */
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+ const struct tomoyo_path_info *pattern)
+{
+ const char *f = filename->name;
+ const char *p = pattern->name;
+ const int len = pattern->const_len;
+
+	/* If @pattern contains no pattern element, plain comparison is enough. */
+ if (!pattern->is_patterned)
+ return !tomoyo_pathcmp(filename, pattern);
+ /* Don't compare directory and non-directory. */
+ if (filename->is_dir != pattern->is_dir)
+ return false;
+ /* Compare the initial length without patterns. */
+ if (strncmp(f, p, len))
+ return false;
+ f += len;
+ p += len;
+ return tomoyo_path_matches_pattern2(f, p);
+}
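
A few hand-worked matches against the table above, as a comment block (illustrative only):

/*
 * "/tmp/\*\-secret" matches "/tmp/log" but not "/tmp/secret"
 *     ("\-" subtracts the second alternative within a component);
 * "/var/\{www\}/file" matches "/var/www/file" and "/var/www/www/file"
 *     (one or more repetitions of "www/");
 * "/tmp/file\$" matches "/tmp/file1" and "/tmp/file42"
 *     ("\$" is one or more decimal digits).
 */
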
+
+/**
+ * tomoyo_get_exe - Get tomoyo_realpath() of current process.
+ *
+ * Returns the tomoyo_realpath() of current process on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so the caller must call kfree()
+ * if this function didn't return NULL.
+ */
+const char *tomoyo_get_exe(void)
+{
+ struct file *exe_file;
+ const char *cp;
+ struct mm_struct *mm = current->mm;
+
+ if (!mm)
+ return NULL;
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ return NULL;
+
+ cp = tomoyo_realpath_from_path(&exe_file->f_path);
+ fput(exe_file);
+ return cp;
+}
+
+/**
+ * tomoyo_get_mode - Get MAC mode.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number.
+ * @index: Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+ const u8 index)
+{
+ u8 mode;
+ struct tomoyo_profile *p;
+
+ if (!tomoyo_policy_loaded)
+ return TOMOYO_CONFIG_DISABLED;
+ p = tomoyo_profile(ns, profile);
+ mode = p->config[index];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->config[tomoyo_index2category[index]
+ + TOMOYO_MAX_MAC_INDEX];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->default_config;
+ return mode & 3;
+}
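
The lookup falls back twice: the per-functionality entry, then the per-category entry, then the profile default; the final mask keeps only the two mode bits and strips the audit flags stored above them. A userspace sketch with a hypothetical profile layout (TOMOYO_CONFIG_USE_DEFAULT stood in by a sentinel):

#include <stdio.h>

#define USE_DEFAULT 255	/* sentinel, standing in for TOMOYO_CONFIG_USE_DEFAULT */

static int get_mode(int per_func, int per_category, int def)
{
	int mode = per_func;

	if (mode == USE_DEFAULT)
		mode = per_category;	/* category-wide fallback */
	if (mode == USE_DEFAULT)
		mode = def;		/* profile-wide fallback */
	return mode & 3;		/* keep mode bits, drop audit flags */
}

int main(void)
{
	printf("%d\n", get_mode(USE_DEFAULT, 3, 1));		/* 3: category wins */
	printf("%d\n", get_mode(USE_DEFAULT, USE_DEFAULT, 1));	/* 1: default */
	return 0;
}
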
+
+/**
+ * tomoyo_init_request_info - Initialize "struct tomoyo_request_info" members.
+ *
+ * @r: Pointer to "struct tomoyo_request_info" to initialize.
+ * @domain: Pointer to "struct tomoyo_domain_info". NULL for tomoyo_domain().
+ * @index: Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+ struct tomoyo_domain_info *domain, const u8 index)
+{
+ u8 profile;
+
+ memset(r, 0, sizeof(*r));
+ if (!domain)
+ domain = tomoyo_domain();
+ r->domain = domain;
+ profile = domain->profile;
+ r->profile = profile;
+ r->type = index;
+ r->mode = tomoyo_get_mode(domain->ns, profile, index);
+ return r->mode;
+}
+
+/**
+ * tomoyo_domain_quota_is_ok - Check for domain's quota.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns true if the domain has not exceeded its quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+{
+ unsigned int count = 0;
+ struct tomoyo_domain_info *domain = r->domain;
+ struct tomoyo_acl_info *ptr;
+
+ if (r->mode != TOMOYO_CONFIG_LEARNING)
+ return false;
+ if (!domain)
+ return true;
+ if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+ return false;
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
+ u16 perm;
+
+ if (ptr->is_deleted)
+ continue;
+ /*
+ * Reading perm bitmap might race with tomoyo_merge_*() because
+ * caller does not hold tomoyo_policy_lock mutex. But exceeding
+ * max_learning_entry parameter by a few entries does not harm.
+ */
+ switch (ptr->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+ break;
+ case TOMOYO_TYPE_PATH_NUMBER_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
+ ->perm);
+ break;
+ case TOMOYO_TYPE_MKDEV_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ perm = 0;
+ break;
+ default:
+ perm = 1;
+ }
+ count += hweight16(perm);
+ }
+ if (count < tomoyo_profile(domain->ns, domain->profile)->
+ pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
+ return true;
+ WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+ /* r->granted = false; */
+ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+#ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
+ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+ domain->domainname->name);
+#endif
+ return false;
+}
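
Each learned ACL counts one quota unit per set permission bit, via hweight16(). A quick bit-count sketch (userspace, illustrative):

#include <stdio.h>

/* Population count, same result as the kernel's hweight16(). */
static unsigned int hweight16_sketch(unsigned short v)
{
	unsigned int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	/* an ACL whose perm bitmap has bits 0, 2 and 5 set */
	printf("%u\n", hweight16_sketch(0x25));	/* 3 entries against the quota */
	return 0;
}
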
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
new file mode 100644
index 000000000..a81030412
--- /dev/null
+++ b/security/yama/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SECURITY_YAMA
+ bool "Yama support"
+ depends on SECURITY
+ default n
+ help
+ This selects Yama, which extends DAC support with additional
+ system-wide security settings beyond regular Linux discretionary
+ access controls. Currently available is ptrace scope restriction.
+ Like capabilities, this security module stacks with other LSMs.
+ Further information can be found in
+ Documentation/admin-guide/LSM/Yama.rst.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/yama/Makefile b/security/yama/Makefile
new file mode 100644
index 000000000..0fa5d0fe2
--- /dev/null
+++ b/security/yama/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SECURITY_YAMA) := yama.o
+
+yama-y := yama_lsm.o
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
new file mode 100644
index 000000000..06e226166
--- /dev/null
+++ b/security/yama/yama_lsm.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Yama Linux Security Module
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ *
+ * Copyright (C) 2010 Canonical, Ltd.
+ * Copyright (C) 2011 The Chromium OS Authors.
+ */
+
+#include <linux/lsm_hooks.h>
+#include <linux/sysctl.h>
+#include <linux/ptrace.h>
+#include <linux/prctl.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/string_helpers.h>
+#include <linux/task_work.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#define YAMA_SCOPE_DISABLED 0
+#define YAMA_SCOPE_RELATIONAL 1
+#define YAMA_SCOPE_CAPABILITY 2
+#define YAMA_SCOPE_NO_ATTACH 3
+
+static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
+
+/* describe a ptrace relationship for potential exception */
+struct ptrace_relation {
+ struct task_struct *tracer;
+ struct task_struct *tracee;
+ bool invalid;
+ struct list_head node;
+ struct rcu_head rcu;
+};
+
+static LIST_HEAD(ptracer_relations);
+static DEFINE_SPINLOCK(ptracer_relations_lock);
+
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+struct access_report_info {
+ struct callback_head work;
+ const char *access;
+ struct task_struct *target;
+ struct task_struct *agent;
+};
+
+static void __report_access(struct callback_head *work)
+{
+ struct access_report_info *info =
+ container_of(work, struct access_report_info, work);
+ char *target_cmd, *agent_cmd;
+
+ target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
+ agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);
+
+ pr_notice_ratelimited(
+ "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
+ info->access, target_cmd, info->target->pid, agent_cmd,
+ info->agent->pid);
+
+ kfree(agent_cmd);
+ kfree(target_cmd);
+
+ put_task_struct(info->agent);
+ put_task_struct(info->target);
+ kfree(info);
+}
+
+/* defers execution because cmdline access can sleep */
+static void report_access(const char *access, struct task_struct *target,
+ struct task_struct *agent)
+{
+ struct access_report_info *info;
+ char agent_comm[sizeof(agent->comm)];
+
+ assert_spin_locked(&target->alloc_lock); /* for target->comm */
+
+ if (current->flags & PF_KTHREAD) {
+ /* I don't think kthreads call task_work_run() before exiting.
+ * Imagine angry ranting about procfs here.
+ */
+ pr_notice_ratelimited(
+ "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
+ access, target->comm, target->pid,
+ get_task_comm(agent_comm, agent), agent->pid);
+ return;
+ }
+
+ info = kmalloc(sizeof(*info), GFP_ATOMIC);
+ if (!info)
+ return;
+ init_task_work(&info->work, __report_access);
+ get_task_struct(target);
+ get_task_struct(agent);
+ info->access = access;
+ info->target = target;
+ info->agent = agent;
+ if (task_work_add(current, &info->work, TWA_RESUME) == 0)
+ return; /* success */
+
+ WARN(1, "report_access called from exiting task");
+ put_task_struct(target);
+ put_task_struct(agent);
+ kfree(info);
+}
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ * @work: unused
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+ struct ptrace_relation *relation;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid) {
+ list_del_rcu(&relation->node);
+ kfree_rcu(relation, rcu);
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+}
+
+/**
+ * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
+ * @tracer: the task_struct of the process doing the ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Each tracee can have, at most, one tracer registered. Each time this
+ * is called, any previously registered tracer is replaced for the tracee.
+ *
+ * Returns 0 if relationship was added, -ve on error.
+ */
+static int yama_ptracer_add(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation, *added;
+
+ added = kmalloc(sizeof(*added), GFP_KERNEL);
+ if (!added)
+ return -ENOMEM;
+
+ added->tracee = tracee;
+ added->tracer = tracer;
+ added->invalid = false;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ list_replace_rcu(&relation->node, &added->node);
+ kfree_rcu(relation, rcu);
+ goto out;
+ }
+ }
+
+ list_add_rcu(&added->node, &ptracer_relations);
+
+out:
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+ return 0;
+}
+
+/**
+ * yama_ptracer_del - remove exceptions related to the given tasks
+ * @tracer: remove any relation where tracer task matches
+ * @tracee: remove any relation where tracee task matches
+ */
+static void yama_ptracer_del(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation;
+ bool marked = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee ||
+ (tracer && relation->tracer == tracer)) {
+ relation->invalid = true;
+ marked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ if (marked)
+ schedule_work(&yama_relation_work);
+}
+
+/**
+ * yama_task_free - remove the task from any ptracer exception relations
+ * @task: task being removed
+ */
+static void yama_task_free(struct task_struct *task)
+{
+ yama_ptracer_del(task, task);
+}
+
+/**
+ * yama_task_prctl - check for Yama-specific prctl operations
+ * @option: operation
+ * @arg2: argument
+ * @arg3: argument
+ * @arg4: argument
+ * @arg5: argument
+ *
+ * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
+ * does not handle the given option.
+ */
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ int rc = -ENOSYS;
+ struct task_struct *myself = current;
+
+ switch (option) {
+ case PR_SET_PTRACER:
+ /* Since a thread can call prctl(), find the group leader
+ * before calling _add() or _del() on it, since we want
+ * process-level granularity of control. The tracer group
+ * leader checking is handled later when walking the ancestry
+ * at the time of PTRACE_ATTACH check.
+ */
+ rcu_read_lock();
+ if (!thread_group_leader(myself))
+ myself = rcu_dereference(myself->group_leader);
+ get_task_struct(myself);
+ rcu_read_unlock();
+
+ if (arg2 == 0) {
+ yama_ptracer_del(NULL, myself);
+ rc = 0;
+ } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
+ rc = yama_ptracer_add(NULL, myself);
+ } else {
+ struct task_struct *tracer;
+
+ tracer = find_get_task_by_vpid(arg2);
+ if (!tracer) {
+ rc = -EINVAL;
+ } else {
+ rc = yama_ptracer_add(tracer, myself);
+ put_task_struct(tracer);
+ }
+ }
+
+ put_task_struct(myself);
+ break;
+ }
+
+ return rc;
+}
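
From userspace the corresponding knob is prctl(PR_SET_PTRACER, ...): a process can whitelist a single tracer pid, allow any tracer with PR_SET_PTRACER_ANY, or pass 0 to clear the exception, matching the three branches above. A minimal example (assumes a kernel with Yama enabled; error handling abbreviated):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Allow any process to attach (e.g. for a system-wide crash reporter). */
	if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0))
		perror("PR_SET_PTRACER");

	/* ... hand our pid to the crash reporter here ... */

	/* Revoke the exception again. */
	prctl(PR_SET_PTRACER, 0, 0, 0, 0);
	return 0;
}
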
+
+/**
+ * task_is_descendant - walk up a process family tree looking for a match
+ * @parent: the process to compare against while walking up from child
+ * @child: the process to start from while looking upwards for parent
+ *
+ * Returns 1 if child is a descendant of parent, 0 if not.
+ */
+static int task_is_descendant(struct task_struct *parent,
+ struct task_struct *child)
+{
+ int rc = 0;
+ struct task_struct *walker = child;
+
+ if (!parent || !child)
+ return 0;
+
+ rcu_read_lock();
+ if (!thread_group_leader(parent))
+ parent = rcu_dereference(parent->group_leader);
+ while (walker->pid > 0) {
+ if (!thread_group_leader(walker))
+ walker = rcu_dereference(walker->group_leader);
+ if (walker == parent) {
+ rc = 1;
+ break;
+ }
+ walker = rcu_dereference(walker->real_parent);
+ }
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * ptracer_exception_found - tracer registered as exception for this tracee
+ * @tracer: the task_struct of the process attempting ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Returns 1 if tracer has a ptracer exception ancestor for tracee.
+ */
+static int ptracer_exception_found(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ int rc = 0;
+ struct ptrace_relation *relation;
+ struct task_struct *parent = NULL;
+ bool found = false;
+
+ rcu_read_lock();
+
+ /*
+ * If there's already an active tracing relationship, then make an
+ * exception for the sake of other accesses, like process_vm_rw().
+ */
+ parent = ptrace_parent(tracee);
+ if (parent != NULL && same_thread_group(parent, tracer)) {
+ rc = 1;
+ goto unlock;
+ }
+
+ /* Look for a PR_SET_PTRACER relationship. */
+ if (!thread_group_leader(tracee))
+ tracee = rcu_dereference(tracee->group_leader);
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ parent = relation->tracer;
+ found = true;
+ break;
+ }
+ }
+
+ if (found && (parent == NULL || task_is_descendant(parent, tracer)))
+ rc = 1;
+
+unlock:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * yama_ptrace_access_check - validate PTRACE_ATTACH calls
+ * @child: task that current task is attempting to ptrace
+ * @mode: ptrace attach mode
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ int rc = 0;
+
+ /* require ptrace target be a child of ptracer on attach */
+ if (mode & PTRACE_MODE_ATTACH) {
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_DISABLED:
+ /* No additional restrictions. */
+ break;
+ case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
+ !ptracer_exception_found(current, child) &&
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_CAPABILITY:
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ default:
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0)
+ report_access("attach", child, current);
+
+ return rc;
+}
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_traceme(struct task_struct *parent)
+{
+ int rc = 0;
+
+ /* Only disallow PTRACE_TRACEME on more aggressive settings. */
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_CAPABILITY:
+ if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
+ rc = -EPERM;
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ rc = -EPERM;
+ break;
+ }
+
+ if (rc) {
+ task_lock(current);
+ report_access("traceme", current, parent);
+ task_unlock(current);
+ }
+
+ return rc;
+}
+
+static struct security_hook_list yama_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
+ LSM_HOOK_INIT(task_prctl, yama_task_prctl),
+ LSM_HOOK_INIT(task_free, yama_task_free),
+};
+
+#ifdef CONFIG_SYSCTL
+static int yama_dointvec_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table table_copy;
+
+ if (write && !capable(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ /* Lock the max value if it ever gets set. */
+ table_copy = *table;
+ if (*(int *)table_copy.data == *(int *)table_copy.extra2)
+ table_copy.extra1 = table_copy.extra2;
+
+ return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
+}
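
The handler implements a one-way latch: once ptrace_scope has been written with its maximum value, extra1 is raised to extra2, so later writes cannot lower it again before a reboot. A toy model of that range adjustment (sketch):

#include <stdio.h>

int main(void)
{
	int value = 3, min = 0, max = 3;	/* YAMA_SCOPE_NO_ATTACH stored */

	if (value == max)
		min = max;	/* what the handler does via extra1/extra2 */
	printf("writable range now: [%d, %d]\n", min, max);	/* [3, 3] */
	return 0;
}
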
+
+static int max_scope = YAMA_SCOPE_NO_ATTACH;
+
+static struct ctl_path yama_sysctl_path[] = {
+ { .procname = "kernel", },
+ { .procname = "yama", },
+ { }
+};
+
+static struct ctl_table yama_sysctl_table[] = {
+ {
+ .procname = "ptrace_scope",
+ .data = &ptrace_scope,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = yama_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &max_scope,
+ },
+ { }
+};
+static void __init yama_init_sysctl(void)
+{
+ if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
+ panic("Yama: sysctl registration failed.\n");
+}
+#else
+static inline void yama_init_sysctl(void) { }
+#endif /* CONFIG_SYSCTL */
+
+static int __init yama_init(void)
+{
+ pr_info("Yama: becoming mindful.\n");
+ security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), "yama");
+ yama_init_sysctl();
+ return 0;
+}
+
+DEFINE_LSM(yama) = {
+ .name = "yama",
+ .init = yama_init,
+};