Diffstat
-rw-r--r-- | kernel/module/Kconfig          |  392
-rw-r--r-- | kernel/module/Makefile         |   25
-rw-r--r-- | kernel/module/debug_kmemleak.c |   30
-rw-r--r-- | kernel/module/decompress.c     |  368
-rw-r--r-- | kernel/module/dups.c           |  248
-rw-r--r-- | kernel/module/internal.h       |  406
-rw-r--r-- | kernel/module/kallsyms.c       |  521
-rw-r--r-- | kernel/module/kdb.c            |   63
-rw-r--r-- | kernel/module/kmod.c           |  180
-rw-r--r-- | kernel/module/livepatch.c      |   74
-rw-r--r-- | kernel/module/main.c           | 3366
-rw-r--r-- | kernel/module/procfs.c         |  152
-rw-r--r-- | kernel/module/signing.c        |  125
-rw-r--r-- | kernel/module/stats.c          |  432
-rw-r--r-- | kernel/module/strict_rwx.c     |   80
-rw-r--r-- | kernel/module/sysfs.c          |  436
-rw-r--r-- | kernel/module/tracking.c       |  129
-rw-r--r-- | kernel/module/tree_lookup.c    |  112
-rw-r--r-- | kernel/module/version.c        |  101
-rw-r--r-- | kernel/module_signature.c      |   46
20 files changed, 7286 insertions, 0 deletions
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig new file mode 100644 index 0000000000..33a2e991f6 --- /dev/null +++ b/kernel/module/Kconfig @@ -0,0 +1,392 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig MODULES + bool "Enable loadable module support" + modules + help + Kernel modules are small pieces of compiled code which can + be inserted in the running kernel, rather than being + permanently built into the kernel. You use the "modprobe" + tool to add (and sometimes remove) them. If you say Y here, + many parts of the kernel can be built as modules (by + answering M instead of Y where indicated): this is most + useful for infrequently used options which are not required + for booting. For more information, see the man pages for + modprobe, lsmod, modinfo, insmod and rmmod. + + If you say Y here, you will need to run "make + modules_install" to put the modules under /lib/modules/ + where modprobe can find them (you may need to be root to do + this). + + If unsure, say Y. + +if MODULES + +config MODULE_DEBUGFS + bool + +config MODULE_DEBUG + bool "Module debugging" + depends on DEBUG_FS + help + Allows you to enable / disable features which can help you debug + modules. You don't need these options on production systems. + +if MODULE_DEBUG + +config MODULE_STATS + bool "Module statistics" + depends on DEBUG_FS + select MODULE_DEBUGFS + help + This option allows you to maintain a record of module statistics. + For example, size of all modules, average size, text size, a list + of failed modules and the size for each of those. For failed + modules we keep track of modules which failed due to either the + existing module taking too long to load or that module was already + loaded. + + You should enable this if you are debugging production loads + and want to see if userspace or the kernel is doing stupid things + with loading modules when it shouldn't or if you want to help + optimize userspace / kernel space module autoloading schemes. + You might want to do this because failed modules tend to use + up significant amount of memory, and so you'd be doing everyone a + favor in avoiding these failures proactively. + + This functionality is also useful for those experimenting with + module .text ELF section optimization. + + If unsure, say N. + +config MODULE_DEBUG_AUTOLOAD_DUPS + bool "Debug duplicate modules with auto-loading" + help + Module autoloading allows in-kernel code to request modules through + the *request_module*() API calls. This in turn just calls userspace + modprobe. Although modprobe checks to see if a module is already + loaded before trying to load a module there is a small time window in + which multiple duplicate requests can end up in userspace and multiple + modprobe calls race calling finit_module() around the same time for + duplicate modules. The finit_module() system call can consume in the + worst case more than twice the respective module size in virtual + memory for each duplicate module requests. Although duplicate module + requests are non-fatal virtual memory is a limited resource and each + duplicate module request ends up just unnecessarily straining virtual + memory. + + This debugging facility will create pr_warn() splats for duplicate + module requests to help identify if module auto-loading may be the + culprit to your early boot virtual memory pressure. 
Since virtual + memory abuse caused by duplicate module requests could render a + system unusable this functionality will also converge races in + requests for the same module to a single request. You can boot with + the module.enable_dups_trace=1 kernel parameter to use WARN_ON() + instead of the pr_warn(). + + If the first module request used request_module_nowait() we cannot + use that as the anchor to wait for duplicate module requests, since + users of request_module() do want a proper return value. If a call + for the same module happened earlier with request_module() though, + then a duplicate request_module_nowait() would be detected. The + non-wait request_module() call is synchronous and waits until modprobe + completes. Subsequent auto-loading requests for the same module do + not trigger a new finit_module() calls and do not strain virtual + memory, and so as soon as modprobe successfully completes we remove + tracking for duplicates for that module. + + Enable this functionality to try to debug virtual memory abuse during + boot on systems which are failing to boot or if you suspect you may be + straining virtual memory during boot, and you want to identify if the + abuse was due to module auto-loading. These issues are currently only + known to occur on systems with many CPUs (over 400) and is likely the + result of udev issuing duplicate module requests for each CPU, and so + module auto-loading is not the culprit. There may very well still be + many duplicate module auto-loading requests which could be optimized + for and this debugging facility can be used to help identify them. + + Only enable this for debugging system functionality, never have it + enabled on real systems. + +config MODULE_DEBUG_AUTOLOAD_DUPS_TRACE + bool "Force full stack trace when duplicates are found" + depends on MODULE_DEBUG_AUTOLOAD_DUPS + help + Enabling this will force a full stack trace for duplicate module + auto-loading requests using WARN_ON() instead of pr_warn(). You + should keep this disabled at all times unless you are a developer + and are doing a manual inspection and want to debug exactly why + these duplicates occur. + +endif # MODULE_DEBUG + +config MODULE_FORCE_LOAD + bool "Forced module loading" + default n + help + Allow loading of modules without version information (ie. modprobe + --force). Forced module loading sets the 'F' (forced) taint flag and + is usually a really bad idea. + +config MODULE_UNLOAD + bool "Module unloading" + help + Without this option you will not be able to unload any + modules (note that some modules may not be unloadable + anyway), which makes your kernel smaller, faster + and simpler. If unsure, say Y. + +config MODULE_FORCE_UNLOAD + bool "Forced module unloading" + depends on MODULE_UNLOAD + help + This option allows you to force a module to unload, even if the + kernel believes it is unsafe: the kernel will remove the module + without waiting for anyone to stop using it (using the -f option to + rmmod). This is mainly for kernel developers and desperate users. + If unsure, say N. + +config MODULE_UNLOAD_TAINT_TRACKING + bool "Tainted module unload tracking" + depends on MODULE_UNLOAD + select MODULE_DEBUGFS + help + This option allows you to maintain a record of each unloaded + module that tainted the kernel. In addition to displaying a + list of linked (or loaded) modules e.g. on detection of a bad + page (see bad_page()), the aforementioned details are also + shown. If unsure, say N. 
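As a rough sketch of the caller side that the MODULE_DEBUG_AUTOLOAD_DUPS help above instruments (this fragment is not part of the patch; the foo_* names and module names are hypothetical, while request_module() and request_module_nowait() are the existing <linux/kmod.h> entry points):

#include <linux/kmod.h>

/* Hypothetical driver hook: each probed device asks for its protocol module. */
static int foo_attach_one(int proto_id)
{
	int ret;

	/*
	 * Synchronous autoload: blocks until modprobe exits, so the caller
	 * gets a real return value. Two CPUs probing devices with the same
	 * proto_id at roughly the same time produce exactly the duplicate
	 * requests this option reports.
	 */
	ret = request_module("foo-proto-%d", proto_id);
	if (ret)
		return ret;

	/*
	 * Fire-and-forget variant: there is no return value worth sharing,
	 * which is why a first request issued this way cannot serve as the
	 * anchor that later duplicate requests wait on.
	 */
	request_module_nowait("foo-helper");

	return 0;
}

With the option enabled, racing calls for the same "foo-proto-N" name converge on the first request and emit the pr_warn() splat (or a WARN_ON() with module.enable_dups_trace=1) instead of each spawning another modprobe/finit_module() round trip.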
+ +config MODVERSIONS + bool "Module versioning support" + help + Usually, you have to use modules compiled with your kernel. + Saying Y here makes it sometimes possible to use modules + compiled for different kernels, by adding enough information + to the modules to (hopefully) spot any changes which would + make them incompatible with the kernel you are running. If + unsure, say N. + +config ASM_MODVERSIONS + bool + default HAVE_ASM_MODVERSIONS && MODVERSIONS + help + This enables module versioning for exported symbols also from + assembly. This can be enabled only when the target architecture + supports it. + +config MODULE_SRCVERSION_ALL + bool "Source checksum for all modules" + help + Modules which contain a MODULE_VERSION get an extra "srcversion" + field inserted into their modinfo section, which contains a + sum of the source files which made it. This helps maintainers + see exactly which source was used to build a module (since + others sometimes change the module source without updating + the version). With this option, such a "srcversion" field + will be created for all modules. If unsure, say N. + +config MODULE_SIG + bool "Module signature verification" + select MODULE_SIG_FORMAT + help + Check modules for valid signatures upon load: the signature + is simply appended to the module. For more information see + <file:Documentation/admin-guide/module-signing.rst>. + + Note that this option adds the OpenSSL development packages as a + kernel build dependency so that the signing tool can use its crypto + library. + + You should enable this option if you wish to use either + CONFIG_SECURITY_LOCKDOWN_LSM or lockdown functionality imposed via + another LSM - otherwise unsigned modules will be loadable regardless + of the lockdown policy. + + !!!WARNING!!! If you enable this option, you MUST make sure that the + module DOES NOT get stripped after being signed. This includes the + debuginfo strip done by some packagers (such as rpmbuild) and + inclusion into an initramfs that wants the module size reduced. + +config MODULE_SIG_FORCE + bool "Require modules to be validly signed" + depends on MODULE_SIG + help + Reject unsigned modules or signed modules for which we don't have a + key. Without this, such modules will simply taint the kernel. + +config MODULE_SIG_ALL + bool "Automatically sign all modules" + default y + depends on MODULE_SIG || IMA_APPRAISE_MODSIG + help + Sign all modules during make modules_install. Without this option, + modules must be signed manually, using the scripts/sign-file tool. + +comment "Do not forget to sign required modules with scripts/sign-file" + depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL + +choice + prompt "Which hash algorithm should modules be signed with?" + depends on MODULE_SIG || IMA_APPRAISE_MODSIG + help + This determines which sort of hashing algorithm will be used during + signature generation. This algorithm _must_ be built into the kernel + directly so that signature verification can take place. It is not + possible to load a signed module containing the algorithm to check + the signature on that module. 
+ +config MODULE_SIG_SHA1 + bool "Sign modules with SHA-1" + select CRYPTO_SHA1 + +config MODULE_SIG_SHA224 + bool "Sign modules with SHA-224" + select CRYPTO_SHA256 + +config MODULE_SIG_SHA256 + bool "Sign modules with SHA-256" + select CRYPTO_SHA256 + +config MODULE_SIG_SHA384 + bool "Sign modules with SHA-384" + select CRYPTO_SHA512 + +config MODULE_SIG_SHA512 + bool "Sign modules with SHA-512" + select CRYPTO_SHA512 + +endchoice + +config MODULE_SIG_HASH + string + depends on MODULE_SIG || IMA_APPRAISE_MODSIG + default "sha1" if MODULE_SIG_SHA1 + default "sha224" if MODULE_SIG_SHA224 + default "sha256" if MODULE_SIG_SHA256 + default "sha384" if MODULE_SIG_SHA384 + default "sha512" if MODULE_SIG_SHA512 + +choice + prompt "Module compression mode" + help + This option allows you to choose the algorithm which will be used to + compress modules when 'make modules_install' is run. (or, you can + choose to not compress modules at all.) + + External modules will also be compressed in the same way during the + installation. + + For modules inside an initrd or initramfs, it's more efficient to + compress the whole initrd or initramfs instead. + + This is fully compatible with signed modules. + + Please note that the tool used to load modules needs to support the + corresponding algorithm. module-init-tools MAY support gzip, and kmod + MAY support gzip, xz and zstd. + + Your build system needs to provide the appropriate compression tool + to compress the modules. + + If in doubt, select 'None'. + +config MODULE_COMPRESS_NONE + bool "None" + help + Do not compress modules. The installed modules are suffixed + with .ko. + +config MODULE_COMPRESS_GZIP + bool "GZIP" + help + Compress modules with GZIP. The installed modules are suffixed + with .ko.gz. + +config MODULE_COMPRESS_XZ + bool "XZ" + help + Compress modules with XZ. The installed modules are suffixed + with .ko.xz. + +config MODULE_COMPRESS_ZSTD + bool "ZSTD" + help + Compress modules with ZSTD. The installed modules are suffixed + with .ko.zst. + +endchoice + +config MODULE_DECOMPRESS + bool "Support in-kernel module decompression" + depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ || MODULE_COMPRESS_ZSTD + select ZLIB_INFLATE if MODULE_COMPRESS_GZIP + select XZ_DEC if MODULE_COMPRESS_XZ + select ZSTD_DECOMPRESS if MODULE_COMPRESS_ZSTD + help + + Support for decompressing kernel modules by the kernel itself + instead of relying on userspace to perform this task. Useful when + load pinning security policy is enabled. + + If unsure, say N. + +config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + bool "Allow loading of modules with missing namespace imports" + help + Symbols exported with EXPORT_SYMBOL_NS*() are considered exported in + a namespace. A module that makes use of a symbol exported with such a + namespace is required to import the namespace via MODULE_IMPORT_NS(). + There is no technical reason to enforce correct namespace imports, + but it creates consistency between symbols defining namespaces and + users importing namespaces they make use of. This option relaxes this + requirement and lifts the enforcement when loading a module. + + If unsure, say N. + +config MODPROBE_PATH + string "Path to modprobe binary" + default "/sbin/modprobe" + help + When kernel code requests a module, it does so by calling + the "modprobe" userspace utility. This option allows you to + set the path where that binary is found. This can be changed + at runtime via the sysctl file + /proc/sys/kernel/modprobe. 
Setting this to the empty string + removes the kernel's ability to request modules (but + userspace can still load modules explicitly). + +config TRIM_UNUSED_KSYMS + bool "Trim unused exported kernel symbols" if EXPERT + depends on !COMPILE_TEST + help + The kernel and some modules make many symbols available for + other modules to use via EXPORT_SYMBOL() and variants. Depending + on the set of modules being selected in your kernel configuration, + many of those exported symbols might never be used. + + This option allows for unused exported symbols to be dropped from + the build. In turn, this provides the compiler more opportunities + (especially when using LTO) for optimizing the code and reducing + binary size. This might have some security advantages as well. + + If unsure, or if you need to build out-of-tree modules, say N. + +config UNUSED_KSYMS_WHITELIST + string "Whitelist of symbols to keep in ksymtab" + depends on TRIM_UNUSED_KSYMS + help + By default, all unused exported symbols will be un-exported from the + build when TRIM_UNUSED_KSYMS is selected. + + UNUSED_KSYMS_WHITELIST allows to whitelist symbols that must be kept + exported at all times, even in absence of in-tree users. The value to + set here is the path to a text file containing the list of symbols, + one per line. The path can be absolute, or relative to the kernel + source tree. + +config MODULES_TREE_LOOKUP + def_bool y + depends on PERF_EVENTS || TRACING || CFI_CLANG + +endif # MODULES diff --git a/kernel/module/Makefile b/kernel/module/Makefile new file mode 100644 index 0000000000..a10b2b9a6f --- /dev/null +++ b/kernel/module/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for linux kernel module support +# + +# These are called from save_stack_trace() on slub debug path, +# and produce insane amounts of uninteresting coverage. 
+KCOV_INSTRUMENT_module.o := n + +obj-y += main.o +obj-y += strict_rwx.o +obj-y += kmod.o +obj-$(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS) += dups.o +obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o +obj-$(CONFIG_MODULE_SIG) += signing.o +obj-$(CONFIG_LIVEPATCH) += livepatch.o +obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o +obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o +obj-$(CONFIG_KALLSYMS) += kallsyms.o +obj-$(CONFIG_PROC_FS) += procfs.o +obj-$(CONFIG_SYSFS) += sysfs.o +obj-$(CONFIG_KGDB_KDB) += kdb.o +obj-$(CONFIG_MODVERSIONS) += version.o +obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o +obj-$(CONFIG_MODULE_STATS) += stats.o diff --git a/kernel/module/debug_kmemleak.c b/kernel/module/debug_kmemleak.c new file mode 100644 index 0000000000..12a569d361 --- /dev/null +++ b/kernel/module/debug_kmemleak.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kmemleak support + * + * Copyright (C) 2009 Catalin Marinas + */ + +#include <linux/module.h> +#include <linux/kmemleak.h> +#include "internal.h" + +void kmemleak_load_module(const struct module *mod, + const struct load_info *info) +{ + unsigned int i; + + /* only scan the sections containing data */ + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); + + for (i = 1; i < info->hdr->e_shnum; i++) { + /* Scan all writable sections that's not executable */ + if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || + !(info->sechdrs[i].sh_flags & SHF_WRITE) || + (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) + continue; + + kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, + info->sechdrs[i].sh_size, GFP_KERNEL); + } +} diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c new file mode 100644 index 0000000000..474e68f0f0 --- /dev/null +++ b/kernel/module/decompress.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2021 Google LLC. + */ + +#include <linux/init.h> +#include <linux/highmem.h> +#include <linux/kobject.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sysfs.h> +#include <linux/vmalloc.h> + +#include "internal.h" + +static int module_extend_max_pages(struct load_info *info, unsigned int extent) +{ + struct page **new_pages; + + new_pages = kvmalloc_array(info->max_pages + extent, + sizeof(info->pages), GFP_KERNEL); + if (!new_pages) + return -ENOMEM; + + memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages)); + kvfree(info->pages); + info->pages = new_pages; + info->max_pages += extent; + + return 0; +} + +static struct page *module_get_next_page(struct load_info *info) +{ + struct page *page; + int error; + + if (info->max_pages == info->used_pages) { + error = module_extend_max_pages(info, info->used_pages); + if (error) + return ERR_PTR(error); + } + + page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + if (!page) + return ERR_PTR(-ENOMEM); + + info->pages[info->used_pages++] = page; + return page; +} + +#if defined(CONFIG_MODULE_COMPRESS_GZIP) +#include <linux/zlib.h> +#define MODULE_COMPRESSION gzip +#define MODULE_DECOMPRESS_FN module_gzip_decompress + +/* + * Calculate length of the header which consists of signature, header + * flags, time stamp and operating system ID (10 bytes total), plus + * an optional filename. 
+ */ +static size_t module_gzip_header_len(const u8 *buf, size_t size) +{ + const u8 signature[] = { 0x1f, 0x8b, 0x08 }; + size_t len = 10; + + if (size < len || memcmp(buf, signature, sizeof(signature))) + return 0; + + if (buf[3] & 0x08) { + do { + /* + * If we can't find the end of the file name we must + * be dealing with a corrupted file. + */ + if (len == size) + return 0; + } while (buf[len++] != '\0'); + } + + return len; +} + +static ssize_t module_gzip_decompress(struct load_info *info, + const void *buf, size_t size) +{ + struct z_stream_s s = { 0 }; + size_t new_size = 0; + size_t gzip_hdr_len; + ssize_t retval; + int rc; + + gzip_hdr_len = module_gzip_header_len(buf, size); + if (!gzip_hdr_len) { + pr_err("not a gzip compressed module\n"); + return -EINVAL; + } + + s.next_in = buf + gzip_hdr_len; + s.avail_in = size - gzip_hdr_len; + + s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); + if (!s.workspace) + return -ENOMEM; + + rc = zlib_inflateInit2(&s, -MAX_WBITS); + if (rc != Z_OK) { + pr_err("failed to initialize decompressor: %d\n", rc); + retval = -EINVAL; + goto out; + } + + do { + struct page *page = module_get_next_page(info); + + if (IS_ERR(page)) { + retval = PTR_ERR(page); + goto out_inflate_end; + } + + s.next_out = kmap_local_page(page); + s.avail_out = PAGE_SIZE; + rc = zlib_inflate(&s, 0); + kunmap_local(s.next_out); + + new_size += PAGE_SIZE - s.avail_out; + } while (rc == Z_OK); + + if (rc != Z_STREAM_END) { + pr_err("decompression failed with status %d\n", rc); + retval = -EINVAL; + goto out_inflate_end; + } + + retval = new_size; + +out_inflate_end: + zlib_inflateEnd(&s); +out: + kvfree(s.workspace); + return retval; +} +#elif defined(CONFIG_MODULE_COMPRESS_XZ) +#include <linux/xz.h> +#define MODULE_COMPRESSION xz +#define MODULE_DECOMPRESS_FN module_xz_decompress + +static ssize_t module_xz_decompress(struct load_info *info, + const void *buf, size_t size) +{ + static const u8 signature[] = { 0xfd, '7', 'z', 'X', 'Z', 0 }; + struct xz_dec *xz_dec; + struct xz_buf xz_buf; + enum xz_ret xz_ret; + size_t new_size = 0; + ssize_t retval; + + if (size < sizeof(signature) || + memcmp(buf, signature, sizeof(signature))) { + pr_err("not an xz compressed module\n"); + return -EINVAL; + } + + xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1); + if (!xz_dec) + return -ENOMEM; + + xz_buf.in_size = size; + xz_buf.in = buf; + xz_buf.in_pos = 0; + + do { + struct page *page = module_get_next_page(info); + + if (IS_ERR(page)) { + retval = PTR_ERR(page); + goto out; + } + + xz_buf.out = kmap_local_page(page); + xz_buf.out_pos = 0; + xz_buf.out_size = PAGE_SIZE; + xz_ret = xz_dec_run(xz_dec, &xz_buf); + kunmap_local(xz_buf.out); + + new_size += xz_buf.out_pos; + } while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK); + + if (xz_ret != XZ_STREAM_END) { + pr_err("decompression failed with status %d\n", xz_ret); + retval = -EINVAL; + goto out; + } + + retval = new_size; + + out: + xz_dec_end(xz_dec); + return retval; +} +#elif defined(CONFIG_MODULE_COMPRESS_ZSTD) +#include <linux/zstd.h> +#define MODULE_COMPRESSION zstd +#define MODULE_DECOMPRESS_FN module_zstd_decompress + +static ssize_t module_zstd_decompress(struct load_info *info, + const void *buf, size_t size) +{ + static const u8 signature[] = { 0x28, 0xb5, 0x2f, 0xfd }; + ZSTD_outBuffer zstd_dec; + ZSTD_inBuffer zstd_buf; + zstd_frame_header header; + size_t wksp_size; + void *wksp = NULL; + ZSTD_DStream *dstream; + size_t ret; + size_t new_size = 0; + int retval; + + if (size < sizeof(signature) || + 
memcmp(buf, signature, sizeof(signature))) { + pr_err("not a zstd compressed module\n"); + return -EINVAL; + } + + zstd_buf.src = buf; + zstd_buf.pos = 0; + zstd_buf.size = size; + + ret = zstd_get_frame_header(&header, zstd_buf.src, zstd_buf.size); + if (ret != 0) { + pr_err("ZSTD-compressed data has an incomplete frame header\n"); + retval = -EINVAL; + goto out; + } + if (header.windowSize > (1 << ZSTD_WINDOWLOG_MAX)) { + pr_err("ZSTD-compressed data has too large a window size\n"); + retval = -EINVAL; + goto out; + } + + wksp_size = zstd_dstream_workspace_bound(header.windowSize); + wksp = kvmalloc(wksp_size, GFP_KERNEL); + if (!wksp) { + retval = -ENOMEM; + goto out; + } + + dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size); + if (!dstream) { + pr_err("Can't initialize ZSTD stream\n"); + retval = -ENOMEM; + goto out; + } + + do { + struct page *page = module_get_next_page(info); + + if (IS_ERR(page)) { + retval = PTR_ERR(page); + goto out; + } + + zstd_dec.dst = kmap_local_page(page); + zstd_dec.pos = 0; + zstd_dec.size = PAGE_SIZE; + + ret = zstd_decompress_stream(dstream, &zstd_dec, &zstd_buf); + kunmap_local(zstd_dec.dst); + retval = zstd_get_error_code(ret); + if (retval) + break; + + new_size += zstd_dec.pos; + } while (zstd_dec.pos == PAGE_SIZE && ret != 0); + + if (retval) { + pr_err("ZSTD-decompression failed with status %d\n", retval); + retval = -EINVAL; + goto out; + } + + retval = new_size; + + out: + kvfree(wksp); + return retval; +} +#else +#error "Unexpected configuration for CONFIG_MODULE_DECOMPRESS" +#endif + +int module_decompress(struct load_info *info, const void *buf, size_t size) +{ + unsigned int n_pages; + ssize_t data_size; + int error; + +#if defined(CONFIG_MODULE_STATS) + info->compressed_len = size; +#endif + + /* + * Start with number of pages twice as big as needed for + * compressed data. 
+ */ + n_pages = DIV_ROUND_UP(size, PAGE_SIZE) * 2; + error = module_extend_max_pages(info, n_pages); + + data_size = MODULE_DECOMPRESS_FN(info, buf, size); + if (data_size < 0) { + error = data_size; + goto err; + } + + info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL); + if (!info->hdr) { + error = -ENOMEM; + goto err; + } + + info->len = data_size; + return 0; + +err: + module_decompress_cleanup(info); + return error; +} + +void module_decompress_cleanup(struct load_info *info) +{ + int i; + + if (info->hdr) + vunmap(info->hdr); + + for (i = 0; i < info->used_pages; i++) + __free_page(info->pages[i]); + + kvfree(info->pages); + + info->pages = NULL; + info->max_pages = info->used_pages = 0; +} + +#ifdef CONFIG_SYSFS +static ssize_t compression_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, __stringify(MODULE_COMPRESSION) "\n"); +} + +static struct kobj_attribute module_compression_attr = __ATTR_RO(compression); + +static int __init module_decompress_sysfs_init(void) +{ + int error; + + error = sysfs_create_file(&module_kset->kobj, + &module_compression_attr.attr); + if (error) + pr_warn("Failed to create 'compression' attribute"); + + return 0; +} +late_initcall(module_decompress_sysfs_init); +#endif diff --git a/kernel/module/dups.c b/kernel/module/dups.c new file mode 100644 index 0000000000..f3d7ea1e96 --- /dev/null +++ b/kernel/module/dups.c @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * kmod dups - the kernel module autoloader duplicate suppressor + * + * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + */ + +#define pr_fmt(fmt) "module: " fmt + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/binfmts.h> +#include <linux/syscalls.h> +#include <linux/unistd.h> +#include <linux/kmod.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/cred.h> +#include <linux/file.h> +#include <linux/fdtable.h> +#include <linux/workqueue.h> +#include <linux/security.h> +#include <linux/mount.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/resource.h> +#include <linux/notifier.h> +#include <linux/suspend.h> +#include <linux/rwsem.h> +#include <linux/ptrace.h> +#include <linux/async.h> +#include <linux/uaccess.h> + +#include "internal.h" + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "module." +static bool enable_dups_trace = IS_ENABLED(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE); +module_param(enable_dups_trace, bool_enable_only, 0644); + +/* + * Protects dup_kmod_reqs list, adds / removals with RCU. + */ +static DEFINE_MUTEX(kmod_dup_mutex); +static LIST_HEAD(dup_kmod_reqs); + +struct kmod_dup_req { + struct list_head list; + char name[MODULE_NAME_LEN]; + struct completion first_req_done; + struct work_struct complete_work; + struct delayed_work delete_work; + int dup_ret; +}; + +static struct kmod_dup_req *kmod_dup_request_lookup(char *module_name) +{ + struct kmod_dup_req *kmod_req; + + list_for_each_entry_rcu(kmod_req, &dup_kmod_reqs, list, + lockdep_is_held(&kmod_dup_mutex)) { + if (strlen(kmod_req->name) == strlen(module_name) && + !memcmp(kmod_req->name, module_name, strlen(module_name))) { + return kmod_req; + } + } + + return NULL; +} + +static void kmod_dup_request_delete(struct work_struct *work) +{ + struct kmod_dup_req *kmod_req; + kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work); + + /* + * The typical situation is a module successully loaded. 
In that + * situation the module will be present already in userspace. If + * new requests come in after that, userspace will already know the + * module is loaded so will just return 0 right away. There is still + * a small chance right after we delete this entry new request_module() + * calls may happen after that, they can happen. These heuristics + * are to protect finit_module() abuse for auto-loading, if modules + * are still tryign to auto-load even if a module is already loaded, + * that's on them, and those inneficiencies should not be fixed by + * kmod. The inneficies there are a call to modprobe and modprobe + * just returning 0. + */ + mutex_lock(&kmod_dup_mutex); + list_del_rcu(&kmod_req->list); + synchronize_rcu(); + mutex_unlock(&kmod_dup_mutex); + kfree(kmod_req); +} + +static void kmod_dup_request_complete(struct work_struct *work) +{ + struct kmod_dup_req *kmod_req; + + kmod_req = container_of(work, struct kmod_dup_req, complete_work); + + /* + * This will ensure that the kernel will let all the waiters get + * informed its time to check the return value. It's time to + * go home. + */ + complete_all(&kmod_req->first_req_done); + + /* + * Now that we have allowed prior request_module() calls to go on + * with life, let's schedule deleting this entry. We don't have + * to do it right away, but we *eventually* want to do it so to not + * let this linger forever as this is just a boot optimization for + * possible abuses of vmalloc() incurred by finit_module() thrashing. + */ + queue_delayed_work(system_wq, &kmod_req->delete_work, 60 * HZ); +} + +bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret) +{ + struct kmod_dup_req *kmod_req, *new_kmod_req; + int ret; + + /* + * Pre-allocate the entry in case we have to use it later + * to avoid contention with the mutex. + */ + new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL); + if (!new_kmod_req) + return false; + + memcpy(new_kmod_req->name, module_name, strlen(module_name)); + INIT_WORK(&new_kmod_req->complete_work, kmod_dup_request_complete); + INIT_DELAYED_WORK(&new_kmod_req->delete_work, kmod_dup_request_delete); + init_completion(&new_kmod_req->first_req_done); + + mutex_lock(&kmod_dup_mutex); + + kmod_req = kmod_dup_request_lookup(module_name); + if (!kmod_req) { + /* + * If the first request that came through for a module + * was with request_module_nowait() we cannot wait for it + * and share its return value with other users which may + * have used request_module() and need a proper return value + * so just skip using them as an anchor. + * + * If a prior request to this one came through with + * request_module() though, then a request_module_nowait() + * would benefit from duplicate detection. + */ + if (!wait) { + kfree(new_kmod_req); + pr_debug("New request_module_nowait() for %s -- cannot track duplicates for this request\n", module_name); + mutex_unlock(&kmod_dup_mutex); + return false; + } + + /* + * There was no duplicate, just add the request so we can + * keep tab on duplicates later. + */ + pr_debug("New request_module() for %s\n", module_name); + list_add_rcu(&new_kmod_req->list, &dup_kmod_reqs); + mutex_unlock(&kmod_dup_mutex); + return false; + } + mutex_unlock(&kmod_dup_mutex); + + /* We are dealing with a duplicate request now */ + kfree(new_kmod_req); + + /* + * To fix these try to use try_then_request_module() instead as that + * will check if the component you are looking for is present or not. 
+ * You could also just queue a single request to load the module once, + * instead of having each and everything you need try to request for + * the module. + * + * Duplicate request_module() calls can cause quite a bit of wasted + * vmalloc() space when racing with userspace. + */ + if (enable_dups_trace) + WARN(1, "module-autoload: duplicate request for module %s\n", module_name); + else + pr_warn("module-autoload: duplicate request for module %s\n", module_name); + + if (!wait) { + /* + * If request_module_nowait() was used then the user just + * wanted to issue the request and if another module request + * was already its way with the same name we don't care for + * the return value either. Let duplicate request_module_nowait() + * calls bail out right away. + */ + *dup_ret = 0; + return true; + } + + /* + * If a duplicate request_module() was used they *may* care for + * the return value, so we have no other option but to wait for + * the first caller to complete. If the first caller used + * the request_module_nowait() call, subsquent callers will + * deal with the comprmise of getting a successful call with this + * optimization enabled ... + */ + ret = wait_for_completion_state(&kmod_req->first_req_done, + TASK_UNINTERRUPTIBLE | TASK_KILLABLE); + if (ret) { + *dup_ret = ret; + return true; + } + + /* Now the duplicate request has the same exact return value as the first request */ + *dup_ret = kmod_req->dup_ret; + + return true; +} + +void kmod_dup_request_announce(char *module_name, int ret) +{ + struct kmod_dup_req *kmod_req; + + mutex_lock(&kmod_dup_mutex); + + kmod_req = kmod_dup_request_lookup(module_name); + if (!kmod_req) + goto out; + + kmod_req->dup_ret = ret; + + /* + * If we complete() here we may allow duplicate threads + * to continue before the first one that submitted the + * request. We're in no rush also, given that each and + * every bounce back to userspace is slow we avoid that + * with a slight delay here. So queueue up the completion + * and let duplicates suffer, just wait a tad bit longer. + * There is no rush. But we also don't want to hold the + * caller up forever or introduce any boot delays. + */ + queue_work(system_wq, &kmod_req->complete_work); + +out: + mutex_unlock(&kmod_dup_mutex); +} diff --git a/kernel/module/internal.h b/kernel/module/internal.h new file mode 100644 index 0000000000..c8b7b4dcf7 --- /dev/null +++ b/kernel/module/internal.h @@ -0,0 +1,406 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Module internals + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + */ + +#include <linux/elf.h> +#include <linux/compiler.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/mm.h> + +#ifndef ARCH_SHF_SMALL +#define ARCH_SHF_SMALL 0 +#endif + +/* + * Use highest 4 bits of sh_entsize to store the mod_mem_type of this + * section. This leaves 28 bits for offset on 32-bit systems, which is + * about 256 MiB (WARN_ON_ONCE if we exceed that). 
+ */ + +#define SH_ENTSIZE_TYPE_BITS 4 +#define SH_ENTSIZE_TYPE_SHIFT (BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS) +#define SH_ENTSIZE_TYPE_MASK ((1UL << SH_ENTSIZE_TYPE_BITS) - 1) +#define SH_ENTSIZE_OFFSET_MASK ((1UL << (BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS)) - 1) + +/* Maximum number of characters written by module_flags() */ +#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) + +struct kernel_symbol { +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + int value_offset; + int name_offset; + int namespace_offset; +#else + unsigned long value; + const char *name; + const char *namespace; +#endif +}; + +extern struct mutex module_mutex; +extern struct list_head modules; + +extern struct module_attribute *modinfo_attrs[]; +extern size_t modinfo_attrs_count; + +/* Provided by the linker */ +extern const struct kernel_symbol __start___ksymtab[]; +extern const struct kernel_symbol __stop___ksymtab[]; +extern const struct kernel_symbol __start___ksymtab_gpl[]; +extern const struct kernel_symbol __stop___ksymtab_gpl[]; +extern const s32 __start___kcrctab[]; +extern const s32 __start___kcrctab_gpl[]; + +struct load_info { + const char *name; + /* pointer to module in temporary copy, freed at end of load_module() */ + struct module *mod; + Elf_Ehdr *hdr; + unsigned long len; + Elf_Shdr *sechdrs; + char *secstrings, *strtab; + unsigned long symoffs, stroffs, init_typeoffs, core_typeoffs; + bool sig_ok; +#ifdef CONFIG_KALLSYMS + unsigned long mod_kallsyms_init_off; +#endif +#ifdef CONFIG_MODULE_DECOMPRESS +#ifdef CONFIG_MODULE_STATS + unsigned long compressed_len; +#endif + struct page **pages; + unsigned int max_pages; + unsigned int used_pages; +#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +}; + +enum mod_license { + NOT_GPL_ONLY, + GPL_ONLY, +}; + +struct find_symbol_arg { + /* Input */ + const char *name; + bool gplok; + bool warn; + + /* Output */ + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +int mod_verify_sig(const void *mod, struct load_info *info); +int try_to_force_load(struct module *mod, const char *reason); +bool find_symbol(struct find_symbol_arg *fsa); +struct module *find_module_all(const char *name, size_t len, bool even_unformed); +int cmp_name(const void *name, const void *sym); +long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, + Elf_Shdr *sechdr, unsigned int section); +char *module_flags(struct module *mod, char *buf, bool show_state); +size_t module_flags_taint(unsigned long taints, char *buf); + +char *module_next_tag_pair(char *string, unsigned long *secsize); + +#define for_each_modinfo_entry(entry, info, name) \ + for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry)) + +static inline void module_assert_mutex_or_preempt(void) +{ +#ifdef CONFIG_LOCKDEP + if (unlikely(!debug_locks)) + return; + + WARN_ON_ONCE(!rcu_read_lock_sched_held() && + !lockdep_is_held(&module_mutex)); +#endif +} + +static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + return (unsigned long)offset_to_ptr(&sym->value_offset); +#else + return sym->value; +#endif +} + +#ifdef CONFIG_LIVEPATCH +int copy_module_elf(struct module *mod, struct load_info *info); +void free_module_elf(struct module *mod); +#else /* !CONFIG_LIVEPATCH */ +static inline int copy_module_elf(struct module *mod, struct load_info *info) +{ + return 0; +} + +static inline void free_module_elf(struct module *mod) 
{ } +#endif /* CONFIG_LIVEPATCH */ + +static inline bool set_livepatch_module(struct module *mod) +{ +#ifdef CONFIG_LIVEPATCH + mod->klp = true; + return true; +#else + return false; +#endif +} + +/** + * enum fail_dup_mod_reason - state at which a duplicate module was detected + * + * @FAIL_DUP_MOD_BECOMING: the module is read properly, passes all checks but + * we've determined that another module with the same name is already loaded + * or being processed on our &modules list. This happens on early_mod_check() + * right before layout_and_allocate(). The kernel would have already + * vmalloc()'d space for the entire module through finit_module(). If + * decompression was used two vmap() spaces were used. These failures can + * happen when userspace has not seen the module present on the kernel and + * tries to load the module multiple times at same time. + * @FAIL_DUP_MOD_LOAD: the module has been read properly, passes all validation + * checks and the kernel determines that the module was unique and because + * of this allocated yet another private kernel copy of the module space in + * layout_and_allocate() but after this determined in add_unformed_module() + * that another module with the same name is already loaded or being processed. + * These failures should be mitigated as much as possible and are indicative + * of really fast races in loading modules. Without module decompression + * they waste twice as much vmap space. With module decompression three + * times the module's size vmap space is wasted. + */ +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD, +}; + +#ifdef CONFIG_MODULE_DEBUGFS +extern struct dentry *mod_debugfs_root; +#endif + +#ifdef CONFIG_MODULE_STATS + +#define mod_stat_add_long(count, var) atomic_long_add(count, var) +#define mod_stat_inc(name) atomic_inc(name) + +extern atomic_long_t total_mod_size; +extern atomic_long_t total_text_size; +extern atomic_long_t invalid_kread_bytes; +extern atomic_long_t invalid_decompress_bytes; + +extern atomic_t modcount; +extern atomic_t failed_kreads; +extern atomic_t failed_decompress; +struct mod_fail_load { + struct list_head list; + char name[MODULE_NAME_LEN]; + atomic_long_t count; + unsigned long dup_fail_mask; +}; + +int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason); +void mod_stat_bump_invalid(struct load_info *info, int flags); +void mod_stat_bump_becoming(struct load_info *info, int flags); + +#else + +#define mod_stat_add_long(name, var) +#define mod_stat_inc(name) + +static inline int try_add_failed_module(const char *name, + enum fail_dup_mod_reason reason) +{ + return 0; +} + +static inline void mod_stat_bump_invalid(struct load_info *info, int flags) +{ +} + +static inline void mod_stat_bump_becoming(struct load_info *info, int flags) +{ +} + +#endif /* CONFIG_MODULE_STATS */ + +#ifdef CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS +bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret); +void kmod_dup_request_announce(char *module_name, int ret); +#else +static inline bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret) +{ + return false; +} + +static inline void kmod_dup_request_announce(char *module_name, int ret) +{ +} +#endif + +#ifdef CONFIG_MODULE_UNLOAD_TAINT_TRACKING +struct mod_unload_taint { + struct list_head list; + char name[MODULE_NAME_LEN]; + unsigned long taints; + u64 count; +}; + +int try_add_tainted_module(struct module *mod); +void print_unloaded_tainted_modules(void); +#else /* 
!CONFIG_MODULE_UNLOAD_TAINT_TRACKING */ +static inline int try_add_tainted_module(struct module *mod) +{ + return 0; +} + +static inline void print_unloaded_tainted_modules(void) +{ +} +#endif /* CONFIG_MODULE_UNLOAD_TAINT_TRACKING */ + +#ifdef CONFIG_MODULE_DECOMPRESS +int module_decompress(struct load_info *info, const void *buf, size_t size); +void module_decompress_cleanup(struct load_info *info); +#else +static inline int module_decompress(struct load_info *info, + const void *buf, size_t size) +{ + return -EOPNOTSUPP; +} + +static inline void module_decompress_cleanup(struct load_info *info) +{ +} +#endif + +struct mod_tree_root { +#ifdef CONFIG_MODULES_TREE_LOOKUP + struct latch_tree_root root; +#endif + unsigned long addr_min; + unsigned long addr_max; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + unsigned long data_addr_min; + unsigned long data_addr_max; +#endif +}; + +extern struct mod_tree_root mod_tree; + +#ifdef CONFIG_MODULES_TREE_LOOKUP +void mod_tree_insert(struct module *mod); +void mod_tree_remove_init(struct module *mod); +void mod_tree_remove(struct module *mod); +struct module *mod_find(unsigned long addr, struct mod_tree_root *tree); +#else /* !CONFIG_MODULES_TREE_LOOKUP */ + +static inline void mod_tree_insert(struct module *mod) { } +static inline void mod_tree_remove_init(struct module *mod) { } +static inline void mod_tree_remove(struct module *mod) { } +static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *tree) +{ + struct module *mod; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + if (within_module(addr, mod)) + return mod; + } + + return NULL; +} +#endif /* CONFIG_MODULES_TREE_LOOKUP */ + +void module_enable_ro(const struct module *mod, bool after_init); +void module_enable_nx(const struct module *mod); +void module_enable_x(const struct module *mod); +int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod); + +#ifdef CONFIG_MODULE_SIG +int module_sig_check(struct load_info *info, int flags); +#else /* !CONFIG_MODULE_SIG */ +static inline int module_sig_check(struct load_info *info, int flags) +{ + return 0; +} +#endif /* !CONFIG_MODULE_SIG */ + +#ifdef CONFIG_DEBUG_KMEMLEAK +void kmemleak_load_module(const struct module *mod, const struct load_info *info); +#else /* !CONFIG_DEBUG_KMEMLEAK */ +static inline void kmemleak_load_module(const struct module *mod, + const struct load_info *info) { } +#endif /* CONFIG_DEBUG_KMEMLEAK */ + +#ifdef CONFIG_KALLSYMS +void init_build_id(struct module *mod, const struct load_info *info); +void layout_symtab(struct module *mod, struct load_info *info); +void add_kallsyms(struct module *mod, const struct load_info *info); + +static inline bool sect_empty(const Elf_Shdr *sect) +{ + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; +} +#else /* !CONFIG_KALLSYMS */ +static inline void init_build_id(struct module *mod, const struct load_info *info) { } +static inline void layout_symtab(struct module *mod, struct load_info *info) { } +static inline void add_kallsyms(struct module *mod, const struct load_info *info) { } +#endif /* CONFIG_KALLSYMS */ + +#ifdef CONFIG_SYSFS +int mod_sysfs_setup(struct module *mod, const struct load_info *info, + struct kernel_param *kparam, unsigned int num_params); +void mod_sysfs_teardown(struct module *mod); +void init_param_lock(struct module *mod); +#else /* !CONFIG_SYSFS */ +static inline int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + 
struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static inline void mod_sysfs_teardown(struct module *mod) { } +static inline void init_param_lock(struct module *mod) { } +#endif /* CONFIG_SYSFS */ + +#ifdef CONFIG_MODVERSIONS +int check_version(const struct load_info *info, + const char *symname, struct module *mod, const s32 *crc); +void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp, + struct kernel_symbol *ks, struct tracepoint * const *tp); +int check_modstruct_version(const struct load_info *info, struct module *mod); +int same_magic(const char *amagic, const char *bmagic, bool has_crcs); +#else /* !CONFIG_MODVERSIONS */ +static inline int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + return 1; +} + +static inline int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + return 1; +} + +static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) +{ + return strcmp(amagic, bmagic) == 0; +} +#endif /* CONFIG_MODVERSIONS */ diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c new file mode 100644 index 0000000000..ef73ae7c89 --- /dev/null +++ b/kernel/module/kallsyms.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kallsyms support + * + * Copyright (C) 2010 Rusty Russell + */ + +#include <linux/module.h> +#include <linux/module_symbol.h> +#include <linux/kallsyms.h> +#include <linux/buildid.h> +#include <linux/bsearch.h> +#include "internal.h" + +/* Lookup exported symbol in given range of kernel_symbols */ +static const struct kernel_symbol *lookup_exported_symbol(const char *name, + const struct kernel_symbol *start, + const struct kernel_symbol *stop) +{ + return bsearch(name, start, stop - start, + sizeof(struct kernel_symbol), cmp_name); +} + +static int is_exported(const char *name, unsigned long value, + const struct module *mod) +{ + const struct kernel_symbol *ks; + + if (!mod) + ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); + else + ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); + + return ks && kernel_symbol_value(ks) == value; +} + +/* As per nm */ +static char elf_type(const Elf_Sym *sym, const struct load_info *info) +{ + const Elf_Shdr *sechdrs = info->sechdrs; + + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { + if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) + return 'v'; + else + return 'w'; + } + if (sym->st_shndx == SHN_UNDEF) + return 'U'; + if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) + return 'a'; + if (sym->st_shndx >= SHN_LORESERVE) + return '?'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) + return 't'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC && + sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { + if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) + return 'r'; + else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 'g'; + else + return 'd'; + } + if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { + if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 's'; + else + return 'b'; + } + if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, + ".debug")) { + return 'n'; + } + return '?'; +} + +static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, + unsigned int shnum, unsigned int pcpundx) +{ + const Elf_Shdr *sec; + enum mod_mem_type type; + + if (src->st_shndx == SHN_UNDEF || + src->st_shndx >= 
shnum || + !src->st_name) + return false; + +#ifdef CONFIG_KALLSYMS_ALL + if (src->st_shndx == pcpundx) + return true; +#endif + + sec = sechdrs + src->st_shndx; + type = sec->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; + if (!(sec->sh_flags & SHF_ALLOC) +#ifndef CONFIG_KALLSYMS_ALL + || !(sec->sh_flags & SHF_EXECINSTR) +#endif + || mod_mem_type_is_init(type)) + return false; + + return true; +} + +/* + * We only allocate and copy the strings needed by the parts of symtab + * we keep. This is simple, but has the effect of making multiple + * copies of duplicates. We could be more sophisticated, see + * linux-kernel thread starting with + * <73defb5e4bca04a6431392cc341112b1@localhost>. + */ +void layout_symtab(struct module *mod, struct load_info *info) +{ + Elf_Shdr *symsect = info->sechdrs + info->index.sym; + Elf_Shdr *strsect = info->sechdrs + info->index.str; + const Elf_Sym *src; + unsigned int i, nsrc, ndst, strtab_size = 0; + struct module_memory *mod_mem_data = &mod->mem[MOD_DATA]; + struct module_memory *mod_mem_init_data = &mod->mem[MOD_INIT_DATA]; + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; + symsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA, + symsect, info->index.sym); + pr_debug("\t%s\n", info->secstrings + symsect->sh_name); + + src = (void *)info->hdr + symsect->sh_offset; + nsrc = symsect->sh_size / sizeof(*src); + + /* Compute total space required for the core symbols' strtab. */ + for (ndst = i = 0; i < nsrc; i++) { + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + strtab_size += strlen(&info->strtab[src[i].st_name]) + 1; + ndst++; + } + } + + /* Append room for core symbols at end of core part. */ + info->symoffs = ALIGN(mod_mem_data->size, symsect->sh_addralign ?: 1); + info->stroffs = mod_mem_data->size = info->symoffs + ndst * sizeof(Elf_Sym); + mod_mem_data->size += strtab_size; + /* Note add_kallsyms() computes strtab_size as core_typeoffs - stroffs */ + info->core_typeoffs = mod_mem_data->size; + mod_mem_data->size += ndst * sizeof(char); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; + strsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA, + strsect, info->index.str); + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + + /* We'll tack temporary mod_kallsyms on the end. */ + mod_mem_init_data->size = ALIGN(mod_mem_init_data->size, + __alignof__(struct mod_kallsyms)); + info->mod_kallsyms_init_off = mod_mem_init_data->size; + + mod_mem_init_data->size += sizeof(struct mod_kallsyms); + info->init_typeoffs = mod_mem_init_data->size; + mod_mem_init_data->size += nsrc * sizeof(char); +} + +/* + * We use the full symtab and strtab which layout_symtab arranged to + * be appended to the init section. Later we switch to the cut-down + * core-only ones. + */ +void add_kallsyms(struct module *mod, const struct load_info *info) +{ + unsigned int i, ndst; + const Elf_Sym *src; + Elf_Sym *dst; + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + unsigned long strtab_size; + void *data_base = mod->mem[MOD_DATA].base; + void *init_data_base = mod->mem[MOD_INIT_DATA].base; + + /* Set up to point into init section. 
*/ + mod->kallsyms = (void __rcu *)init_data_base + + info->mod_kallsyms_init_off; + + rcu_read_lock(); + /* The following is safe since this pointer cannot change */ + rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr; + rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ + rcu_dereference(mod->kallsyms)->strtab = + (void *)info->sechdrs[info->index.str].sh_addr; + rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs; + + /* + * Now populate the cut down core kallsyms for after init + * and set types up while we still have access to sections. + */ + mod->core_kallsyms.symtab = dst = data_base + info->symoffs; + mod->core_kallsyms.strtab = s = data_base + info->stroffs; + mod->core_kallsyms.typetab = data_base + info->core_typeoffs; + strtab_size = info->core_typeoffs - info->stroffs; + src = rcu_dereference(mod->kallsyms)->symtab; + for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) { + rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info); + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + ssize_t ret; + + mod->core_kallsyms.typetab[ndst] = + rcu_dereference(mod->kallsyms)->typetab[i]; + dst[ndst] = src[i]; + dst[ndst++].st_name = s - mod->core_kallsyms.strtab; + ret = strscpy(s, + &rcu_dereference(mod->kallsyms)->strtab[src[i].st_name], + strtab_size); + if (ret < 0) + break; + s += ret + 1; + strtab_size -= ret + 1; + } + } + rcu_read_unlock(); + mod->core_kallsyms.num_symtab = ndst; +} + +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) +void init_build_id(struct module *mod, const struct load_info *info) +{ + const Elf_Shdr *sechdr; + unsigned int i; + + for (i = 0; i < info->hdr->e_shnum; i++) { + sechdr = &info->sechdrs[i]; + if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE && + !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id, + sechdr->sh_size)) + break; + } +} +#else +void init_build_id(struct module *mod, const struct load_info *info) +{ +} +#endif + +static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) +{ + return kallsyms->strtab + kallsyms->symtab[symnum].st_name; +} + +/* + * Given a module and address, find the corresponding symbol and return its name + * while providing its size and offset if needed. + */ +static const char *find_kallsyms_symbol(struct module *mod, + unsigned long addr, + unsigned long *size, + unsigned long *offset) +{ + unsigned int i, best = 0; + unsigned long nextval, bestval; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + struct module_memory *mod_mem; + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) + mod_mem = &mod->mem[MOD_INIT_TEXT]; + else + mod_mem = &mod->mem[MOD_TEXT]; + + nextval = (unsigned long)mod_mem->base + mod_mem->size; + + bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); + + /* + * Scan for closest preceding symbol, and next symbol. (ELF + * starts real symbols at 1). + */ + for (i = 1; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + unsigned long thisval = kallsyms_symbol_value(sym); + + if (sym->st_shndx == SHN_UNDEF) + continue; + + /* + * We ignore unnamed symbols: they're uninformative + * and inserted at a whim. 
+ */ + if (*kallsyms_symbol_name(kallsyms, i) == '\0' || + is_mapping_symbol(kallsyms_symbol_name(kallsyms, i))) + continue; + + if (thisval <= addr && thisval > bestval) { + best = i; + bestval = thisval; + } + if (thisval > addr && thisval < nextval) + nextval = thisval; + } + + if (!best) + return NULL; + + if (size) + *size = nextval - bestval; + if (offset) + *offset = addr - bestval; + + return kallsyms_symbol_name(kallsyms, best); +} + +void * __weak dereference_module_function_descriptor(struct module *mod, + void *ptr) +{ + return ptr; +} + +/* + * For kallsyms to ask for address resolution. NULL means not found. Careful + * not to lock to avoid deadlock on oopses, simply disable preemption. + */ +const char *module_address_lookup(unsigned long addr, + unsigned long *size, + unsigned long *offset, + char **modname, + const unsigned char **modbuildid, + char *namebuf) +{ + const char *ret = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (mod) { + if (modname) + *modname = mod->name; + if (modbuildid) { +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) + *modbuildid = mod->build_id; +#else + *modbuildid = NULL; +#endif + } + + ret = find_kallsyms_symbol(mod, addr, size, offset); + } + /* Make a copy in here where it's safe */ + if (ret) { + strncpy(namebuf, ret, KSYM_NAME_LEN - 1); + ret = namebuf; + } + preempt_enable(); + + return ret; +} + +int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (within_module(addr, mod)) { + const char *sym; + + sym = find_kallsyms_symbol(mod, addr, NULL, NULL); + if (!sym) + goto out; + + strscpy(symname, sym, KSYM_NAME_LEN); + preempt_enable(); + return 0; + } + } +out: + preempt_enable(); + return -ERANGE; +} + +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + struct mod_kallsyms *kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + kallsyms = rcu_dereference_sched(mod->kallsyms); + if (symnum < kallsyms->num_symtab) { + const Elf_Sym *sym = &kallsyms->symtab[symnum]; + + *value = kallsyms_symbol_value(sym); + *type = kallsyms->typetab[symnum]; + strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); + strscpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } + symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +} + +/* Given a module and name of symbol, find and return the symbol's value */ +static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name) +{ + unsigned int i; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && + sym->st_shndx != SHN_UNDEF) + return kallsyms_symbol_value(sym); + } + return 0; +} + +static unsigned long __module_kallsyms_lookup_name(const char *name) +{ + struct module *mod; + char *colon; + + colon = strnchr(name, MODULE_NAME_LEN, ':'); + if (colon) { + mod = find_module_all(name, colon - name, false); + if (mod) + return __find_kallsyms_symbol_value(mod, colon + 1); + return 0; + } + + list_for_each_entry_rcu(mod, 
&modules, list) { + unsigned long ret; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + ret = __find_kallsyms_symbol_value(mod, name); + if (ret) + return ret; + } + return 0; +} + +/* Look for this name: can be of form module:name. */ +unsigned long module_kallsyms_lookup_name(const char *name) +{ + unsigned long ret; + + /* Don't lock: we're in enough trouble already. */ + preempt_disable(); + ret = __module_kallsyms_lookup_name(name); + preempt_enable(); + return ret; +} + +unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) +{ + unsigned long ret; + + preempt_disable(); + ret = __find_kallsyms_symbol_value(mod, name); + preempt_enable(); + return ret; +} + +int module_kallsyms_on_each_symbol(const char *modname, + int (*fn)(void *, const char *, unsigned long), + void *data) +{ + struct module *mod; + unsigned int i; + int ret = 0; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) { + struct mod_kallsyms *kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + if (modname && strcmp(modname, mod->name)) + continue; + + /* Use rcu_dereference_sched() to remain compliant with the sparse tool */ + preempt_disable(); + kallsyms = rcu_dereference_sched(mod->kallsyms); + preempt_enable(); + + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (sym->st_shndx == SHN_UNDEF) + continue; + + ret = fn(data, kallsyms_symbol_name(kallsyms, i), + kallsyms_symbol_value(sym)); + if (ret != 0) + goto out; + } + + /* + * The given module is found, the subsequent modules do not + * need to be compared. + */ + if (modname) + break; + } +out: + mutex_unlock(&module_mutex); + return ret; +} diff --git a/kernel/module/kdb.c b/kernel/module/kdb.c new file mode 100644 index 0000000000..995c32d369 --- /dev/null +++ b/kernel/module/kdb.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kdb support + * + * Copyright (C) 2010 Jason Wessel + */ + +#include <linux/module.h> +#include <linux/kdb.h> +#include "internal.h" + +/* + * kdb_lsmod - This function implements the 'lsmod' command. Lists + * currently loaded kernel modules. + * Mostly taken from userland lsmod. 
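+ * Illustrative use from the debugger prompt (prompt format may vary):
+ *   [0]kdb> lsmod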
+ */ +int kdb_lsmod(int argc, const char **argv) +{ + struct module *mod; + + if (argc != 0) + return KDB_ARGCOUNT; + + kdb_printf("Module Size modstruct Used by\n"); + list_for_each_entry(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + kdb_printf("%-20s%8u", mod->name, mod->mem[MOD_TEXT].size); + kdb_printf("/%8u", mod->mem[MOD_RODATA].size); + kdb_printf("/%8u", mod->mem[MOD_RO_AFTER_INIT].size); + kdb_printf("/%8u", mod->mem[MOD_DATA].size); + + kdb_printf(" 0x%px ", (void *)mod); +#ifdef CONFIG_MODULE_UNLOAD + kdb_printf("%4d ", module_refcount(mod)); +#endif + if (mod->state == MODULE_STATE_GOING) + kdb_printf(" (Unloading)"); + else if (mod->state == MODULE_STATE_COMING) + kdb_printf(" (Loading)"); + else + kdb_printf(" (Live)"); + kdb_printf(" 0x%px", mod->mem[MOD_TEXT].base); + kdb_printf("/0x%px", mod->mem[MOD_RODATA].base); + kdb_printf("/0x%px", mod->mem[MOD_RO_AFTER_INIT].base); + kdb_printf("/0x%px", mod->mem[MOD_DATA].base); + +#ifdef CONFIG_MODULE_UNLOAD + { + struct module_use *use; + + kdb_printf(" [ "); + list_for_each_entry(use, &mod->source_list, + source_list) + kdb_printf("%s ", use->target->name); + kdb_printf("]\n"); + } +#endif + } + + return 0; +} diff --git a/kernel/module/kmod.c b/kernel/module/kmod.c new file mode 100644 index 0000000000..0800d98916 --- /dev/null +++ b/kernel/module/kmod.c @@ -0,0 +1,180 @@ +/* + * kmod - the kernel module loader + * + * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/binfmts.h> +#include <linux/syscalls.h> +#include <linux/unistd.h> +#include <linux/kmod.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/cred.h> +#include <linux/file.h> +#include <linux/fdtable.h> +#include <linux/workqueue.h> +#include <linux/security.h> +#include <linux/mount.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/resource.h> +#include <linux/notifier.h> +#include <linux/suspend.h> +#include <linux/rwsem.h> +#include <linux/ptrace.h> +#include <linux/async.h> +#include <linux/uaccess.h> + +#include <trace/events/module.h> +#include "internal.h" + +/* + * Assuming: + * + * threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, + * (u64) THREAD_SIZE * 8UL); + * + * If you need less than 50 threads would mean we're dealing with systems + * smaller than 3200 pages. This assumes you are capable of having ~13M memory, + * and this would only be an upper limit, after which the OOM killer would take + * effect. Systems like these are very unlikely if modules are enabled. + */ +#define MAX_KMOD_CONCURRENT 50 +static DEFINE_SEMAPHORE(kmod_concurrent_max, MAX_KMOD_CONCURRENT); + +/* + * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads + * running at the same time without returning. When this happens we + * believe you've somehow ended up with a recursive module dependency + * creating a loop. + * + * We have no option but to fail. + * + * Userspace should proactively try to detect and prevent these. + */ +#define MAX_KMOD_ALL_BUSY_TIMEOUT 5 + +/* + modprobe_path is set via /proc/sys. 
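+ (specifically the kernel.modprobe sysctl, i.e. /proc/sys/kernel/modprobe;
+ the default comes from CONFIG_MODPROBE_PATH)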
+*/ +char modprobe_path[KMOD_PATH_LEN] = CONFIG_MODPROBE_PATH; + +static void free_modprobe_argv(struct subprocess_info *info) +{ + kfree(info->argv[3]); /* check call_modprobe() */ + kfree(info->argv); +} + +static int call_modprobe(char *orig_module_name, int wait) +{ + struct subprocess_info *info; + static char *envp[] = { + "HOME=/", + "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", + NULL + }; + char *module_name; + int ret; + + char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL); + if (!argv) + goto out; + + module_name = kstrdup(orig_module_name, GFP_KERNEL); + if (!module_name) + goto free_argv; + + argv[0] = modprobe_path; + argv[1] = "-q"; + argv[2] = "--"; + argv[3] = module_name; /* check free_modprobe_argv() */ + argv[4] = NULL; + + info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL, + NULL, free_modprobe_argv, NULL); + if (!info) + goto free_module_name; + + ret = call_usermodehelper_exec(info, wait | UMH_KILLABLE); + kmod_dup_request_announce(orig_module_name, ret); + return ret; + +free_module_name: + kfree(module_name); +free_argv: + kfree(argv); +out: + kmod_dup_request_announce(orig_module_name, -ENOMEM); + return -ENOMEM; +} + +/** + * __request_module - try to load a kernel module + * @wait: wait (or not) for the operation to complete + * @fmt: printf style format string for the name of the module + * @...: arguments as specified in the format string + * + * Load a module using the user mode module loader. The function returns + * zero on success or a negative errno code or positive exit code from + * "modprobe" on failure. Note that a successful module load does not mean + * the module did not then unload and exit on an error of its own. Callers + * must check that the service they requested is now available not blindly + * invoke it. + * + * If module auto-loading support is disabled then this function + * simply returns -ENOENT. + */ +int __request_module(bool wait, const char *fmt, ...) +{ + va_list args; + char module_name[MODULE_NAME_LEN]; + int ret, dup_ret; + + /* + * We don't allow synchronous module loading from async. Module + * init may invoke async_synchronize_full() which will end up + * waiting for this task which already is waiting for the module + * loading to complete, leading to a deadlock. + */ + WARN_ON_ONCE(wait && current_is_async()); + + if (!modprobe_path[0]) + return -ENOENT; + + va_start(args, fmt); + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); + va_end(args); + if (ret >= MODULE_NAME_LEN) + return -ENAMETOOLONG; + + ret = security_kernel_module_request(module_name); + if (ret) + return ret; + + ret = down_timeout(&kmod_concurrent_max, MAX_KMOD_ALL_BUSY_TIMEOUT * HZ); + if (ret) { + pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now", + module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT); + return ret; + } + + trace_module_request(module_name, wait, _RET_IP_); + + if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret)) { + ret = dup_ret; + goto out; + } + + ret = call_modprobe(module_name, wait ? 
UMH_WAIT_PROC : UMH_WAIT_EXEC); + +out: + up(&kmod_concurrent_max); + + return ret; +} +EXPORT_SYMBOL(__request_module); diff --git a/kernel/module/livepatch.c b/kernel/module/livepatch.c new file mode 100644 index 0000000000..a89f01e1d6 --- /dev/null +++ b/kernel/module/livepatch.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module livepatch support + * + * Copyright (C) 2016 Jessica Yu <jeyu@redhat.com> + */ + +#include <linux/module.h> +#include <linux/string.h> +#include <linux/slab.h> +#include "internal.h" + +/* + * Persist ELF information about a module. Copy the ELF header, + * section header table, section string table, and symtab section + * index from info to mod->klp_info. + */ +int copy_module_elf(struct module *mod, struct load_info *info) +{ + unsigned int size, symndx; + int ret; + + size = sizeof(*mod->klp_info); + mod->klp_info = kmalloc(size, GFP_KERNEL); + if (!mod->klp_info) + return -ENOMEM; + + /* ELF header */ + size = sizeof(mod->klp_info->hdr); + memcpy(&mod->klp_info->hdr, info->hdr, size); + + /* ELF section header table */ + size = sizeof(*info->sechdrs) * info->hdr->e_shnum; + mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); + if (!mod->klp_info->sechdrs) { + ret = -ENOMEM; + goto free_info; + } + + /* ELF section name string table */ + size = info->sechdrs[info->hdr->e_shstrndx].sh_size; + mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); + if (!mod->klp_info->secstrings) { + ret = -ENOMEM; + goto free_sechdrs; + } + + /* ELF symbol section index */ + symndx = info->index.sym; + mod->klp_info->symndx = symndx; + + /* + * For livepatch modules, core_kallsyms.symtab is a complete + * copy of the original symbol table. Adjust sh_addr to point + * to core_kallsyms.symtab since the copy of the symtab in module + * init memory is freed at the end of do_init_module(). + */ + mod->klp_info->sechdrs[symndx].sh_addr = (unsigned long)mod->core_kallsyms.symtab; + + return 0; + +free_sechdrs: + kfree(mod->klp_info->sechdrs); +free_info: + kfree(mod->klp_info); + return ret; +} + +void free_module_elf(struct module *mod) +{ + kfree(mod->klp_info->sechdrs); + kfree(mod->klp_info->secstrings); + kfree(mod->klp_info); +} diff --git a/kernel/module/main.c b/kernel/module/main.c new file mode 100644 index 0000000000..98fedfdb8d --- /dev/null +++ b/kernel/module/main.c @@ -0,0 +1,3366 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2002 Richard Henderson + * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. 
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + */ + +#define INCLUDE_VERMAGIC + +#include <linux/export.h> +#include <linux/extable.h> +#include <linux/moduleloader.h> +#include <linux/module_signature.h> +#include <linux/trace_events.h> +#include <linux/init.h> +#include <linux/kallsyms.h> +#include <linux/buildid.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/kernel_read_file.h> +#include <linux/kstrtox.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/elf.h> +#include <linux/seq_file.h> +#include <linux/syscalls.h> +#include <linux/fcntl.h> +#include <linux/rcupdate.h> +#include <linux/capability.h> +#include <linux/cpu.h> +#include <linux/moduleparam.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/vermagic.h> +#include <linux/notifier.h> +#include <linux/sched.h> +#include <linux/device.h> +#include <linux/string.h> +#include <linux/mutex.h> +#include <linux/rculist.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> +#include <linux/set_memory.h> +#include <asm/mmu_context.h> +#include <linux/license.h> +#include <asm/sections.h> +#include <linux/tracepoint.h> +#include <linux/ftrace.h> +#include <linux/livepatch.h> +#include <linux/async.h> +#include <linux/percpu.h> +#include <linux/kmemleak.h> +#include <linux/jump_label.h> +#include <linux/pfn.h> +#include <linux/bsearch.h> +#include <linux/dynamic_debug.h> +#include <linux/audit.h> +#include <linux/cfi.h> +#include <linux/debugfs.h> +#include <uapi/linux/module.h> +#include "internal.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/module.h> + +/* + * Mutex protects: + * 1) List of modules (also safely readable with preempt_disable), + * 2) module_use links, + * 3) mod_tree.addr_min/mod_tree.addr_max. + * (delete and add uses RCU list operations). + */ +DEFINE_MUTEX(module_mutex); +LIST_HEAD(modules); + +/* Work queue for freeing init sections in success case */ +static void do_free_init(struct work_struct *w); +static DECLARE_WORK(init_free_wq, do_free_init); +static LLIST_HEAD(init_free_list); + +struct mod_tree_root mod_tree __cacheline_aligned = { + .addr_min = -1UL, +}; + +struct symsearch { + const struct kernel_symbol *start, *stop; + const s32 *crcs; + enum mod_license license; +}; + +/* + * Bounds of module memory, for speeding up __module_address. + * Protected by module_mutex. + */ +static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base, + unsigned int size, struct mod_tree_root *tree) +{ + unsigned long min = (unsigned long)base; + unsigned long max = min + size; + +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + if (mod_mem_type_is_core_data(type)) { + if (min < tree->data_addr_min) + tree->data_addr_min = min; + if (max > tree->data_addr_max) + tree->data_addr_max = max; + return; + } +#endif + if (min < tree->addr_min) + tree->addr_min = min; + if (max > tree->addr_max) + tree->addr_max = max; +} + +static void mod_update_bounds(struct module *mod) +{ + for_each_mod_mem_type(type) { + struct module_memory *mod_mem = &mod->mem[type]; + + if (mod_mem->size) + __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree); + } +} + +/* Block module loading/unloading? */ +int modules_disabled; +core_param(nomodule, modules_disabled, bint, 0); + +/* Waiting for a module to finish initializing? 
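+ * Waiters such as resolve_symbol_wait() and add_unformed_module() are
+ * woken via wake_up_all(&module_wq) once a module finishes loading or
+ * is torn down.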
*/ +static DECLARE_WAIT_QUEUE_HEAD(module_wq); + +static BLOCKING_NOTIFIER_HEAD(module_notify_list); + +int register_module_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&module_notify_list, nb); +} +EXPORT_SYMBOL(register_module_notifier); + +int unregister_module_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&module_notify_list, nb); +} +EXPORT_SYMBOL(unregister_module_notifier); + +/* + * We require a truly strong try_module_get(): 0 means success. + * Otherwise an error is returned due to ongoing or failed + * initialization etc. + */ +static inline int strong_try_module_get(struct module *mod) +{ + BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); + if (mod && mod->state == MODULE_STATE_COMING) + return -EBUSY; + if (try_module_get(mod)) + return 0; + else + return -ENOENT; +} + +static inline void add_taint_module(struct module *mod, unsigned flag, + enum lockdep_ok lockdep_ok) +{ + add_taint(flag, lockdep_ok); + set_bit(flag, &mod->taints); +} + +/* + * A thread that wants to hold a reference to a module only while it + * is running can call this to safely exit. + */ +void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) +{ + module_put(mod); + kthread_exit(code); +} +EXPORT_SYMBOL(__module_put_and_kthread_exit); + +/* Find a module section: 0 means not found. */ +static unsigned int find_sec(const struct load_info *info, const char *name) +{ + unsigned int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + /* Alloc bit cleared means "ignore it." */ + if ((shdr->sh_flags & SHF_ALLOC) + && strcmp(info->secstrings + shdr->sh_name, name) == 0) + return i; + } + return 0; +} + +/* Find a module section, or NULL. */ +static void *section_addr(const struct load_info *info, const char *name) +{ + /* Section 0 has sh_addr 0. */ + return (void *)info->sechdrs[find_sec(info, name)].sh_addr; +} + +/* Find a module section, or NULL. Fill in number of "objects" in section. */ +static void *section_objs(const struct load_info *info, + const char *name, + size_t object_size, + unsigned int *num) +{ + unsigned int sec = find_sec(info, name); + + /* Section 0 has sh_addr 0 and sh_size 0. */ + *num = info->sechdrs[sec].sh_size / object_size; + return (void *)info->sechdrs[sec].sh_addr; +} + +/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ +static unsigned int find_any_sec(const struct load_info *info, const char *name) +{ + unsigned int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + if (strcmp(info->secstrings + shdr->sh_name, name) == 0) + return i; + } + return 0; +} + +/* + * Find a module section, or NULL. Fill in number of "objects" in section. + * Ignores SHF_ALLOC flag. + */ +static __maybe_unused void *any_section_objs(const struct load_info *info, + const char *name, + size_t object_size, + unsigned int *num) +{ + unsigned int sec = find_any_sec(info, name); + + /* Section 0 has sh_addr 0 and sh_size 0. */ + *num = info->sechdrs[sec].sh_size / object_size; + return (void *)info->sechdrs[sec].sh_addr; +} + +#ifndef CONFIG_MODVERSIONS +#define symversion(base, idx) NULL +#else +#define symversion(base, idx) ((base != NULL) ? 
((base) + (idx)) : NULL) +#endif + +static const char *kernel_symbol_name(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + return offset_to_ptr(&sym->name_offset); +#else + return sym->name; +#endif +} + +static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + if (!sym->namespace_offset) + return NULL; + return offset_to_ptr(&sym->namespace_offset); +#else + return sym->namespace; +#endif +} + +int cmp_name(const void *name, const void *sym) +{ + return strcmp(name, kernel_symbol_name(sym)); +} + +static bool find_exported_symbol_in_section(const struct symsearch *syms, + struct module *owner, + struct find_symbol_arg *fsa) +{ + struct kernel_symbol *sym; + + if (!fsa->gplok && syms->license == GPL_ONLY) + return false; + + sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, + sizeof(struct kernel_symbol), cmp_name); + if (!sym) + return false; + + fsa->owner = owner; + fsa->crc = symversion(syms->crcs, sym - syms->start); + fsa->sym = sym; + fsa->license = syms->license; + + return true; +} + +/* + * Find an exported symbol and return it, along with, (optional) crc and + * (optional) module which owns it. Needs preempt disabled or module_mutex. + */ +bool find_symbol(struct find_symbol_arg *fsa) +{ + static const struct symsearch arr[] = { + { __start___ksymtab, __stop___ksymtab, __start___kcrctab, + NOT_GPL_ONLY }, + { __start___ksymtab_gpl, __stop___ksymtab_gpl, + __start___kcrctab_gpl, + GPL_ONLY }, + }; + struct module *mod; + unsigned int i; + + module_assert_mutex_or_preempt(); + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) + return true; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + struct symsearch arr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, + mod->gpl_crcs, + GPL_ONLY }, + }; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], mod, fsa)) + return true; + } + + pr_debug("Failed to find symbol %s\n", fsa->name); + return false; +} + +/* + * Search for module by name: must hold module_mutex (or preempt disabled + * for read-only access). 
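+ *
+ * Illustrative use, as in __module_kallsyms_lookup_name() when resolving
+ * a "module:symbol" string (colon points at the ':' separator):
+ *
+ *	mod = find_module_all(name, colon - name, false);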
+ */ +struct module *find_module_all(const char *name, size_t len, + bool even_unformed) +{ + struct module *mod; + + module_assert_mutex_or_preempt(); + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) + continue; + if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) + return mod; + } + return NULL; +} + +struct module *find_module(const char *name) +{ + return find_module_all(name, strlen(name), false); +} + +#ifdef CONFIG_SMP + +static inline void __percpu *mod_percpu(struct module *mod) +{ + return mod->percpu; +} + +static int percpu_modalloc(struct module *mod, struct load_info *info) +{ + Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; + unsigned long align = pcpusec->sh_addralign; + + if (!pcpusec->sh_size) + return 0; + + if (align > PAGE_SIZE) { + pr_warn("%s: per-cpu alignment %li > %li\n", + mod->name, align, PAGE_SIZE); + align = PAGE_SIZE; + } + + mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); + if (!mod->percpu) { + pr_warn("%s: Could not allocate %lu bytes percpu data\n", + mod->name, (unsigned long)pcpusec->sh_size); + return -ENOMEM; + } + mod->percpu_size = pcpusec->sh_size; + return 0; +} + +static void percpu_modfree(struct module *mod) +{ + free_percpu(mod->percpu); +} + +static unsigned int find_pcpusec(struct load_info *info) +{ + return find_sec(info, ".data..percpu"); +} + +static void percpu_modcopy(struct module *mod, + const void *from, unsigned long size) +{ + int cpu; + + for_each_possible_cpu(cpu) + memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); +} + +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + struct module *mod; + unsigned int cpu; + + preempt_disable(); + + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (!mod->percpu_size) + continue; + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(mod->percpu, cpu); + void *va = (void *)addr; + + if (va >= start && va < start + mod->percpu_size) { + if (can_addr) { + *can_addr = (unsigned long) (va - start); + *can_addr += (unsigned long) + per_cpu_ptr(mod->percpu, + get_boot_cpu_id()); + } + preempt_enable(); + return true; + } + } + } + + preempt_enable(); + return false; +} + +/** + * is_module_percpu_address() - test whether address is from module static percpu + * @addr: address to test + * + * Test whether @addr belongs to module static percpu area. + * + * Return: %true if @addr is from module static percpu area + */ +bool is_module_percpu_address(unsigned long addr) +{ + return __is_module_percpu_address(addr, NULL); +} + +#else /* ... !CONFIG_SMP */ + +static inline void __percpu *mod_percpu(struct module *mod) +{ + return NULL; +} +static int percpu_modalloc(struct module *mod, struct load_info *info) +{ + /* UP modules shouldn't have this section: ENOMEM isn't quite right */ + if (info->sechdrs[info->index.pcpu].sh_size != 0) + return -ENOMEM; + return 0; +} +static inline void percpu_modfree(struct module *mod) +{ +} +static unsigned int find_pcpusec(struct load_info *info) +{ + return 0; +} +static inline void percpu_modcopy(struct module *mod, + const void *from, unsigned long size) +{ + /* pcpusec should be 0, and size of that section should be 0. 
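+ * On !SMP find_pcpusec() returns 0 and section 0 has sh_size 0, so a
+ * non-zero size here indicates a corrupt module image.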
*/ + BUG_ON(size != 0); +} +bool is_module_percpu_address(unsigned long addr) +{ + return false; +} + +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + +#endif /* CONFIG_SMP */ + +#define MODINFO_ATTR(field) \ +static void setup_modinfo_##field(struct module *mod, const char *s) \ +{ \ + mod->field = kstrdup(s, GFP_KERNEL); \ +} \ +static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ + struct module_kobject *mk, char *buffer) \ +{ \ + return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ +} \ +static int modinfo_##field##_exists(struct module *mod) \ +{ \ + return mod->field != NULL; \ +} \ +static void free_modinfo_##field(struct module *mod) \ +{ \ + kfree(mod->field); \ + mod->field = NULL; \ +} \ +static struct module_attribute modinfo_##field = { \ + .attr = { .name = __stringify(field), .mode = 0444 }, \ + .show = show_modinfo_##field, \ + .setup = setup_modinfo_##field, \ + .test = modinfo_##field##_exists, \ + .free = free_modinfo_##field, \ +}; + +MODINFO_ATTR(version); +MODINFO_ATTR(srcversion); + +static struct { + char name[MODULE_NAME_LEN + 1]; + char taints[MODULE_FLAGS_BUF_SIZE]; +} last_unloaded_module; + +#ifdef CONFIG_MODULE_UNLOAD + +EXPORT_TRACEPOINT_SYMBOL(module_get); + +/* MODULE_REF_BASE is the base reference count by kmodule loader. */ +#define MODULE_REF_BASE 1 + +/* Init the unload section of the module. */ +static int module_unload_init(struct module *mod) +{ + /* + * Initialize reference counter to MODULE_REF_BASE. + * refcnt == 0 means module is going. + */ + atomic_set(&mod->refcnt, MODULE_REF_BASE); + + INIT_LIST_HEAD(&mod->source_list); + INIT_LIST_HEAD(&mod->target_list); + + /* Hold reference count during initialization. */ + atomic_inc(&mod->refcnt); + + return 0; +} + +/* Does a already use b? */ +static int already_uses(struct module *a, struct module *b) +{ + struct module_use *use; + + list_for_each_entry(use, &b->source_list, source_list) { + if (use->source == a) + return 1; + } + pr_debug("%s does not use %s!\n", a->name, b->name); + return 0; +} + +/* + * Module a uses b + * - we add 'a' as a "source", 'b' as a "target" of module use + * - the module_use is added to the list of 'b' sources (so + * 'b' can walk the list to see who sourced them), and of 'a' + * targets (so 'a' can see what modules it targets). + */ +static int add_module_usage(struct module *a, struct module *b) +{ + struct module_use *use; + + pr_debug("Allocating new usage for %s.\n", a->name); + use = kmalloc(sizeof(*use), GFP_ATOMIC); + if (!use) + return -ENOMEM; + + use->source = a; + use->target = b; + list_add(&use->source_list, &b->source_list); + list_add(&use->target_list, &a->target_list); + return 0; +} + +/* Module a uses b: caller needs module_mutex() */ +static int ref_module(struct module *a, struct module *b) +{ + int err; + + if (b == NULL || already_uses(a, b)) + return 0; + + /* If module isn't available, we fail. */ + err = strong_try_module_get(b); + if (err) + return err; + + err = add_module_usage(a, b); + if (err) { + module_put(b); + return err; + } + return 0; +} + +/* Clear the unload stuff of the module. 
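+ * That is, walk mod->target_list, drop the reference taken by
+ * ref_module() on each target and free the module_use bookkeeping.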
*/ +static void module_unload_free(struct module *mod) +{ + struct module_use *use, *tmp; + + mutex_lock(&module_mutex); + list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { + struct module *i = use->target; + pr_debug("%s unusing %s\n", mod->name, i->name); + module_put(i); + list_del(&use->source_list); + list_del(&use->target_list); + kfree(use); + } + mutex_unlock(&module_mutex); +} + +#ifdef CONFIG_MODULE_FORCE_UNLOAD +static inline int try_force_unload(unsigned int flags) +{ + int ret = (flags & O_TRUNC); + if (ret) + add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); + return ret; +} +#else +static inline int try_force_unload(unsigned int flags) +{ + return 0; +} +#endif /* CONFIG_MODULE_FORCE_UNLOAD */ + +/* Try to release refcount of module, 0 means success. */ +static int try_release_module_ref(struct module *mod) +{ + int ret; + + /* Try to decrement refcnt which we set at loading */ + ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); + BUG_ON(ret < 0); + if (ret) + /* Someone can put this right now, recover with checking */ + ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); + + return ret; +} + +static int try_stop_module(struct module *mod, int flags, int *forced) +{ + /* If it's not unused, quit unless we're forcing. */ + if (try_release_module_ref(mod) != 0) { + *forced = try_force_unload(flags); + if (!(*forced)) + return -EWOULDBLOCK; + } + + /* Mark it as dying. */ + mod->state = MODULE_STATE_GOING; + + return 0; +} + +/** + * module_refcount() - return the refcount or -1 if unloading + * @mod: the module we're checking + * + * Return: + * -1 if the module is in the process of unloading + * otherwise the number of references in the kernel to the module + */ +int module_refcount(struct module *mod) +{ + return atomic_read(&mod->refcnt) - MODULE_REF_BASE; +} +EXPORT_SYMBOL(module_refcount); + +/* This exists whether we can unload or not */ +static void free_module(struct module *mod); + +SYSCALL_DEFINE2(delete_module, const char __user *, name_user, + unsigned int, flags) +{ + struct module *mod; + char name[MODULE_NAME_LEN]; + char buf[MODULE_FLAGS_BUF_SIZE]; + int ret, forced = 0; + + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + + if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) + return -EFAULT; + name[MODULE_NAME_LEN-1] = '\0'; + + audit_log_kern_module(name); + + if (mutex_lock_interruptible(&module_mutex) != 0) + return -EINTR; + + mod = find_module(name); + if (!mod) { + ret = -ENOENT; + goto out; + } + + if (!list_empty(&mod->source_list)) { + /* Other modules depend on us: get rid of them first. */ + ret = -EWOULDBLOCK; + goto out; + } + + /* Doing init or already dying? */ + if (mod->state != MODULE_STATE_LIVE) { + /* FIXME: if (force), slam module count damn the torpedoes */ + pr_debug("%s already dying\n", mod->name); + ret = -EBUSY; + goto out; + } + + /* If it has an init func, it must have an exit func to unload */ + if (mod->init && !mod->exit) { + forced = try_force_unload(flags); + if (!forced) { + /* This module can't be removed */ + ret = -EBUSY; + goto out; + } + } + + ret = try_stop_module(mod, flags, &forced); + if (ret != 0) + goto out; + + mutex_unlock(&module_mutex); + /* Final destruction now no one is using it. 
*/ + if (mod->exit != NULL) + mod->exit(); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + ftrace_release_mod(mod); + + async_synchronize_full(); + + /* Store the name and taints of the last unloaded module for diagnostic purposes */ + strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name)); + strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints)); + + free_module(mod); + /* someone could wait for the module in add_unformed_module() */ + wake_up_all(&module_wq); + return 0; +out: + mutex_unlock(&module_mutex); + return ret; +} + +void __symbol_put(const char *symbol) +{ + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + }; + + preempt_disable(); + BUG_ON(!find_symbol(&fsa)); + module_put(fsa.owner); + preempt_enable(); +} +EXPORT_SYMBOL(__symbol_put); + +/* Note this assumes addr is a function, which it currently always is. */ +void symbol_put_addr(void *addr) +{ + struct module *modaddr; + unsigned long a = (unsigned long)dereference_function_descriptor(addr); + + if (core_kernel_text(a)) + return; + + /* + * Even though we hold a reference on the module; we still need to + * disable preemption in order to safely traverse the data structure. + */ + preempt_disable(); + modaddr = __module_text_address(a); + BUG_ON(!modaddr); + module_put(modaddr); + preempt_enable(); +} +EXPORT_SYMBOL_GPL(symbol_put_addr); + +static ssize_t show_refcnt(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + return sprintf(buffer, "%i\n", module_refcount(mk->mod)); +} + +static struct module_attribute modinfo_refcnt = + __ATTR(refcnt, 0444, show_refcnt, NULL); + +void __module_get(struct module *module) +{ + if (module) { + atomic_inc(&module->refcnt); + trace_module_get(module, _RET_IP_); + } +} +EXPORT_SYMBOL(__module_get); + +bool try_module_get(struct module *module) +{ + bool ret = true; + + if (module) { + /* Note: here, we can fail to get a reference */ + if (likely(module_is_live(module) && + atomic_inc_not_zero(&module->refcnt) != 0)) + trace_module_get(module, _RET_IP_); + else + ret = false; + } + return ret; +} +EXPORT_SYMBOL(try_module_get); + +void module_put(struct module *module) +{ + int ret; + + if (module) { + ret = atomic_dec_if_positive(&module->refcnt); + WARN_ON(ret < 0); /* Failed to put refcount */ + trace_module_put(module, _RET_IP_); + } +} +EXPORT_SYMBOL(module_put); + +#else /* !CONFIG_MODULE_UNLOAD */ +static inline void module_unload_free(struct module *mod) +{ +} + +static int ref_module(struct module *a, struct module *b) +{ + return strong_try_module_get(b); +} + +static inline int module_unload_init(struct module *mod) +{ + return 0; +} +#endif /* CONFIG_MODULE_UNLOAD */ + +size_t module_flags_taint(unsigned long taints, char *buf) +{ + size_t l = 0; + int i; + + for (i = 0; i < TAINT_FLAGS_COUNT; i++) { + if (taint_flags[i].module && test_bit(i, &taints)) + buf[l++] = taint_flags[i].c_true; + } + + return l; +} + +static ssize_t show_initstate(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + const char *state = "unknown"; + + switch (mk->mod->state) { + case MODULE_STATE_LIVE: + state = "live"; + break; + case MODULE_STATE_COMING: + state = "coming"; + break; + case MODULE_STATE_GOING: + state = "going"; + break; + default: + BUG(); + } + return sprintf(buffer, "%s\n", state); +} + +static struct module_attribute modinfo_initstate = + __ATTR(initstate, 0444, 
show_initstate, NULL); + +static ssize_t store_uevent(struct module_attribute *mattr, + struct module_kobject *mk, + const char *buffer, size_t count) +{ + int rc; + + rc = kobject_synth_uevent(&mk->kobj, buffer, count); + return rc ? rc : count; +} + +struct module_attribute module_uevent = + __ATTR(uevent, 0200, NULL, store_uevent); + +static ssize_t show_coresize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + unsigned int size = mk->mod->mem[MOD_TEXT].size; + + if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) { + for_class_mod_mem_type(type, core_data) + size += mk->mod->mem[type].size; + } + return sprintf(buffer, "%u\n", size); +} + +static struct module_attribute modinfo_coresize = + __ATTR(coresize, 0444, show_coresize, NULL); + +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC +static ssize_t show_datasize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + unsigned int size = 0; + + for_class_mod_mem_type(type, core_data) + size += mk->mod->mem[type].size; + return sprintf(buffer, "%u\n", size); +} + +static struct module_attribute modinfo_datasize = + __ATTR(datasize, 0444, show_datasize, NULL); +#endif + +static ssize_t show_initsize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + unsigned int size = 0; + + for_class_mod_mem_type(type, init) + size += mk->mod->mem[type].size; + return sprintf(buffer, "%u\n", size); +} + +static struct module_attribute modinfo_initsize = + __ATTR(initsize, 0444, show_initsize, NULL); + +static ssize_t show_taint(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + size_t l; + + l = module_flags_taint(mk->mod->taints, buffer); + buffer[l++] = '\n'; + return l; +} + +static struct module_attribute modinfo_taint = + __ATTR(taint, 0444, show_taint, NULL); + +struct module_attribute *modinfo_attrs[] = { + &module_uevent, + &modinfo_version, + &modinfo_srcversion, + &modinfo_initstate, + &modinfo_coresize, +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + &modinfo_datasize, +#endif + &modinfo_initsize, + &modinfo_taint, +#ifdef CONFIG_MODULE_UNLOAD + &modinfo_refcnt, +#endif + NULL, +}; + +size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); + +static const char vermagic[] = VERMAGIC_STRING; + +int try_to_force_load(struct module *mod, const char *reason) +{ +#ifdef CONFIG_MODULE_FORCE_LOAD + if (!test_taint(TAINT_FORCED_MODULE)) + pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); + add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); + return 0; +#else + return -ENOEXEC; +#endif +} + +/* Parse tag=value strings from .modinfo section */ +char *module_next_tag_pair(char *string, unsigned long *secsize) +{ + /* Skip non-zero chars */ + while (string[0]) { + string++; + if ((*secsize)-- <= 1) + return NULL; + } + + /* Skip any zero padding. */ + while (!string[0]) { + string++; + if ((*secsize)-- <= 1) + return NULL; + } + return string; +} + +static char *get_next_modinfo(const struct load_info *info, const char *tag, + char *prev) +{ + char *p; + unsigned int taglen = strlen(tag); + Elf_Shdr *infosec = &info->sechdrs[info->index.info]; + unsigned long size = infosec->sh_size; + + /* + * get_modinfo() calls made before rewrite_section_headers() + * must use sh_offset, as sh_addr isn't set! 
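+ *
+ * .modinfo itself is a sequence of NUL-terminated "tag=value" strings
+ * (e.g. "license=GPL"), which module_next_tag_pair() steps through.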
+ */ + char *modinfo = (char *)info->hdr + infosec->sh_offset; + + if (prev) { + size -= prev - modinfo; + modinfo = module_next_tag_pair(prev, &size); + } + + for (p = modinfo; p; p = module_next_tag_pair(p, &size)) { + if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') + return p + taglen + 1; + } + return NULL; +} + +static char *get_modinfo(const struct load_info *info, const char *tag) +{ + return get_next_modinfo(info, tag, NULL); +} + +static int verify_namespace_is_imported(const struct load_info *info, + const struct kernel_symbol *sym, + struct module *mod) +{ + const char *namespace; + char *imported_namespace; + + namespace = kernel_symbol_namespace(sym); + if (namespace && namespace[0]) { + for_each_modinfo_entry(imported_namespace, info, "import_ns") { + if (strcmp(namespace, imported_namespace) == 0) + return 0; + } +#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + pr_warn( +#else + pr_err( +#endif + "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", + mod->name, kernel_symbol_name(sym), namespace); +#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + return -EINVAL; +#endif + } + return 0; +} + +static bool inherit_taint(struct module *mod, struct module *owner, const char *name) +{ + if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) + return true; + + if (mod->using_gplonly_symbols) { + pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", + mod->name, name, owner->name); + return false; + } + + if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { + pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", + mod->name, name, owner->name); + set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); + } + return true; +} + +/* Resolve a symbol for this module. I.e. if we find one, record usage. */ +static const struct kernel_symbol *resolve_symbol(struct module *mod, + const struct load_info *info, + const char *name, + char ownername[]) +{ + struct find_symbol_arg fsa = { + .name = name, + .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), + .warn = true, + }; + int err; + + /* + * The module_mutex should not be a heavily contended lock; + * if we get the occasional sleep here, we'll go an extra iteration + * in the wait_event_interruptible(), which is harmless. + */ + sched_annotate_sleep(); + mutex_lock(&module_mutex); + if (!find_symbol(&fsa)) + goto unlock; + + if (fsa.license == GPL_ONLY) + mod->using_gplonly_symbols = true; + + if (!inherit_taint(mod, fsa.owner, name)) { + fsa.sym = NULL; + goto getname; + } + + if (!check_version(info, name, mod, fsa.crc)) { + fsa.sym = ERR_PTR(-EINVAL); + goto getname; + } + + err = verify_namespace_is_imported(info, fsa.sym, mod); + if (err) { + fsa.sym = ERR_PTR(err); + goto getname; + } + + err = ref_module(mod, fsa.owner); + if (err) { + fsa.sym = ERR_PTR(err); + goto getname; + } + +getname: + /* We must make copy under the lock if we failed to get ref. 
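+ * Once module_mutex is dropped the owner can be unloaded, so its name
+ * has to be copied into ownername now.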
*/ + strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); +unlock: + mutex_unlock(&module_mutex); + return fsa.sym; +} + +static const struct kernel_symbol * +resolve_symbol_wait(struct module *mod, + const struct load_info *info, + const char *name) +{ + const struct kernel_symbol *ksym; + char owner[MODULE_NAME_LEN]; + + if (wait_event_interruptible_timeout(module_wq, + !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) + || PTR_ERR(ksym) != -EBUSY, + 30 * HZ) <= 0) { + pr_warn("%s: gave up waiting for init of module %s.\n", + mod->name, owner); + } + return ksym; +} + +void __weak module_memfree(void *module_region) +{ + /* + * This memory may be RO, and freeing RO memory in an interrupt is not + * supported by vmalloc. + */ + WARN_ON(in_interrupt()); + vfree(module_region); +} + +void __weak module_arch_cleanup(struct module *mod) +{ +} + +void __weak module_arch_freeing_init(struct module *mod) +{ +} + +static bool mod_mem_use_vmalloc(enum mod_mem_type type) +{ + return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) && + mod_mem_type_is_core_data(type); +} + +static void *module_memory_alloc(unsigned int size, enum mod_mem_type type) +{ + if (mod_mem_use_vmalloc(type)) + return vzalloc(size); + return module_alloc(size); +} + +static void module_memory_free(void *ptr, enum mod_mem_type type) +{ + if (mod_mem_use_vmalloc(type)) + vfree(ptr); + else + module_memfree(ptr); +} + +static void free_mod_mem(struct module *mod) +{ + for_each_mod_mem_type(type) { + struct module_memory *mod_mem = &mod->mem[type]; + + if (type == MOD_DATA) + continue; + + /* Free lock-classes; relies on the preceding sync_rcu(). */ + lockdep_free_key_range(mod_mem->base, mod_mem->size); + if (mod_mem->size) + module_memory_free(mod_mem->base, type); + } + + /* MOD_DATA hosts mod, so free it at last */ + lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); + module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA); +} + +/* Free a module, remove from lists, etc. */ +static void free_module(struct module *mod) +{ + trace_module_free(mod); + + mod_sysfs_teardown(mod); + + /* + * We leave it in list to prevent duplicate loads, but make sure + * that noone uses it while it's being deconstructed. + */ + mutex_lock(&module_mutex); + mod->state = MODULE_STATE_UNFORMED; + mutex_unlock(&module_mutex); + + /* Arch-specific cleanup. */ + module_arch_cleanup(mod); + + /* Module unload stuff */ + module_unload_free(mod); + + /* Free any allocated parameters. */ + destroy_params(mod->kp, mod->num_kp); + + if (is_livepatch_module(mod)) + free_module_elf(mod); + + /* Now we can delete it from the lists */ + mutex_lock(&module_mutex); + /* Unlink carefully: kallsyms could be walking list. */ + list_del_rcu(&mod->list); + mod_tree_remove(mod); + /* Remove this module from bug list, this uses list_del_rcu */ + module_bug_cleanup(mod); + /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. 
*/ + synchronize_rcu(); + if (try_add_tainted_module(mod)) + pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", + mod->name); + mutex_unlock(&module_mutex); + + /* This may be empty, but that's OK */ + module_arch_freeing_init(mod); + kfree(mod->args); + percpu_modfree(mod); + + free_mod_mem(mod); +} + +void *__symbol_get(const char *symbol) +{ + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + .warn = true, + }; + + preempt_disable(); + if (!find_symbol(&fsa)) + goto fail; + if (fsa.license != GPL_ONLY) { + pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", + symbol); + goto fail; + } + if (strong_try_module_get(fsa.owner)) + goto fail; + preempt_enable(); + return (void *)kernel_symbol_value(fsa.sym); +fail: + preempt_enable(); + return NULL; +} +EXPORT_SYMBOL_GPL(__symbol_get); + +/* + * Ensure that an exported symbol [global namespace] does not already exist + * in the kernel or in some other module's exported symbol table. + * + * You must hold the module_mutex. + */ +static int verify_exported_symbols(struct module *mod) +{ + unsigned int i; + const struct kernel_symbol *s; + struct { + const struct kernel_symbol *sym; + unsigned int num; + } arr[] = { + { mod->syms, mod->num_syms }, + { mod->gpl_syms, mod->num_gpl_syms }, + }; + + for (i = 0; i < ARRAY_SIZE(arr); i++) { + for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { + struct find_symbol_arg fsa = { + .name = kernel_symbol_name(s), + .gplok = true, + }; + if (find_symbol(&fsa)) { + pr_err("%s: exports duplicate symbol %s" + " (owned by %s)\n", + mod->name, kernel_symbol_name(s), + module_name(fsa.owner)); + return -ENOEXEC; + } + } + } + return 0; +} + +static bool ignore_undef_symbol(Elf_Half emachine, const char *name) +{ + /* + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. + * i386 has a similar problem but may not deserve a fix. + * + * If we ever have to ignore many symbols, consider refactoring the code to + * only warn if referenced by a relocation. + */ + if (emachine == EM_386 || emachine == EM_X86_64) + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); + return false; +} + +/* Change all symbols so that st_value encodes the pointer directly. */ +static int simplify_symbols(struct module *mod, const struct load_info *info) +{ + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + Elf_Sym *sym = (void *)symsec->sh_addr; + unsigned long secbase; + unsigned int i; + int ret = 0; + const struct kernel_symbol *ksym; + + for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { + const char *name = info->strtab + sym[i].st_name; + + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* Ignore common symbols */ + if (!strncmp(name, "__gnu_lto", 9)) + break; + + /* + * We compiled with -fno-common. These are not + * supposed to happen. + */ + pr_debug("Common symbol: %s\n", name); + pr_warn("%s: please compile with -fno-common\n", + mod->name); + ret = -ENOEXEC; + break; + + case SHN_ABS: + /* Don't need to do anything */ + pr_debug("Absolute symbol: 0x%08lx %s\n", + (long)sym[i].st_value, name); + break; + + case SHN_LIVEPATCH: + /* Livepatch symbols are resolved by livepatch */ + break; + + case SHN_UNDEF: + ksym = resolve_symbol_wait(mod, info, name); + /* Ok if resolved. */ + if (ksym && !IS_ERR(ksym)) { + sym[i].st_value = kernel_symbol_value(ksym); + break; + } + + /* Ok if weak or ignored. 
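+ * ("Ignored" meaning ignore_undef_symbol() matched a known toolchain
+ * artefact such as _GLOBAL_OFFSET_TABLE_ on x86.)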
*/ + if (!ksym && + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || + ignore_undef_symbol(info->hdr->e_machine, name))) + break; + + ret = PTR_ERR(ksym) ?: -ENOENT; + pr_warn("%s: Unknown symbol %s (err %d)\n", + mod->name, name, ret); + break; + + default: + /* Divert to percpu allocation if a percpu var. */ + if (sym[i].st_shndx == info->index.pcpu) + secbase = (unsigned long)mod_percpu(mod); + else + secbase = info->sechdrs[sym[i].st_shndx].sh_addr; + sym[i].st_value += secbase; + break; + } + } + + return ret; +} + +static int apply_relocations(struct module *mod, const struct load_info *info) +{ + unsigned int i; + int err = 0; + + /* Now do relocations. */ + for (i = 1; i < info->hdr->e_shnum; i++) { + unsigned int infosec = info->sechdrs[i].sh_info; + + /* Not a valid relocation section? */ + if (infosec >= info->hdr->e_shnum) + continue; + + /* Don't bother with non-allocated sections */ + if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) + continue; + + if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) + err = klp_apply_section_relocs(mod, info->sechdrs, + info->secstrings, + info->strtab, + info->index.sym, i, + NULL); + else if (info->sechdrs[i].sh_type == SHT_REL) + err = apply_relocate(info->sechdrs, info->strtab, + info->index.sym, i, mod); + else if (info->sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(info->sechdrs, info->strtab, + info->index.sym, i, mod); + if (err < 0) + break; + } + return err; +} + +/* Additional bytes needed by arch in front of individual sections */ +unsigned int __weak arch_mod_section_prepend(struct module *mod, + unsigned int section) +{ + /* default implementation just returns zero */ + return 0; +} + +long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, + Elf_Shdr *sechdr, unsigned int section) +{ + long offset; + long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT; + + mod->mem[type].size += arch_mod_section_prepend(mod, section); + offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1); + mod->mem[type].size = offset + sechdr->sh_size; + + WARN_ON_ONCE(offset & mask); + return offset | mask; +} + +bool module_init_layout_section(const char *sname) +{ +#ifndef CONFIG_MODULE_UNLOAD + if (module_exit_section(sname)) + return true; +#endif + return module_init_section(sname); +} + +static void __layout_sections(struct module *mod, struct load_info *info, bool is_init) +{ + unsigned int m, i; + + static const unsigned long masks[][2] = { + /* + * NOTE: all executable code must be the first section + * in this array; otherwise modify the text_size + * finder in the two loops below + */ + { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, + { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, + { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, + { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, + { ARCH_SHF_SMALL | SHF_ALLOC, 0 } + }; + static const int core_m_to_mem_type[] = { + MOD_TEXT, + MOD_RODATA, + MOD_RO_AFTER_INIT, + MOD_DATA, + MOD_DATA, + }; + static const int init_m_to_mem_type[] = { + MOD_INIT_TEXT, + MOD_INIT_RODATA, + MOD_INVALID, + MOD_INIT_DATA, + MOD_INIT_DATA, + }; + + for (m = 0; m < ARRAY_SIZE(masks); ++m) { + enum mod_mem_type type = is_init ? 
init_m_to_mem_type[m] : core_m_to_mem_type[m]; + + for (i = 0; i < info->hdr->e_shnum; ++i) { + Elf_Shdr *s = &info->sechdrs[i]; + const char *sname = info->secstrings + s->sh_name; + + if ((s->sh_flags & masks[m][0]) != masks[m][0] + || (s->sh_flags & masks[m][1]) + || s->sh_entsize != ~0UL + || is_init != module_init_layout_section(sname)) + continue; + + if (WARN_ON_ONCE(type == MOD_INVALID)) + continue; + + s->sh_entsize = module_get_offset_and_type(mod, type, s, i); + pr_debug("\t%s\n", sname); + } + } +} + +/* + * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld + * might -- code, read-only data, read-write data, small data. Tally + * sizes, and place the offsets into sh_entsize fields: high bit means it + * belongs in init. + */ +static void layout_sections(struct module *mod, struct load_info *info) +{ + unsigned int i; + + for (i = 0; i < info->hdr->e_shnum; i++) + info->sechdrs[i].sh_entsize = ~0UL; + + pr_debug("Core section allocation order for %s:\n", mod->name); + __layout_sections(mod, info, false); + + pr_debug("Init section allocation order for %s:\n", mod->name); + __layout_sections(mod, info, true); +} + +static void module_license_taint_check(struct module *mod, const char *license) +{ + if (!license) + license = "unspecified"; + + if (!license_is_gpl_compatible(license)) { + if (!test_taint(TAINT_PROPRIETARY_MODULE)) + pr_warn("%s: module license '%s' taints kernel.\n", + mod->name, license); + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + } +} + +static void setup_modinfo(struct module *mod, struct load_info *info) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (attr->setup) + attr->setup(mod, get_modinfo(info, attr->attr.name)); + } +} + +static void free_modinfo(struct module *mod) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (attr->free) + attr->free(mod); + } +} + +void * __weak module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); +} + +bool __weak module_init_section(const char *name) +{ + return strstarts(name, ".init"); +} + +bool __weak module_exit_section(const char *name) +{ + return strstarts(name, ".exit"); +} + +static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) +{ +#if defined(CONFIG_64BIT) + unsigned long long secend; +#else + unsigned long secend; +#endif + + /* + * Check for both overflow and offset/size being + * too large. + */ + secend = shdr->sh_offset + shdr->sh_size; + if (secend < shdr->sh_offset || secend > info->len) + return -ENOEXEC; + + return 0; +} + +/* + * Check userspace passed ELF module against our expectations, and cache + * useful variables for further processing as we go. + * + * This does basic validity checks against section offsets and sizes, the + * section name string table, and the indices used for it (sh_name). + * + * As a last step, since we're already checking the ELF sections we cache + * useful variables which will be used later for our convenience: + * + * o pointers to section headers + * o cache the modinfo symbol section + * o cache the string symbol section + * o cache the module section + * + * As a last step we set info->mod to the temporary copy of the module in + * info->hdr. The final one will be allocated in move_module(). 
Any + * modifications we make to our copy of the module will be carried over + * to the final minted module. + */ +static int elf_validity_cache_copy(struct load_info *info, int flags) +{ + unsigned int i; + Elf_Shdr *shdr, *strhdr; + int err; + unsigned int num_mod_secs = 0, mod_idx; + unsigned int num_info_secs = 0, info_idx; + unsigned int num_sym_secs = 0, sym_idx; + + if (info->len < sizeof(*(info->hdr))) { + pr_err("Invalid ELF header len %lu\n", info->len); + goto no_exec; + } + + if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { + pr_err("Invalid ELF header magic: != %s\n", ELFMAG); + goto no_exec; + } + if (info->hdr->e_type != ET_REL) { + pr_err("Invalid ELF header type: %u != %u\n", + info->hdr->e_type, ET_REL); + goto no_exec; + } + if (!elf_check_arch(info->hdr)) { + pr_err("Invalid architecture in ELF header: %u\n", + info->hdr->e_machine); + goto no_exec; + } + if (!module_elf_check_arch(info->hdr)) { + pr_err("Invalid module architecture in ELF header: %u\n", + info->hdr->e_machine); + goto no_exec; + } + if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { + pr_err("Invalid ELF section header size\n"); + goto no_exec; + } + + /* + * e_shnum is 16 bits, and sizeof(Elf_Shdr) is + * known and small. So e_shnum * sizeof(Elf_Shdr) + * will not overflow unsigned long on any platform. + */ + if (info->hdr->e_shoff >= info->len + || (info->hdr->e_shnum * sizeof(Elf_Shdr) > + info->len - info->hdr->e_shoff)) { + pr_err("Invalid ELF section header overflow\n"); + goto no_exec; + } + + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + + /* + * Verify if the section name table index is valid. + */ + if (info->hdr->e_shstrndx == SHN_UNDEF + || info->hdr->e_shstrndx >= info->hdr->e_shnum) { + pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", + info->hdr->e_shstrndx, info->hdr->e_shstrndx, + info->hdr->e_shnum); + goto no_exec; + } + + strhdr = &info->sechdrs[info->hdr->e_shstrndx]; + err = validate_section_offset(info, strhdr); + if (err < 0) { + pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); + return err; + } + + /* + * The section name table must be NUL-terminated, as required + * by the spec. This makes strcmp and pr_* calls that access + * strings in the section safe. + */ + info->secstrings = (void *)info->hdr + strhdr->sh_offset; + if (strhdr->sh_size == 0) { + pr_err("empty section name table\n"); + goto no_exec; + } + if (info->secstrings[strhdr->sh_size - 1] != '\0') { + pr_err("ELF Spec violation: section name table isn't null terminated\n"); + goto no_exec; + } + + /* + * The code assumes that section 0 has a length of zero and + * an addr of zero, so check for it. 
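+ * (The ELF specification reserves index 0, SHN_UNDEF, as an all-zero
+ * section header, which is exactly what is verified below.)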
+ */ + if (info->sechdrs[0].sh_type != SHT_NULL + || info->sechdrs[0].sh_size != 0 + || info->sechdrs[0].sh_addr != 0) { + pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", + info->sechdrs[0].sh_type); + goto no_exec; + } + + for (i = 1; i < info->hdr->e_shnum; i++) { + shdr = &info->sechdrs[i]; + switch (shdr->sh_type) { + case SHT_NULL: + case SHT_NOBITS: + continue; + case SHT_SYMTAB: + if (shdr->sh_link == SHN_UNDEF + || shdr->sh_link >= info->hdr->e_shnum) { + pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", + shdr->sh_link, shdr->sh_link, + info->hdr->e_shnum); + goto no_exec; + } + num_sym_secs++; + sym_idx = i; + fallthrough; + default: + err = validate_section_offset(info, shdr); + if (err < 0) { + pr_err("Invalid ELF section in module (section %u type %u)\n", + i, shdr->sh_type); + return err; + } + if (strcmp(info->secstrings + shdr->sh_name, + ".gnu.linkonce.this_module") == 0) { + num_mod_secs++; + mod_idx = i; + } else if (strcmp(info->secstrings + shdr->sh_name, + ".modinfo") == 0) { + num_info_secs++; + info_idx = i; + } + + if (shdr->sh_flags & SHF_ALLOC) { + if (shdr->sh_name >= strhdr->sh_size) { + pr_err("Invalid ELF section name in module (section %u type %u)\n", + i, shdr->sh_type); + return -ENOEXEC; + } + } + break; + } + } + + if (num_info_secs > 1) { + pr_err("Only one .modinfo section must exist.\n"); + goto no_exec; + } else if (num_info_secs == 1) { + /* Try to find a name early so we can log errors with a module name */ + info->index.info = info_idx; + info->name = get_modinfo(info, "name"); + } + + if (num_sym_secs != 1) { + pr_warn("%s: module has no symbols (stripped?)\n", + info->name ?: "(missing .modinfo section or name field)"); + goto no_exec; + } + + /* Sets internal symbols and strings. */ + info->index.sym = sym_idx; + shdr = &info->sechdrs[sym_idx]; + info->index.str = shdr->sh_link; + info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; + + /* + * The ".gnu.linkonce.this_module" ELF section is special. It is + * what modpost uses to refer to __this_module and let's use rely + * on THIS_MODULE to point to &__this_module properly. The kernel's + * modpost declares it on each modules's *.mod.c file. If the struct + * module of the kernel changes a full kernel rebuild is required. + * + * We have a few expectaions for this special section, the following + * code validates all this for us: + * + * o Only one section must exist + * o We expect the kernel to always have to allocate it: SHF_ALLOC + * o The section size must match the kernel's run time's struct module + * size + */ + if (num_mod_secs != 1) { + pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n", + info->name ?: "(missing .modinfo section or name field)"); + goto no_exec; + } + + shdr = &info->sechdrs[mod_idx]; + + /* + * This is already implied on the switch above, however let's be + * pedantic about it. 
+ */ + if (shdr->sh_type == SHT_NOBITS) { + pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", + info->name ?: "(missing .modinfo section or name field)"); + goto no_exec; + } + + if (!(shdr->sh_flags & SHF_ALLOC)) { + pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", + info->name ?: "(missing .modinfo section or name field)"); + goto no_exec; + } + + if (shdr->sh_size != sizeof(struct module)) { + pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", + info->name ?: "(missing .modinfo section or name field)"); + goto no_exec; + } + + info->index.mod = mod_idx; + + /* This is temporary: point mod into copy of data. */ + info->mod = (void *)info->hdr + shdr->sh_offset; + + /* + * If we didn't load the .modinfo 'name' field earlier, fall back to + * on-disk struct mod 'name' field. + */ + if (!info->name) + info->name = info->mod->name; + + if (flags & MODULE_INIT_IGNORE_MODVERSIONS) + info->index.vers = 0; /* Pretend no __versions section! */ + else + info->index.vers = find_sec(info, "__versions"); + + info->index.pcpu = find_pcpusec(info); + + return 0; + +no_exec: + return -ENOEXEC; +} + +#define COPY_CHUNK_SIZE (16*PAGE_SIZE) + +static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) +{ + do { + unsigned long n = min(len, COPY_CHUNK_SIZE); + + if (copy_from_user(dst, usrc, n) != 0) + return -EFAULT; + cond_resched(); + dst += n; + usrc += n; + len -= n; + } while (len); + return 0; +} + +static int check_modinfo_livepatch(struct module *mod, struct load_info *info) +{ + if (!get_modinfo(info, "livepatch")) + /* Nothing more to do */ + return 0; + + if (set_livepatch_module(mod)) + return 0; + + pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", + mod->name); + return -ENOEXEC; +} + +static void check_modinfo_retpoline(struct module *mod, struct load_info *info) +{ + if (retpoline_module_ok(get_modinfo(info, "retpoline"))) + return; + + pr_warn("%s: loading module not compiled with retpoline compiler.\n", + mod->name); +} + +/* Sets info->hdr and info->len. */ +static int copy_module_from_user(const void __user *umod, unsigned long len, + struct load_info *info) +{ + int err; + + info->len = len; + if (info->len < sizeof(*(info->hdr))) + return -ENOEXEC; + + err = security_kernel_load_data(LOADING_MODULE, true); + if (err) + return err; + + /* Suck in entire file: we'll want most of it. */ + info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); + if (!info->hdr) + return -ENOMEM; + + if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { + err = -EFAULT; + goto out; + } + + err = security_kernel_post_load_data((char *)info->hdr, info->len, + LOADING_MODULE, "init_module"); +out: + if (err) + vfree(info->hdr); + + return err; +} + +static void free_copy(struct load_info *info, int flags) +{ + if (flags & MODULE_INIT_COMPRESSED_FILE) + module_decompress_cleanup(info); + else + vfree(info->hdr); +} + +static int rewrite_section_headers(struct load_info *info, int flags) +{ + unsigned int i; + + /* This should always be true, but let's be sure. */ + info->sechdrs[0].sh_addr = 0; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + + /* + * Mark all sections sh_addr with their address in the + * temporary image. + */ + shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; + + } + + /* Track but don't keep modinfo and version sections. 
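The get_modinfo() lookups used above walk the .modinfo section, which is simply a sequence of NUL-terminated "tag=value" strings. A hedged userspace analogue of that lookup (names are illustrative; the kernel helper itself is not shown in this hunk):

        #include <stddef.h>
        #include <string.h>

        /*
         * Illustrative: 'modinfo'/'size' stand in for the .modinfo section
         * contents. Returns a pointer to the value for 'tag', or NULL.
         */
        static const char *modinfo_lookup(const char *modinfo, size_t size,
                                          const char *tag)
        {
                size_t taglen = strlen(tag);
                const char *p = modinfo;

                while (p < modinfo + size) {
                        size_t len = strnlen(p, modinfo + size - p);

                        if (len > taglen && p[taglen] == '=' &&
                            strncmp(p, tag, taglen) == 0)
                                return p + taglen + 1;
                        p += len + 1;   /* skip this string and its NUL */
                }
                return NULL;
        }

For a section containing "license=GPL\0name=snd_dummy\0", modinfo_lookup(buf, size, "name") returns "snd_dummy", which is how the code above recovers a module name early enough to use it in error messages.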
*/ + info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; + info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; + + return 0; +} + +/* + * These calls taint the kernel depending certain module circumstances */ +static void module_augment_kernel_taints(struct module *mod, struct load_info *info) +{ + int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); + + if (!get_modinfo(info, "intree")) { + if (!test_taint(TAINT_OOT_MODULE)) + pr_warn("%s: loading out-of-tree module taints kernel.\n", + mod->name); + add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); + } + + check_modinfo_retpoline(mod, info); + + if (get_modinfo(info, "staging")) { + add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); + pr_warn("%s: module is from the staging directory, the quality " + "is unknown, you have been warned.\n", mod->name); + } + + if (is_livepatch_module(mod)) { + add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); + pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", + mod->name); + } + + module_license_taint_check(mod, get_modinfo(info, "license")); + + if (get_modinfo(info, "test")) { + if (!test_taint(TAINT_TEST)) + pr_warn("%s: loading test module taints kernel.\n", + mod->name); + add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); + } +#ifdef CONFIG_MODULE_SIG + mod->sig_ok = info->sig_ok; + if (!mod->sig_ok) { + pr_notice_once("%s: module verification failed: signature " + "and/or required key missing - tainting " + "kernel\n", mod->name); + add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); + } +#endif + + /* + * ndiswrapper is under GPL by itself, but loads proprietary modules. + * Don't use add_taint_module(), as it would prevent ndiswrapper from + * using GPL-only symbols it needs. + */ + if (strcmp(mod->name, "ndiswrapper") == 0) + add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); + + /* driverloader was caught wrongly pretending to be under GPL */ + if (strcmp(mod->name, "driverloader") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + + /* lve claims to be GPL but upstream won't provide source */ + if (strcmp(mod->name, "lve") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + + if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) + pr_warn("%s: module license taints kernel.\n", mod->name); + +} + +static int check_modinfo(struct module *mod, struct load_info *info, int flags) +{ + const char *modmagic = get_modinfo(info, "vermagic"); + int err; + + if (flags & MODULE_INIT_IGNORE_VERMAGIC) + modmagic = NULL; + + /* This is allowed: modprobe --force will invalidate it. 
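For context, the "version magic" compared below is a string baked into both the kernel and every module at build time; it looks something like the following (illustrative only, the exact components depend on the configuration):

        kernel:  "6.6.8 SMP preempt mod_unload "
        module:  "6.6.7 SMP preempt mod_unload "   -> rejected: version magic mismatch

Any mismatch is fatal unless userspace deliberately dropped the magic (MODULE_INIT_IGNORE_VERMAGIC, as modprobe --force does), in which case try_to_force_load() either refuses or loads the module and taints the kernel, depending on whether force loading is configured in.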
*/ + if (!modmagic) { + err = try_to_force_load(mod, "bad vermagic"); + if (err) + return err; + } else if (!same_magic(modmagic, vermagic, info->index.vers)) { + pr_err("%s: version magic '%s' should be '%s'\n", + info->name, modmagic, vermagic); + return -ENOEXEC; + } + + err = check_modinfo_livepatch(mod, info); + if (err) + return err; + + return 0; +} + +static int find_module_sections(struct module *mod, struct load_info *info) +{ + mod->kp = section_objs(info, "__param", + sizeof(*mod->kp), &mod->num_kp); + mod->syms = section_objs(info, "__ksymtab", + sizeof(*mod->syms), &mod->num_syms); + mod->crcs = section_addr(info, "__kcrctab"); + mod->gpl_syms = section_objs(info, "__ksymtab_gpl", + sizeof(*mod->gpl_syms), + &mod->num_gpl_syms); + mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); + +#ifdef CONFIG_CONSTRUCTORS + mod->ctors = section_objs(info, ".ctors", + sizeof(*mod->ctors), &mod->num_ctors); + if (!mod->ctors) + mod->ctors = section_objs(info, ".init_array", + sizeof(*mod->ctors), &mod->num_ctors); + else if (find_sec(info, ".init_array")) { + /* + * This shouldn't happen with same compiler and binutils + * building all parts of the module. + */ + pr_warn("%s: has both .ctors and .init_array.\n", + mod->name); + return -EINVAL; + } +#endif + + mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, + &mod->noinstr_text_size); + +#ifdef CONFIG_TRACEPOINTS + mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", + sizeof(*mod->tracepoints_ptrs), + &mod->num_tracepoints); +#endif +#ifdef CONFIG_TREE_SRCU + mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", + sizeof(*mod->srcu_struct_ptrs), + &mod->num_srcu_structs); +#endif +#ifdef CONFIG_BPF_EVENTS + mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", + sizeof(*mod->bpf_raw_events), + &mod->num_bpf_raw_events); +#endif +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); +#endif +#ifdef CONFIG_JUMP_LABEL + mod->jump_entries = section_objs(info, "__jump_table", + sizeof(*mod->jump_entries), + &mod->num_jump_entries); +#endif +#ifdef CONFIG_EVENT_TRACING + mod->trace_events = section_objs(info, "_ftrace_events", + sizeof(*mod->trace_events), + &mod->num_trace_events); + mod->trace_evals = section_objs(info, "_ftrace_eval_map", + sizeof(*mod->trace_evals), + &mod->num_trace_evals); +#endif +#ifdef CONFIG_TRACING + mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", + sizeof(*mod->trace_bprintk_fmt_start), + &mod->num_trace_bprintk_fmt); +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + /* sechdrs[0].sh_size is always zero */ + mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, + sizeof(*mod->ftrace_callsites), + &mod->num_ftrace_callsites); +#endif +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + mod->ei_funcs = section_objs(info, "_error_injection_whitelist", + sizeof(*mod->ei_funcs), + &mod->num_ei_funcs); +#endif +#ifdef CONFIG_KPROBES + mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, + &mod->kprobes_text_size); + mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", + sizeof(unsigned long), + &mod->num_kprobe_blacklist); +#endif +#ifdef CONFIG_PRINTK_INDEX + mod->printk_index_start = section_objs(info, ".printk_index", + sizeof(*mod->printk_index_start), + &mod->printk_index_size); +#endif +#ifdef CONFIG_HAVE_STATIC_CALL_INLINE + mod->static_call_sites = section_objs(info, ".static_call_sites", + sizeof(*mod->static_call_sites), + &mod->num_static_call_sites); +#endif 
+#if IS_ENABLED(CONFIG_KUNIT) + mod->kunit_suites = section_objs(info, ".kunit_test_suites", + sizeof(*mod->kunit_suites), + &mod->num_kunit_suites); +#endif + + mod->extable = section_objs(info, "__ex_table", + sizeof(*mod->extable), &mod->num_exentries); + + if (section_addr(info, "__obsparm")) + pr_warn("%s: Ignoring obsolete parameters\n", mod->name); + +#ifdef CONFIG_DYNAMIC_DEBUG_CORE + mod->dyndbg_info.descs = section_objs(info, "__dyndbg", + sizeof(*mod->dyndbg_info.descs), + &mod->dyndbg_info.num_descs); + mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", + sizeof(*mod->dyndbg_info.classes), + &mod->dyndbg_info.num_classes); +#endif + + return 0; +} + +static int move_module(struct module *mod, struct load_info *info) +{ + int i; + void *ptr; + enum mod_mem_type t = 0; + int ret = -ENOMEM; + + for_each_mod_mem_type(type) { + if (!mod->mem[type].size) { + mod->mem[type].base = NULL; + continue; + } + mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size); + ptr = module_memory_alloc(mod->mem[type].size, type); + /* + * The pointer to these blocks of memory are stored on the module + * structure and we keep that around so long as the module is + * around. We only free that memory when we unload the module. + * Just mark them as not being a leak then. The .init* ELF + * sections *do* get freed after boot so we *could* treat them + * slightly differently with kmemleak_ignore() and only grey + * them out as they work as typical memory allocations which + * *do* eventually get freed, but let's just keep things simple + * and avoid *any* false positives. + */ + kmemleak_not_leak(ptr); + if (!ptr) { + t = type; + goto out_enomem; + } + memset(ptr, 0, mod->mem[type].size); + mod->mem[type].base = ptr; + } + + /* Transfer each section which specifies SHF_ALLOC */ + pr_debug("Final section addresses for %s:\n", mod->name); + for (i = 0; i < info->hdr->e_shnum; i++) { + void *dest; + Elf_Shdr *shdr = &info->sechdrs[i]; + enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; + + if (!(shdr->sh_flags & SHF_ALLOC)) + continue; + + dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK); + + if (shdr->sh_type != SHT_NOBITS) { + /* + * Our ELF checker already validated this, but let's + * be pedantic and make the goal clearer. We actually + * end up copying over all modifications made to the + * userspace copy of the entire struct module. + */ + if (i == info->index.mod && + (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { + ret = -ENOEXEC; + goto out_enomem; + } + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); + } + /* + * Update the userspace copy's ELF section address to point to + * our newly allocated memory as a pure convenience so that + * users of info can keep taking advantage and using the newly + * minted official memory area. + */ + shdr->sh_addr = (unsigned long)dest; + pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, + (long)shdr->sh_size, info->secstrings + shdr->sh_name); + } + + return 0; +out_enomem: + for (t--; t >= 0; t--) + module_memory_free(mod->mem[t].base, t); + return ret; +} + +static int check_export_symbol_versions(struct module *mod) +{ +#ifdef CONFIG_MODVERSIONS + if ((mod->num_syms && !mod->crcs) || + (mod->num_gpl_syms && !mod->gpl_crcs)) { + return try_to_force_load(mod, + "no versions for exported symbols"); + } +#endif + return 0; +} + +static void flush_module_icache(const struct module *mod) +{ + /* + * Flush the instruction cache, since we've played with text. 
+ * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. + */ + for_each_mod_mem_type(type) { + const struct module_memory *mod_mem = &mod->mem[type]; + + if (mod_mem->size) { + flush_icache_range((unsigned long)mod_mem->base, + (unsigned long)mod_mem->base + mod_mem->size); + } + } +} + +bool __weak module_elf_check_arch(Elf_Ehdr *hdr) +{ + return true; +} + +int __weak module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod) +{ + return 0; +} + +/* module_blacklist is a comma-separated list of module names */ +static char *module_blacklist; +static bool blacklisted(const char *module_name) +{ + const char *p; + size_t len; + + if (!module_blacklist) + return false; + + for (p = module_blacklist; *p; p += len) { + len = strcspn(p, ","); + if (strlen(module_name) == len && !memcmp(module_name, p, len)) + return true; + if (p[len] == ',') + len++; + } + return false; +} +core_param(module_blacklist, module_blacklist, charp, 0400); + +static struct module *layout_and_allocate(struct load_info *info, int flags) +{ + struct module *mod; + unsigned int ndx; + int err; + + /* Allow arches to frob section contents and sizes. */ + err = module_frob_arch_sections(info->hdr, info->sechdrs, + info->secstrings, info->mod); + if (err < 0) + return ERR_PTR(err); + + err = module_enforce_rwx_sections(info->hdr, info->sechdrs, + info->secstrings, info->mod); + if (err < 0) + return ERR_PTR(err); + + /* We will do a special allocation for per-cpu sections later. */ + info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; + + /* + * Mark ro_after_init section with SHF_RO_AFTER_INIT so that + * layout_sections() can put it in the right place. + * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. + */ + ndx = find_sec(info, ".data..ro_after_init"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + /* + * Mark the __jump_table section as ro_after_init as well: these data + * structures are never modified, with the exception of entries that + * refer to code in the __init section, which are annotated as such + * at module load time. + */ + ndx = find_sec(info, "__jump_table"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + + /* + * Determine total sizes, and put offsets in sh_entsize. For now + * this is done generically; there doesn't appear to be any + * special cases for the architectures. + */ + layout_sections(info->mod, info); + layout_symtab(info->mod, info); + + /* Allocate and move to the final place */ + err = move_module(info->mod, info); + if (err) + return ERR_PTR(err); + + /* Module has been copied to its final place now: return it. */ + mod = (void *)info->sechdrs[info->index.mod].sh_addr; + kmemleak_load_module(mod, info); + return mod; +} + +/* mod is no longer valid after this! */ +static void module_deallocate(struct module *mod, struct load_info *info) +{ + percpu_modfree(mod); + module_arch_freeing_init(mod); + + free_mod_mem(mod); +} + +int __weak module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + return 0; +} + +static int post_relocation(struct module *mod, const struct load_info *info) +{ + /* Sort exception table now relocations are done. */ + sort_extable(mod->extable, mod->extable + mod->num_exentries); + + /* Copy relocated percpu area over. 
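Returning to blacklisted() above: the module_blacklist= kernel parameter takes a comma-separated list of module names, and the match is an exact name match. A userspace sketch of the same matching logic (the function name is illustrative):

        #include <stdbool.h>
        #include <string.h>

        /* Illustrative analogue of blacklisted(): 'list' plays the role of the
         * module_blacklist= parameter value. */
        static bool name_in_list(const char *name, const char *list)
        {
                const char *p;
                size_t len;

                if (!list)
                        return false;

                for (p = list; *p; p += len) {
                        len = strcspn(p, ",");
                        if (strlen(name) == len && memcmp(name, p, len) == 0)
                                return true;
                        if (p[len] == ',')
                                len++;
                }
                return false;
        }

name_in_list("nouveau", "nouveau,floppy") returns true; booting with module_blacklist=nouveau,floppy makes early_mod_check() reject those names with -EPERM before any allocation for them is attempted.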
*/ + percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, + info->sechdrs[info->index.pcpu].sh_size); + + /* Setup kallsyms-specific fields. */ + add_kallsyms(mod, info); + + /* Arch-specific module finalizing. */ + return module_finalize(info->hdr, info->sechdrs, mod); +} + +/* Call module constructors. */ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS + unsigned long i; + + for (i = 0; i < mod->num_ctors; i++) + mod->ctors[i](); +#endif +} + +/* For freeing module_init on success, in case kallsyms traversing */ +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +static void do_free_init(struct work_struct *w) +{ + struct llist_node *pos, *n, *list; + struct mod_initfree *initfree; + + list = llist_del_all(&init_free_list); + + synchronize_rcu(); + + llist_for_each_safe(pos, n, list) { + initfree = container_of(pos, struct mod_initfree, node); + module_memfree(initfree->init_text); + module_memfree(initfree->init_data); + module_memfree(initfree->init_rodata); + kfree(initfree); + } +} + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "module." +/* Default value for module->async_probe_requested */ +static bool async_probe; +module_param(async_probe, bool, 0644); + +/* + * This is where the real work happens. + * + * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb + * helper command 'lx-symbols'. + */ +static noinline int do_init_module(struct module *mod) +{ + int ret = 0; + struct mod_initfree *freeinit; +#if defined(CONFIG_MODULE_STATS) + unsigned int text_size = 0, total_size = 0; + + for_each_mod_mem_type(type) { + const struct module_memory *mod_mem = &mod->mem[type]; + if (mod_mem->size) { + total_size += mod_mem->size; + if (type == MOD_TEXT || type == MOD_INIT_TEXT) + text_size += mod_mem->size; + } + } +#endif + + freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); + if (!freeinit) { + ret = -ENOMEM; + goto fail; + } + freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; + freeinit->init_data = mod->mem[MOD_INIT_DATA].base; + freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; + + do_mod_ctors(mod); + /* Start the module */ + if (mod->init != NULL) + ret = do_one_initcall(mod->init); + if (ret < 0) { + goto fail_free_freeinit; + } + if (ret > 0) { + pr_warn("%s: '%s'->init suspiciously returned %d, it should " + "follow 0/-E convention\n" + "%s: loading module anyway...\n", + __func__, mod->name, ret, __func__); + dump_stack(); + } + + /* Now it's a first class citizen! */ + mod->state = MODULE_STATE_LIVE; + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_LIVE, mod); + + /* Delay uevent until module has finished its init routine */ + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + + /* + * We need to finish all async code before the module init sequence + * is done. This has potential to deadlock if synchronous module + * loading is requested from async (which is not allowed!). + * + * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous + * request_module() from async workers") for more details. + */ + if (!mod->async_probe_requested) + async_synchronize_full(); + + ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, + mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); + mutex_lock(&module_mutex); + /* Drop initial reference. */ + module_put(mod); + trim_init_extable(mod); +#ifdef CONFIG_KALLSYMS + /* Switch to core kallsyms now init is done: kallsyms may be walking! 
*/ + rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); +#endif + module_enable_ro(mod, true); + mod_tree_remove_init(mod); + module_arch_freeing_init(mod); + for_class_mod_mem_type(type, init) { + mod->mem[type].base = NULL; + mod->mem[type].size = 0; + } + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ + mod->btf_data = NULL; +#endif + /* + * We want to free module_init, but be aware that kallsyms may be + * walking this with preempt disabled. In all the failure paths, we + * call synchronize_rcu(), but we don't want to slow down the success + * path. module_memfree() cannot be called in an interrupt, so do the + * work and call synchronize_rcu() in a work queue. + * + * Note that module_alloc() on most architectures creates W+X page + * mappings which won't be cleaned up until do_free_init() runs. Any + * code such as mark_rodata_ro() which depends on those mappings to + * be cleaned up needs to sync with the queued work - ie + * rcu_barrier() + */ + if (llist_add(&freeinit->node, &init_free_list)) + schedule_work(&init_free_wq); + + mutex_unlock(&module_mutex); + wake_up_all(&module_wq); + + mod_stat_add_long(text_size, &total_text_size); + mod_stat_add_long(total_size, &total_mod_size); + + mod_stat_inc(&modcount); + + return 0; + +fail_free_freeinit: + kfree(freeinit); +fail: + /* Try to protect us from buggy refcounters. */ + mod->state = MODULE_STATE_GOING; + synchronize_rcu(); + module_put(mod); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + ftrace_release_mod(mod); + free_module(mod); + wake_up_all(&module_wq); + + return ret; +} + +static int may_init_module(void) +{ + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + + return 0; +} + +/* Is this module of this name done loading? No locks held. */ +static bool finished_loading(const char *name) +{ + struct module *mod; + bool ret; + + /* + * The module_mutex should not be a heavily contended lock; + * if we get the occasional sleep here, we'll go an extra iteration + * in the wait_event_interruptible(), which is harmless. + */ + sched_annotate_sleep(); + mutex_lock(&module_mutex); + mod = find_module_all(name, strlen(name), true); + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; + mutex_unlock(&module_mutex); + + return ret; +} + +/* Must be called with module_mutex held */ +static int module_patient_check_exists(const char *name, + enum fail_dup_mod_reason reason) +{ + struct module *old; + int err = 0; + + old = find_module_all(name, strlen(name), true); + if (old == NULL) + return 0; + + if (old->state == MODULE_STATE_COMING || + old->state == MODULE_STATE_UNFORMED) { + /* Wait in case it fails to load. */ + mutex_unlock(&module_mutex); + err = wait_event_interruptible(module_wq, + finished_loading(name)); + mutex_lock(&module_mutex); + if (err) + return err; + + /* The module might have gone in the meantime. */ + old = find_module_all(name, strlen(name), true); + } + + if (try_add_failed_module(name, reason)) + pr_warn("Could not add fail-tracking for module: %s\n", name); + + /* + * We are here only when the same module was being loaded. Do + * not try to load it again right now. It prevents long delays + * caused by serialized module load failures. It might happen + * when more devices of the same type trigger load of + * a particular module. 
+ */ + if (old && old->state == MODULE_STATE_LIVE) + return -EEXIST; + return -EBUSY; +} + +/* + * We try to place it in the list now to make sure it's unique before + * we dedicate too many resources. In particular, temporary percpu + * memory exhaustion. + */ +static int add_unformed_module(struct module *mod) +{ + int err; + + mod->state = MODULE_STATE_UNFORMED; + + mutex_lock(&module_mutex); + err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); + if (err) + goto out; + + mod_update_bounds(mod); + list_add_rcu(&mod->list, &modules); + mod_tree_insert(mod); + err = 0; + +out: + mutex_unlock(&module_mutex); + return err; +} + +static int complete_formation(struct module *mod, struct load_info *info) +{ + int err; + + mutex_lock(&module_mutex); + + /* Find duplicate symbols (must be called under lock). */ + err = verify_exported_symbols(mod); + if (err < 0) + goto out; + + /* These rely on module_mutex for list integrity. */ + module_bug_finalize(info->hdr, info->sechdrs, mod); + module_cfi_finalize(info->hdr, info->sechdrs, mod); + + module_enable_ro(mod, false); + module_enable_nx(mod); + module_enable_x(mod); + + /* + * Mark state as coming so strong_try_module_get() ignores us, + * but kallsyms etc. can see us. + */ + mod->state = MODULE_STATE_COMING; + mutex_unlock(&module_mutex); + + return 0; + +out: + mutex_unlock(&module_mutex); + return err; +} + +static int prepare_coming_module(struct module *mod) +{ + int err; + + ftrace_module_enable(mod); + err = klp_module_coming(mod); + if (err) + return err; + + err = blocking_notifier_call_chain_robust(&module_notify_list, + MODULE_STATE_COMING, MODULE_STATE_GOING, mod); + err = notifier_to_errno(err); + if (err) + klp_module_going(mod); + + return err; +} + +static int unknown_module_param_cb(char *param, char *val, const char *modname, + void *arg) +{ + struct module *mod = arg; + int ret; + + if (strcmp(param, "async_probe") == 0) { + if (kstrtobool(val, &mod->async_probe_requested)) + mod->async_probe_requested = true; + return 0; + } + + /* Check for magic 'dyndbg' arg */ + ret = ddebug_dyndbg_module_param_cb(param, val, modname); + if (ret != 0) + pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); + return 0; +} + +/* Module within temporary copy, this doesn't do any allocation */ +static int early_mod_check(struct load_info *info, int flags) +{ + int err; + + /* + * Now that we know we have the correct module name, check + * if it's blacklisted. + */ + if (blacklisted(info->name)) { + pr_err("Module %s is blacklisted\n", info->name); + return -EPERM; + } + + err = rewrite_section_headers(info, flags); + if (err) + return err; + + /* Check module struct version now, before we try to use module. */ + if (!check_modstruct_version(info, info->mod)) + return -ENOEXEC; + + err = check_modinfo(info->mod, info, flags); + if (err) + return err; + + mutex_lock(&module_mutex); + err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); + mutex_unlock(&module_mutex); + + return err; +} + +/* + * Allocate and load the module: note that size of section 0 is always + * zero, and we rely on this for optional sections. + */ +static int load_module(struct load_info *info, const char __user *uargs, + int flags) +{ + struct module *mod; + bool module_allocated = false; + long err = 0; + char *after_dashes; + + /* + * Do the signature check (if any) first. All that + * the signature check needs is info->len, it does + * not need any of the section info. That can be + * set up later. 
This will minimize the chances + * of a corrupt module causing problems before + * we even get to the signature check. + * + * The check will also adjust info->len by stripping + * off the sig length at the end of the module, making + * checks against info->len more correct. + */ + err = module_sig_check(info, flags); + if (err) + goto free_copy; + + /* + * Do basic sanity checks against the ELF header and + * sections. Cache useful sections and set the + * info->mod to the userspace passed struct module. + */ + err = elf_validity_cache_copy(info, flags); + if (err) + goto free_copy; + + err = early_mod_check(info, flags); + if (err) + goto free_copy; + + /* Figure out module layout, and allocate all the memory. */ + mod = layout_and_allocate(info, flags); + if (IS_ERR(mod)) { + err = PTR_ERR(mod); + goto free_copy; + } + + module_allocated = true; + + audit_log_kern_module(mod->name); + + /* Reserve our place in the list. */ + err = add_unformed_module(mod); + if (err) + goto free_module; + + /* + * We are tainting your kernel if your module gets into + * the modules linked list somehow. + */ + module_augment_kernel_taints(mod, info); + + /* To avoid stressing percpu allocator, do this once we're unique. */ + err = percpu_modalloc(mod, info); + if (err) + goto unlink_mod; + + /* Now module is in final location, initialize linked lists, etc. */ + err = module_unload_init(mod); + if (err) + goto unlink_mod; + + init_param_lock(mod); + + /* + * Now we've got everything in the final locations, we can + * find optional sections. + */ + err = find_module_sections(mod, info); + if (err) + goto free_unload; + + err = check_export_symbol_versions(mod); + if (err) + goto free_unload; + + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, info); + + /* Fix up syms, so that st_value is a pointer to location. */ + err = simplify_symbols(mod, info); + if (err < 0) + goto free_modinfo; + + err = apply_relocations(mod, info); + if (err < 0) + goto free_modinfo; + + err = post_relocation(mod, info); + if (err < 0) + goto free_modinfo; + + flush_module_icache(mod); + + /* Now copy in args */ + mod->args = strndup_user(uargs, ~0UL >> 1); + if (IS_ERR(mod->args)) { + err = PTR_ERR(mod->args); + goto free_arch_cleanup; + } + + init_build_id(mod, info); + + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ + ftrace_module_init(mod); + + /* Finally it's fully formed, ready to start executing. */ + err = complete_formation(mod, info); + if (err) + goto ddebug_cleanup; + + err = prepare_coming_module(mod); + if (err) + goto bug_cleanup; + + mod->async_probe_requested = async_probe; + + /* Module is ready to execute: parsing args may do that. */ + after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, + -32768, 32767, mod, + unknown_module_param_cb); + if (IS_ERR(after_dashes)) { + err = PTR_ERR(after_dashes); + goto coming_cleanup; + } else if (after_dashes) { + pr_warn("%s: parameters '%s' after `--' ignored\n", + mod->name, after_dashes); + } + + /* Link in to sysfs. */ + err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); + if (err < 0) + goto coming_cleanup; + + if (is_livepatch_module(mod)) { + err = copy_module_elf(mod, info); + if (err < 0) + goto sysfs_cleanup; + } + + /* Get rid of temporary copy. */ + free_copy(info, flags); + + /* Done! 
*/ + trace_module_load(mod); + + return do_init_module(mod); + + sysfs_cleanup: + mod_sysfs_teardown(mod); + coming_cleanup: + mod->state = MODULE_STATE_GOING; + destroy_params(mod->kp, mod->num_kp); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + bug_cleanup: + mod->state = MODULE_STATE_GOING; + /* module_bug_cleanup needs module_mutex protection */ + mutex_lock(&module_mutex); + module_bug_cleanup(mod); + mutex_unlock(&module_mutex); + + ddebug_cleanup: + ftrace_release_mod(mod); + synchronize_rcu(); + kfree(mod->args); + free_arch_cleanup: + module_arch_cleanup(mod); + free_modinfo: + free_modinfo(mod); + free_unload: + module_unload_free(mod); + unlink_mod: + mutex_lock(&module_mutex); + /* Unlink carefully: kallsyms could be walking list. */ + list_del_rcu(&mod->list); + mod_tree_remove(mod); + wake_up_all(&module_wq); + /* Wait for RCU-sched synchronizing before releasing mod->list. */ + synchronize_rcu(); + mutex_unlock(&module_mutex); + free_module: + mod_stat_bump_invalid(info, flags); + /* Free lock-classes; relies on the preceding sync_rcu() */ + for_class_mod_mem_type(type, core_data) { + lockdep_free_key_range(mod->mem[type].base, + mod->mem[type].size); + } + + module_deallocate(mod, info); + free_copy: + /* + * The info->len is always set. We distinguish between + * failures once the proper module was allocated and + * before that. + */ + if (!module_allocated) + mod_stat_bump_becoming(info, flags); + free_copy(info, flags); + return err; +} + +SYSCALL_DEFINE3(init_module, void __user *, umod, + unsigned long, len, const char __user *, uargs) +{ + int err; + struct load_info info = { }; + + err = may_init_module(); + if (err) + return err; + + pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", + umod, len, uargs); + + err = copy_module_from_user(umod, len, &info); + if (err) { + mod_stat_inc(&failed_kreads); + mod_stat_add_long(len, &invalid_kread_bytes); + return err; + } + + return load_module(&info, uargs, 0); +} + +struct idempotent { + const void *cookie; + struct hlist_node entry; + struct completion complete; + int ret; +}; + +#define IDEM_HASH_BITS 8 +static struct hlist_head idem_hash[1 << IDEM_HASH_BITS]; +static DEFINE_SPINLOCK(idem_lock); + +static bool idempotent(struct idempotent *u, const void *cookie) +{ + int hash = hash_ptr(cookie, IDEM_HASH_BITS); + struct hlist_head *head = idem_hash + hash; + struct idempotent *existing; + bool first; + + u->ret = 0; + u->cookie = cookie; + init_completion(&u->complete); + + spin_lock(&idem_lock); + first = true; + hlist_for_each_entry(existing, head, entry) { + if (existing->cookie != cookie) + continue; + first = false; + break; + } + hlist_add_head(&u->entry, idem_hash + hash); + spin_unlock(&idem_lock); + + return !first; +} + +/* + * We were the first one with 'cookie' on the list, and we ended + * up completing the operation. We now need to walk the list, + * remove everybody - which includes ourselves - fill in the return + * value, and then complete the operation. 
+ */ +static int idempotent_complete(struct idempotent *u, int ret) +{ + const void *cookie = u->cookie; + int hash = hash_ptr(cookie, IDEM_HASH_BITS); + struct hlist_head *head = idem_hash + hash; + struct hlist_node *next; + struct idempotent *pos; + + spin_lock(&idem_lock); + hlist_for_each_entry_safe(pos, next, head, entry) { + if (pos->cookie != cookie) + continue; + hlist_del(&pos->entry); + pos->ret = ret; + complete(&pos->complete); + } + spin_unlock(&idem_lock); + return ret; +} + +static int init_module_from_file(struct file *f, const char __user * uargs, int flags) +{ + struct load_info info = { }; + void *buf = NULL; + int len; + + len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); + if (len < 0) { + mod_stat_inc(&failed_kreads); + return len; + } + + if (flags & MODULE_INIT_COMPRESSED_FILE) { + int err = module_decompress(&info, buf, len); + vfree(buf); /* compressed data is no longer needed */ + if (err) { + mod_stat_inc(&failed_decompress); + mod_stat_add_long(len, &invalid_decompress_bytes); + return err; + } + } else { + info.hdr = buf; + info.len = len; + } + + return load_module(&info, uargs, flags); +} + +static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) +{ + struct idempotent idem; + + if (!f || !(f->f_mode & FMODE_READ)) + return -EBADF; + + /* See if somebody else is doing the operation? */ + if (idempotent(&idem, file_inode(f))) { + wait_for_completion(&idem.complete); + return idem.ret; + } + + /* Otherwise, we'll do it and complete others */ + return idempotent_complete(&idem, + init_module_from_file(f, uargs, flags)); +} + +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +{ + int err; + struct fd f; + + err = may_init_module(); + if (err) + return err; + + pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); + + if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS + |MODULE_INIT_IGNORE_VERMAGIC + |MODULE_INIT_COMPRESSED_FILE)) + return -EINVAL; + + f = fdget(fd); + err = idempotent_init_module(f.file, uargs, flags); + fdput(f); + return err; +} + +/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ +char *module_flags(struct module *mod, char *buf, bool show_state) +{ + int bx = 0; + + BUG_ON(mod->state == MODULE_STATE_UNFORMED); + if (!mod->taints && !show_state) + goto out; + if (mod->taints || + mod->state == MODULE_STATE_GOING || + mod->state == MODULE_STATE_COMING) { + buf[bx++] = '('; + bx += module_flags_taint(mod->taints, buf + bx); + /* Show a - for module-is-being-unloaded */ + if (mod->state == MODULE_STATE_GOING && show_state) + buf[bx++] = '-'; + /* Show a + for module-is-being-loaded */ + if (mod->state == MODULE_STATE_COMING && show_state) + buf[bx++] = '+'; + buf[bx++] = ')'; + } +out: + buf[bx] = '\0'; + + return buf; +} + +/* Given an address, look for it in the module exception tables. */ +const struct exception_table_entry *search_module_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (!mod) + goto out; + + if (!mod->num_exentries) + goto out; + + e = search_extable(mod->extable, + mod->num_exentries, + addr); +out: + preempt_enable(); + + /* + * Now, if we found one, we are running inside it now, hence + * we cannot unload the module, hence no refcnt needed. + */ + return e; +} + +/** + * is_module_address() - is this address inside a module? + * @addr: the address to check. 
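The finit_module() entry point above can be driven from userspace without a libc wrapper. A minimal, hypothetical loader follows; "example.ko" and its parameter string are placeholders, and the per-module "async_probe" parameter and the "ignored after --" behaviour are the ones handled earlier in this file:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/module.h>       /* MODULE_INIT_* flags, e.g. MODULE_INIT_COMPRESSED_FILE */

        int main(void)
        {
                int fd = open("./example.ko", O_RDONLY | O_CLOEXEC);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                /* Parameters after "--" are accepted but ignored, with a warning. */
                if (syscall(SYS_finit_module, fd, "async_probe=1 -- debug=1", 0) != 0) {
                        perror("finit_module");
                        return 1;
                }
                return 0;
        }

Passing a flag outside MODULE_INIT_IGNORE_MODVERSIONS, MODULE_INIT_IGNORE_VERMAGIC and MODULE_INIT_COMPRESSED_FILE makes the syscall fail with -EINVAL, as checked above.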
+ * + * See is_module_text_address() if you simply want to see if the address + * is code (not data). + */ +bool is_module_address(unsigned long addr) +{ + bool ret; + + preempt_disable(); + ret = __module_address(addr) != NULL; + preempt_enable(); + + return ret; +} + +/** + * __module_address() - get the module which contains an address. + * @addr: the address. + * + * Must be called with preempt disabled or module mutex held so that + * module doesn't get freed during this. + */ +struct module *__module_address(unsigned long addr) +{ + struct module *mod; + + if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) + goto lookup; + +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max) + goto lookup; +#endif + + return NULL; + +lookup: + module_assert_mutex_or_preempt(); + + mod = mod_find(addr, &mod_tree); + if (mod) { + BUG_ON(!within_module(addr, mod)); + if (mod->state == MODULE_STATE_UNFORMED) + mod = NULL; + } + return mod; +} + +/** + * is_module_text_address() - is this address inside module code? + * @addr: the address to check. + * + * See is_module_address() if you simply want to see if the address is + * anywhere in a module. See kernel_text_address() for testing if an + * address corresponds to kernel or module code. + */ +bool is_module_text_address(unsigned long addr) +{ + bool ret; + + preempt_disable(); + ret = __module_text_address(addr) != NULL; + preempt_enable(); + + return ret; +} + +/** + * __module_text_address() - get the module whose code contains an address. + * @addr: the address. + * + * Must be called with preempt disabled or module mutex held so that + * module doesn't get freed during this. + */ +struct module *__module_text_address(unsigned long addr) +{ + struct module *mod = __module_address(addr); + if (mod) { + /* Make sure it's within the text section. */ + if (!within_module_mem_type(addr, mod, MOD_TEXT) && + !within_module_mem_type(addr, mod, MOD_INIT_TEXT)) + mod = NULL; + } + return mod; +} + +/* Don't grab lock, we're oopsing. 
*/ +void print_modules(void) +{ + struct module *mod; + char buf[MODULE_FLAGS_BUF_SIZE]; + + printk(KERN_DEFAULT "Modules linked in:"); + /* Most callers should already have preempt disabled, but make sure */ + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); + } + + print_unloaded_tainted_modules(); + preempt_enable(); + if (last_unloaded_module.name[0]) + pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, + last_unloaded_module.taints); + pr_cont("\n"); +} + +#ifdef CONFIG_MODULE_DEBUGFS +struct dentry *mod_debugfs_root; + +static int module_debugfs_init(void) +{ + mod_debugfs_root = debugfs_create_dir("modules", NULL); + return 0; +} +module_init(module_debugfs_init); +#endif diff --git a/kernel/module/procfs.c b/kernel/module/procfs.c new file mode 100644 index 0000000000..0a4841e88a --- /dev/null +++ b/kernel/module/procfs.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module proc support + * + * Copyright (C) 2008 Alexey Dobriyan + */ + +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> +#include "internal.h" + +#ifdef CONFIG_MODULE_UNLOAD +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + struct module_use *use; + int printed_something = 0; + + seq_printf(m, " %i ", module_refcount(mod)); + + /* + * Always include a trailing , so userspace can differentiate + * between this and the old multi-field proc format. + */ + list_for_each_entry(use, &mod->source_list, source_list) { + printed_something = 1; + seq_printf(m, "%s,", use->source->name); + } + + if (mod->init && !mod->exit) { + printed_something = 1; + seq_puts(m, "[permanent],"); + } + + if (!printed_something) + seq_puts(m, "-"); +} +#else /* !CONFIG_MODULE_UNLOAD */ +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + /* We don't know the usage count, or what modules are using. */ + seq_puts(m, " - -"); +} +#endif /* CONFIG_MODULE_UNLOAD */ + +/* Called by the /proc file system to return a list of modules. */ +static void *m_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&module_mutex); + return seq_list_start(&modules, *pos); +} + +static void *m_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &modules, pos); +} + +static void m_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&module_mutex); +} + +static unsigned int module_total_size(struct module *mod) +{ + int size = 0; + + for_each_mod_mem_type(type) + size += mod->mem[type].size; + return size; +} + +static int m_show(struct seq_file *m, void *p) +{ + struct module *mod = list_entry(p, struct module, list); + char buf[MODULE_FLAGS_BUF_SIZE]; + void *value; + unsigned int size; + + /* We always ignore unformed modules. */ + if (mod->state == MODULE_STATE_UNFORMED) + return 0; + + size = module_total_size(mod); + seq_printf(m, "%s %u", mod->name, size); + print_unload_info(m, mod); + + /* Informative for users. */ + seq_printf(m, " %s", + mod->state == MODULE_STATE_GOING ? "Unloading" : + mod->state == MODULE_STATE_COMING ? "Loading" : + "Live"); + /* Used by oprofile and other similar tools. */ + value = m->private ? 
NULL : mod->mem[MOD_TEXT].base; + seq_printf(m, " 0x%px", value); + + /* Taints info */ + if (mod->taints) + seq_printf(m, " %s", module_flags(mod, buf, true)); + + seq_puts(m, "\n"); + return 0; +} + +/* + * Format: modulename size refcount deps address + * + * Where refcount is a number or -, and deps is a comma-separated list + * of depends or -. + */ +static const struct seq_operations modules_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = m_show +}; + +/* + * This also sets the "private" pointer to non-NULL if the + * kernel pointers should be hidden (so you can just test + * "m->private" to see if you should keep the values private). + * + * We use the same logic as for /proc/kallsyms. + */ +static int modules_open(struct inode *inode, struct file *file) +{ + int err = seq_open(file, &modules_op); + + if (!err) { + struct seq_file *m = file->private_data; + + m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul; + } + + return err; +} + +static const struct proc_ops modules_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_open = modules_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; + +static int __init proc_modules_init(void) +{ + proc_create("modules", 0, NULL, &modules_proc_ops); + return 0; +} +module_init(proc_modules_init); diff --git a/kernel/module/signing.c b/kernel/module/signing.c new file mode 100644 index 0000000000..a2ff4242e6 --- /dev/null +++ b/kernel/module/signing.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Module signature checker + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/module_signature.h> +#include <linux/string.h> +#include <linux/verification.h> +#include <linux/security.h> +#include <crypto/public_key.h> +#include <uapi/linux/module.h> +#include "internal.h" + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "module." + +static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); +module_param(sig_enforce, bool_enable_only, 0644); + +/* + * Export sig_enforce kernel cmdline parameter to allow other subsystems rely + * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. + */ +bool is_module_sig_enforced(void) +{ + return sig_enforce; +} +EXPORT_SYMBOL(is_module_sig_enforced); + +void set_module_sig_enforced(void) +{ + sig_enforce = true; +} + +/* + * Verify the signature on a module. 
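The trailer that mod_verify_sig() parses below is appended to the module image and, reconstructed from the code, is laid out as follows (sizes illustrative):

        +---------------------------+  offset 0
        | module ELF image          |  modlen bytes once both trailers are stripped
        +---------------------------+
        | PKCS#7 signature blob     |  sig_len bytes (length read from ms.sig_len,
        |                           |  stored big-endian)
        +---------------------------+
        | struct module_signature   |  sizeof(ms) bytes of metadata
        +---------------------------+
        | MODULE_SIG_STRING marker  |  the literal "~Module signature appended~\n",
        |                           |  checked and stripped by module_sig_check()
        +---------------------------+  <- info->len on entry to module_sig_check()

module_sig_check() first strips the marker and only then hands the remainder to mod_verify_sig(), which peels off struct module_signature and the signature blob, leaving info->len equal to the bare ELF image for all later processing.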
+ */ +int mod_verify_sig(const void *mod, struct load_info *info) +{ + struct module_signature ms; + size_t sig_len, modlen = info->len; + int ret; + + pr_devel("==>%s(,%zu)\n", __func__, modlen); + + if (modlen <= sizeof(ms)) + return -EBADMSG; + + memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); + + ret = mod_check_sig(&ms, modlen, "module"); + if (ret) + return ret; + + sig_len = be32_to_cpu(ms.sig_len); + modlen -= sig_len + sizeof(ms); + info->len = modlen; + + return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); +} + +int module_sig_check(struct load_info *info, int flags) +{ + int err = -ENODATA; + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const char *reason; + const void *mod = info->hdr; + bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | + MODULE_INIT_IGNORE_VERMAGIC); + /* + * Do not allow mangled modules as a module with version information + * removed is no longer the module that was signed. + */ + if (!mangled_module && + info->len > markerlen && + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { + /* We truncate the module to discard the signature */ + info->len -= markerlen; + err = mod_verify_sig(mod, info); + if (!err) { + info->sig_ok = true; + return 0; + } + } + + /* + * We don't permit modules to be loaded into the trusted kernels + * without a valid signature on them, but if we're not enforcing, + * certain errors are non-fatal. + */ + switch (err) { + case -ENODATA: + reason = "unsigned module"; + break; + case -ENOPKG: + reason = "module with unsupported crypto"; + break; + case -ENOKEY: + reason = "module with unavailable key"; + break; + + default: + /* + * All other errors are fatal, including lack of memory, + * unparseable signatures, and signature check failures -- + * even if signatures aren't required. + */ + return err; + } + + if (is_module_sig_enforced()) { + pr_notice("Loading of %s is rejected\n", reason); + return -EKEYREJECTED; + } + + return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); +} diff --git a/kernel/module/stats.c b/kernel/module/stats.c new file mode 100644 index 0000000000..6ab2c94d6b --- /dev/null +++ b/kernel/module/stats.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Debugging module statistics. + * + * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + */ + +#include <linux/module.h> +#include <uapi/linux/module.h> +#include <linux/string.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/rculist.h> +#include <linux/math.h> + +#include "internal.h" + +/** + * DOC: module debugging statistics overview + * + * Enabling CONFIG_MODULE_STATS enables module debugging statistics which + * are useful to monitor and root cause memory pressure issues with module + * loading. These statistics are useful to allow us to improve production + * workloads. + * + * The current module debugging statistics supported help keep track of module + * loading failures to enable improvements either for kernel module auto-loading + * usage (request_module()) or interactions with userspace. Statistics are + * provided to track all possible failures in the finit_module() path and memory + * wasted in this process space. Each of the failure counters are associated + * to a type of module loading failure which is known to incur a certain amount + * of memory allocation loss. 
In the worst case loading a module will fail after + * a 3 step memory allocation process: + * + * a) memory allocated with kernel_read_file_from_fd() + * b) module decompression processes the file read from + * kernel_read_file_from_fd(), and vmap() is used to map + * the decompressed module to a new local buffer which represents + * a copy of the decompressed module passed from userspace. The buffer + * from kernel_read_file_from_fd() is freed right away. + * c) layout_and_allocate() allocates space for the final resting + * place where we would keep the module if it were to be processed + * successfully. + * + * If a failure occurs after these three different allocations only one + * counter will be incremented with the summation of the allocated bytes freed + * incurred during this failure. Likewise, if module loading failed only after + * step b) a separate counter is used and incremented for the bytes freed and + * not used during both of those allocations. + * + * Virtual memory space can be limited, for example on x86 virtual memory size + * defaults to 128 MiB. We should strive to limit and avoid wasting virtual + * memory allocations when possible. These module debugging statistics help + * to evaluate how much memory is being wasted on bootup due to module loading + * failures. + * + * All counters are designed to be incremental. Atomic counters are used so to + * remain simple and avoid delays and deadlocks. + */ + +/** + * DOC: dup_failed_modules - tracks duplicate failed modules + * + * Linked list of modules which failed to be loaded because an already existing + * module with the same name was already being processed or already loaded. + * The finit_module() system call incurs heavy virtual memory allocations. In + * the worst case an finit_module() system call can end up allocating virtual + * memory 3 times: + * + * 1) kernel_read_file_from_fd() call uses vmalloc() + * 2) optional module decompression uses vmap() + * 3) layout_and allocate() can use vzalloc() or an arch specific variation of + * vmalloc to deal with ELF sections requiring special permissions + * + * In practice on a typical boot today most finit_module() calls fail due to + * the module with the same name already being loaded or about to be processed. + * All virtual memory allocated to these failed modules will be freed with + * no functional use. + * + * To help with this the dup_failed_modules allows us to track modules which + * failed to load due to the fact that a module was already loaded or being + * processed. There are only two points at which we can fail such calls, + * we list them below along with the number of virtual memory allocation + * calls: + * + * a) FAIL_DUP_MOD_BECOMING: at the end of early_mod_check() before + * layout_and_allocate(). + * - with module decompression: 2 virtual memory allocation calls + * - without module decompression: 1 virtual memory allocation calls + * b) FAIL_DUP_MOD_LOAD: after layout_and_allocate() on add_unformed_module() + * - with module decompression 3 virtual memory allocation calls + * - without module decompression 2 virtual memory allocation calls + * + * We should strive to get this list to be as small as possible. If this list + * is not empty it is a reflection of possible work or optimizations possible + * either in-kernel or in userspace. 
+ */ +static LIST_HEAD(dup_failed_modules); + +/** + * DOC: module statistics debugfs counters + * + * The total amount of wasted virtual memory allocation space during module + * loading can be computed by adding the total from the summation: + * + * * @invalid_kread_bytes + + * @invalid_decompress_bytes + + * @invalid_becoming_bytes + + * @invalid_mod_bytes + * + * The following debugfs counters are available to inspect module loading + * failures: + * + * * total_mod_size: total bytes ever used by all modules we've dealt with on + * this system + * * total_text_size: total bytes of the .text and .init.text ELF section + * sizes we've dealt with on this system + * * invalid_kread_bytes: bytes allocated and then freed on failures which + * happen due to the initial kernel_read_file_from_fd(). kernel_read_file_from_fd() + * uses vmalloc(). These should typically not happen unless your system is + * under memory pressure. + * * invalid_decompress_bytes: number of bytes allocated and freed due to + * memory allocations in the module decompression path that use vmap(). + * These typically should not happen unless your system is under memory + * pressure. + * * invalid_becoming_bytes: total number of bytes allocated and freed used + * used to read the kernel module userspace wants us to read before we + * promote it to be processed to be added to our @modules linked list. These + * failures can happen if we had a check in between a successful kernel_read_file_from_fd() + * call and right before we allocate the our private memory for the module + * which would be kept if the module is successfully loaded. The most common + * reason for this failure is when userspace is racing to load a module + * which it does not yet see loaded. The first module to succeed in + * add_unformed_module() will add a module to our &modules list and + * subsequent loads of modules with the same name will error out at the + * end of early_mod_check(). The check for module_patient_check_exists() + * at the end of early_mod_check() prevents duplicate allocations + * on layout_and_allocate() for modules already being processed. These + * duplicate failed modules are non-fatal, however they typically are + * indicative of userspace not seeing a module in userspace loaded yet and + * unnecessarily trying to load a module before the kernel even has a chance + * to begin to process prior requests. Although duplicate failures can be + * non-fatal, we should try to reduce vmalloc() pressure proactively, so + * ideally after boot this will be close to as 0 as possible. If module + * decompression was used we also add to this counter the cost of the + * initial kernel_read_file_from_fd() of the compressed module. If module + * decompression was not used the value represents the total allocated and + * freed bytes in kernel_read_file_from_fd() calls for these type of + * failures. These failures can occur because: + * + * * module_sig_check() - module signature checks + * * elf_validity_cache_copy() - some ELF validation issue + * * early_mod_check(): + * + * * blacklisting + * * failed to rewrite section headers + * * version magic + * * live patch requirements didn't check out + * * the module was detected as being already present + * + * * invalid_mod_bytes: these are the total number of bytes allocated and + * freed due to failures after we did all the sanity checks of the module + * which userspace passed to us and after our first check that the module + * is unique. 
A module can still fail to load if we detect the module is + * loaded after we allocate space for it with layout_and_allocate(), we do + * this check right before processing the module as live and run its + * initialization routines. Note that you have a failure of this type it + * also means the respective kernel_read_file_from_fd() memory space was + * also freed and not used, and so we increment this counter with twice + * the size of the module. Additionally if you used module decompression + * the size of the compressed module is also added to this counter. + * + * * modcount: how many modules we've loaded in our kernel life time + * * failed_kreads: how many modules failed due to failed kernel_read_file_from_fd() + * * failed_decompress: how many failed module decompression attempts we've had. + * These really should not happen unless your compression / decompression + * might be broken. + * * failed_becoming: how many modules failed after we kernel_read_file_from_fd() + * it and before we allocate memory for it with layout_and_allocate(). This + * counter is never incremented if you manage to validate the module and + * call layout_and_allocate() for it. + * * failed_load_modules: how many modules failed once we've allocated our + * private space for our module using layout_and_allocate(). These failures + * should hopefully mostly be dealt with already. Races in theory could + * still exist here, but it would just mean the kernel had started processing + * two threads concurrently up to early_mod_check() and one thread won. + * These failures are good signs the kernel or userspace is doing something + * seriously stupid or that could be improved. We should strive to fix these, + * but it is perhaps not easy to fix them. A recent example are the modules + * requests incurred for frequency modules, a separate module request was + * being issued for each CPU on a system. 
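A short worked example of this byte accounting, using made-up sizes and the mod_stat_bump_becoming() / mod_stat_bump_invalid() helpers defined later in this file: assume a module that is 1 MiB compressed and roughly 4 MiB once decompressed, loaded with MODULE_INIT_COMPRESSED_FILE.

        fails in early_mod_check()        (FAIL_DUP_MOD_BECOMING):
                invalid_becoming_bytes += 4 MiB (info->len) + 1 MiB (compressed_len) = 5 MiB

        fails in add_unformed_module()    (FAIL_DUP_MOD_LOAD):
                invalid_mod_bytes += 2 * 4 MiB (info->len, counted twice because the
                layout_and_allocate() copy is discarded as well) + 1 MiB = 9 MiB

The same request made without compression drops the 1 MiB term in both cases, matching the allocation-call counts listed for the two failure points above.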
+ */ + +atomic_long_t total_mod_size; +atomic_long_t total_text_size; +atomic_long_t invalid_kread_bytes; +atomic_long_t invalid_decompress_bytes; +static atomic_long_t invalid_becoming_bytes; +static atomic_long_t invalid_mod_bytes; +atomic_t modcount; +atomic_t failed_kreads; +atomic_t failed_decompress; +static atomic_t failed_becoming; +static atomic_t failed_load_modules; + +static const char *mod_fail_to_str(struct mod_fail_load *mod_fail) +{ + if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask) && + test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask)) + return "Becoming & Load"; + if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask)) + return "Becoming"; + if (test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask)) + return "Load"; + return "Bug-on-stats"; +} + +void mod_stat_bump_invalid(struct load_info *info, int flags) +{ + atomic_long_add(info->len * 2, &invalid_mod_bytes); + atomic_inc(&failed_load_modules); +#if defined(CONFIG_MODULE_DECOMPRESS) + if (flags & MODULE_INIT_COMPRESSED_FILE) + atomic_long_add(info->compressed_len, &invalid_mod_bytes); +#endif +} + +void mod_stat_bump_becoming(struct load_info *info, int flags) +{ + atomic_inc(&failed_becoming); + atomic_long_add(info->len, &invalid_becoming_bytes); +#if defined(CONFIG_MODULE_DECOMPRESS) + if (flags & MODULE_INIT_COMPRESSED_FILE) + atomic_long_add(info->compressed_len, &invalid_becoming_bytes); +#endif +} + +int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason) +{ + struct mod_fail_load *mod_fail; + + list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list, + lockdep_is_held(&module_mutex)) { + if (!strcmp(mod_fail->name, name)) { + atomic_long_inc(&mod_fail->count); + __set_bit(reason, &mod_fail->dup_fail_mask); + goto out; + } + } + + mod_fail = kzalloc(sizeof(*mod_fail), GFP_KERNEL); + if (!mod_fail) + return -ENOMEM; + memcpy(mod_fail->name, name, strlen(name)); + __set_bit(reason, &mod_fail->dup_fail_mask); + atomic_long_inc(&mod_fail->count); + list_add_rcu(&mod_fail->list, &dup_failed_modules); +out: + return 0; +} + +/* + * At 64 bytes per module and assuming a 1024 bytes preamble we can fit the + * 112 module prints within 8k. 
+ * + * 1024 + (64*112) = 8k + */ +#define MAX_PREAMBLE 1024 +#define MAX_FAILED_MOD_PRINT 112 +#define MAX_BYTES_PER_MOD 64 +static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct mod_fail_load *mod_fail; + unsigned int len, size, count_failed = 0; + char *buf; + int ret; + u32 live_mod_count, fkreads, fdecompress, fbecoming, floads; + unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes, + idecompress_bytes, imod_bytes, total_virtual_lost; + + live_mod_count = atomic_read(&modcount); + fkreads = atomic_read(&failed_kreads); + fdecompress = atomic_read(&failed_decompress); + fbecoming = atomic_read(&failed_becoming); + floads = atomic_read(&failed_load_modules); + + total_size = atomic_long_read(&total_mod_size); + text_size = atomic_long_read(&total_text_size); + ikread_bytes = atomic_long_read(&invalid_kread_bytes); + idecompress_bytes = atomic_long_read(&invalid_decompress_bytes); + ibecoming_bytes = atomic_long_read(&invalid_becoming_bytes); + imod_bytes = atomic_long_read(&invalid_mod_bytes); + + total_virtual_lost = ikread_bytes + idecompress_bytes + ibecoming_bytes + imod_bytes; + + size = MAX_PREAMBLE + min((unsigned int)(floads + fbecoming), + (unsigned int)MAX_FAILED_MOD_PRINT) * MAX_BYTES_PER_MOD; + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + /* The beginning of our debug preamble */ + len = scnprintf(buf, size, "%25s\t%u\n", "Mods ever loaded", live_mod_count); + + len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on kread", fkreads); + + len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on decompress", + fdecompress); + len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on becoming", fbecoming); + + len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on load", floads); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total module size", total_size); + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total mod text size", text_size); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kread bytes", ikread_bytes); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed decompress bytes", + idecompress_bytes); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed becoming bytes", ibecoming_bytes); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kmod bytes", imod_bytes); + + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Virtual mem wasted bytes", total_virtual_lost); + + if (live_mod_count && total_size) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod size", + DIV_ROUND_UP(total_size, live_mod_count)); + } + + if (live_mod_count && text_size) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod text size", + DIV_ROUND_UP(text_size, live_mod_count)); + } + + /* + * We use WARN_ON_ONCE() for the counters to ensure we always have parity + * for keeping tabs on a type of failure with one type of byte counter. + * The counters for imod_bytes does not increase for fkreads failures + * for example, and so on. 
+ */ + + WARN_ON_ONCE(ikread_bytes && !fkreads); + if (fkreads && ikread_bytes) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail kread bytes", + DIV_ROUND_UP(ikread_bytes, fkreads)); + } + + WARN_ON_ONCE(ibecoming_bytes && !fbecoming); + if (fbecoming && ibecoming_bytes) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail becoming bytes", + DIV_ROUND_UP(ibecoming_bytes, fbecoming)); + } + + WARN_ON_ONCE(idecompress_bytes && !fdecompress); + if (fdecompress && idecompress_bytes) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail decomp bytes", + DIV_ROUND_UP(idecompress_bytes, fdecompress)); + } + + WARN_ON_ONCE(imod_bytes && !floads); + if (floads && imod_bytes) { + len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average fail load bytes", + DIV_ROUND_UP(imod_bytes, floads)); + } + + /* End of our debug preamble header. */ + + /* Catch when we've gone beyond our expected preamble */ + WARN_ON_ONCE(len >= MAX_PREAMBLE); + + if (list_empty(&dup_failed_modules)) + goto out; + + len += scnprintf(buf + len, size - len, "Duplicate failed modules:\n"); + len += scnprintf(buf + len, size - len, "%25s\t%15s\t%25s\n", + "Module-name", "How-many-times", "Reason"); + mutex_lock(&module_mutex); + + + list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list) { + if (WARN_ON_ONCE(++count_failed >= MAX_FAILED_MOD_PRINT)) + goto out_unlock; + len += scnprintf(buf + len, size - len, "%25s\t%15lu\t%25s\n", mod_fail->name, + atomic_long_read(&mod_fail->count), mod_fail_to_str(mod_fail)); + } +out_unlock: + mutex_unlock(&module_mutex); +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret; +} +#undef MAX_PREAMBLE +#undef MAX_FAILED_MOD_PRINT +#undef MAX_BYTES_PER_MOD + +static const struct file_operations fops_mod_stats = { + .read = read_file_mod_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#define mod_debug_add_ulong(name) debugfs_create_ulong(#name, 0400, mod_debugfs_root, (unsigned long *) &name.counter) +#define mod_debug_add_atomic(name) debugfs_create_atomic_t(#name, 0400, mod_debugfs_root, &name) +static int __init module_stats_init(void) +{ + mod_debug_add_ulong(total_mod_size); + mod_debug_add_ulong(total_text_size); + mod_debug_add_ulong(invalid_kread_bytes); + mod_debug_add_ulong(invalid_decompress_bytes); + mod_debug_add_ulong(invalid_becoming_bytes); + mod_debug_add_ulong(invalid_mod_bytes); + + mod_debug_add_atomic(modcount); + mod_debug_add_atomic(failed_kreads); + mod_debug_add_atomic(failed_decompress); + mod_debug_add_atomic(failed_becoming); + mod_debug_add_atomic(failed_load_modules); + + debugfs_create_file("stats", 0400, mod_debugfs_root, mod_debugfs_root, &fops_mod_stats); + + return 0; +} +#undef mod_debug_add_ulong +#undef mod_debug_add_atomic +module_init(module_stats_init); diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c new file mode 100644 index 0000000000..a2b656b4e3 --- /dev/null +++ b/kernel/module/strict_rwx.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module strict rwx + * + * Copyright (C) 2015 Rusty Russell + */ + +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/set_memory.h> +#include "internal.h" + +static void module_set_memory(const struct module *mod, enum mod_mem_type type, + int (*set_memory)(unsigned long start, int num_pages)) +{ + const struct module_memory *mod_mem = &mod->mem[type]; + + 
set_vm_flush_reset_perms(mod_mem->base); + set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT); +} + +/* + * Since some arches are moving towards PAGE_KERNEL module allocations instead + * of PAGE_KERNEL_EXEC, keep module_enable_x() independent of + * CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we + * are strict. + */ +void module_enable_x(const struct module *mod) +{ + for_class_mod_mem_type(type, text) + module_set_memory(mod, type, set_memory_x); +} + +void module_enable_ro(const struct module *mod, bool after_init) +{ + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return; +#ifdef CONFIG_STRICT_MODULE_RWX + if (!rodata_enabled) + return; +#endif + + module_set_memory(mod, MOD_TEXT, set_memory_ro); + module_set_memory(mod, MOD_INIT_TEXT, set_memory_ro); + module_set_memory(mod, MOD_RODATA, set_memory_ro); + module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro); + + if (after_init) + module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro); +} + +void module_enable_nx(const struct module *mod) +{ + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return; + + for_class_mod_mem_type(type, data) + module_set_memory(mod, type, set_memory_nx); +} + +int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR; + int i; + + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return 0; + + for (i = 0; i < hdr->e_shnum; i++) { + if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) { + pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n", + mod->name, secstrings + sechdrs[i].sh_name, i); + return -ENOEXEC; + } + } + + return 0; +} diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c new file mode 100644 index 0000000000..c921bf0440 --- /dev/null +++ b/kernel/module/sysfs.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module sysfs support + * + * Copyright (C) 2008 Rusty Russell + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/sysfs.h> +#include <linux/slab.h> +#include <linux/kallsyms.h> +#include <linux/mutex.h> +#include "internal.h" + +/* + * /sys/module/foo/sections stuff + * J. Corbet <corbet@lwn.net> + */ +#ifdef CONFIG_KALLSYMS +struct module_sect_attr { + struct bin_attribute battr; + unsigned long address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[]; +}; + +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) +{ + struct module_sect_attr *sattr = + container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; + + if (pos != 0) + return -EINVAL; + + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? 
(void *)sattr->address : NULL);
+ count = min(count, wrote);
+ memcpy(buf, bounce, count);
+
+ return count;
+}
+
+static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
+{
+ unsigned int section;
+
+ for (section = 0; section < sect_attrs->nsections; section++)
+ kfree(sect_attrs->attrs[section].battr.attr.name);
+ kfree(sect_attrs);
+}
+
+static void add_sect_attrs(struct module *mod, const struct load_info *info)
+{
+ unsigned int nloaded = 0, i, size[2];
+ struct module_sect_attrs *sect_attrs;
+ struct module_sect_attr *sattr;
+ struct bin_attribute **gattr;
+
+ /* Count loaded sections and allocate structures */
+ for (i = 0; i < info->hdr->e_shnum; i++)
+ if (!sect_empty(&info->sechdrs[i]))
+ nloaded++;
+ size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
+ sizeof(sect_attrs->grp.bin_attrs[0]));
+ size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
+ sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
+ if (!sect_attrs)
+ return;
+
+ /* Setup section attributes. */
+ sect_attrs->grp.name = "sections";
+ sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
+
+ sect_attrs->nsections = 0;
+ sattr = &sect_attrs->attrs[0];
+ gattr = &sect_attrs->grp.bin_attrs[0];
+ for (i = 0; i < info->hdr->e_shnum; i++) {
+ Elf_Shdr *sec = &info->sechdrs[i];
+
+ if (sect_empty(sec))
+ continue;
+ sysfs_bin_attr_init(&sattr->battr);
+ sattr->address = sec->sh_addr;
+ sattr->battr.attr.name =
+ kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+ if (!sattr->battr.attr.name)
+ goto out;
+ sect_attrs->nsections++;
+ sattr->battr.read = module_sect_read;
+ sattr->battr.size = MODULE_SECT_READ_SIZE;
+ sattr->battr.attr.mode = 0400;
+ *(gattr++) = &(sattr++)->battr;
+ }
+ *gattr = NULL;
+
+ if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
+ goto out;
+
+ mod->sect_attrs = sect_attrs;
+ return;
+out:
+ free_sect_attrs(sect_attrs);
+}
+
+static void remove_sect_attrs(struct module *mod)
+{
+ if (mod->sect_attrs) {
+ sysfs_remove_group(&mod->mkobj.kobj,
+ &mod->sect_attrs->grp);
+ /*
+ * We are positive that no one is using any sect attrs
+ * at this point. Deallocate immediately.
+ */
+ free_sect_attrs(mod->sect_attrs);
+ mod->sect_attrs = NULL;
+ }
+}
+
+/*
+ * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
+ */
+
+struct module_notes_attrs {
+ struct kobject *dir;
+ unsigned int notes;
+ struct bin_attribute attrs[];
+};
+
+static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ /*
+ * The caller checked the pos and count against our size.
+ */
+ memcpy(buf, bin_attr->private + pos, count);
+ return count;
+}
+
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
+ unsigned int i)
+{
+ if (notes_attrs->dir) {
+ while (i-- > 0)
+ sysfs_remove_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]);
+ kobject_put(notes_attrs->dir);
+ }
+ kfree(notes_attrs);
+}
+
+static void add_notes_attrs(struct module *mod, const struct load_info *info)
+{
+ unsigned int notes, loaded, i;
+ struct module_notes_attrs *notes_attrs;
+ struct bin_attribute *nattr;
+
+ /* failed to create section attributes, so can't create notes */
+ if (!mod->sect_attrs)
+ return;
+
+ /* Count notes sections and allocate structures.
 */
+ notes = 0;
+ for (i = 0; i < info->hdr->e_shnum; i++)
+ if (!sect_empty(&info->sechdrs[i]) &&
+ info->sechdrs[i].sh_type == SHT_NOTE)
+ ++notes;
+
+ if (notes == 0)
+ return;
+
+ notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
+ GFP_KERNEL);
+ if (!notes_attrs)
+ return;
+
+ notes_attrs->notes = notes;
+ nattr = &notes_attrs->attrs[0];
+ for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
+ if (sect_empty(&info->sechdrs[i]))
+ continue;
+ if (info->sechdrs[i].sh_type == SHT_NOTE) {
+ sysfs_bin_attr_init(nattr);
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
+ nattr->attr.mode = 0444;
+ nattr->size = info->sechdrs[i].sh_size;
+ nattr->private = (void *)info->sechdrs[i].sh_addr;
+ nattr->read = module_notes_read;
+ ++nattr;
+ }
+ ++loaded;
+ }
+
+ notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
+ if (!notes_attrs->dir)
+ goto out;
+
+ for (i = 0; i < notes; ++i)
+ if (sysfs_create_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]))
+ goto out;
+
+ mod->notes_attrs = notes_attrs;
+ return;
+
+out:
+ free_notes_attrs(notes_attrs, i);
+}
+
+static void remove_notes_attrs(struct module *mod)
+{
+ if (mod->notes_attrs)
+ free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+}
+
+#else /* !CONFIG_KALLSYMS */
+static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { }
+static inline void remove_sect_attrs(struct module *mod) { }
+static inline void add_notes_attrs(struct module *mod, const struct load_info *info) { }
+static inline void remove_notes_attrs(struct module *mod) { }
+#endif /* CONFIG_KALLSYMS */
+
+static void del_usage_links(struct module *mod)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+ struct module_use *use;
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry(use, &mod->target_list, target_list)
+ sysfs_remove_link(use->target->holders_dir, mod->name);
+ mutex_unlock(&module_mutex);
+#endif
+}
+
+static int add_usage_links(struct module *mod)
+{
+ int ret = 0;
+#ifdef CONFIG_MODULE_UNLOAD
+ struct module_use *use;
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry(use, &mod->target_list, target_list) {
+ ret = sysfs_create_link(use->target->holders_dir,
+ &mod->mkobj.kobj, mod->name);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&module_mutex);
+ if (ret)
+ del_usage_links(mod);
+#endif
+ return ret;
+}
+
+static void module_remove_modinfo_attrs(struct module *mod, int end)
+{
+ struct module_attribute *attr;
+ int i;
+
+ for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
+ if (end >= 0 && i > end)
+ break;
+ /* pick a field to test for end of list */
+ if (!attr->attr.name)
+ break;
+ sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
+ if (attr->free)
+ attr->free(mod);
+ }
+ kfree(mod->modinfo_attrs);
+}
+
+static int module_add_modinfo_attrs(struct module *mod)
+{
+ struct module_attribute *attr;
+ struct module_attribute *temp_attr;
+ int error = 0;
+ int i;
+
+ mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
+ (modinfo_attrs_count + 1)),
+ GFP_KERNEL);
+ if (!mod->modinfo_attrs)
+ return -ENOMEM;
+
+ temp_attr = mod->modinfo_attrs;
+ for (i = 0; (attr = modinfo_attrs[i]); i++) {
+ if (!attr->test || attr->test(mod)) {
+ memcpy(temp_attr, attr, sizeof(*temp_attr));
+ sysfs_attr_init(&temp_attr->attr);
+ error = sysfs_create_file(&mod->mkobj.kobj,
+ &temp_attr->attr);
+ if (error)
+ goto error_out;
+ ++temp_attr;
+ }
+ }
+
+ return 0;
+
+error_out:
+ if (i > 0)
+ module_remove_modinfo_attrs(mod, --i);
+ else
+ kfree(mod->modinfo_attrs);
+ return error;
+}
+
+static void
mod_kobject_put(struct module *mod) +{ + DECLARE_COMPLETION_ONSTACK(c); + + mod->mkobj.kobj_completion = &c; + kobject_put(&mod->mkobj.kobj); + wait_for_completion(&c); +} + +static int mod_sysfs_init(struct module *mod) +{ + int err; + struct kobject *kobj; + + if (!module_kset) { + pr_err("%s: module sysfs not initialized\n", mod->name); + err = -EINVAL; + goto out; + } + + kobj = kset_find_obj(module_kset, mod->name); + if (kobj) { + pr_err("%s: module is already loaded\n", mod->name); + kobject_put(kobj); + err = -EINVAL; + goto out; + } + + mod->mkobj.mod = mod; + + memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); + mod->mkobj.kobj.kset = module_kset; + err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, + "%s", mod->name); + if (err) + mod_kobject_put(mod); + +out: + return err; +} + +int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + struct kernel_param *kparam, + unsigned int num_params) +{ + int err; + + err = mod_sysfs_init(mod); + if (err) + goto out; + + mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); + if (!mod->holders_dir) { + err = -ENOMEM; + goto out_unreg; + } + + err = module_param_sysfs_setup(mod, kparam, num_params); + if (err) + goto out_unreg_holders; + + err = module_add_modinfo_attrs(mod); + if (err) + goto out_unreg_param; + + err = add_usage_links(mod); + if (err) + goto out_unreg_modinfo_attrs; + + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); + + return 0; + +out_unreg_modinfo_attrs: + module_remove_modinfo_attrs(mod, -1); +out_unreg_param: + module_param_sysfs_remove(mod); +out_unreg_holders: + kobject_put(mod->holders_dir); +out_unreg: + mod_kobject_put(mod); +out: + return err; +} + +static void mod_sysfs_fini(struct module *mod) +{ + remove_notes_attrs(mod); + remove_sect_attrs(mod); + mod_kobject_put(mod); +} + +void mod_sysfs_teardown(struct module *mod) +{ + del_usage_links(mod); + module_remove_modinfo_attrs(mod, -1); + module_param_sysfs_remove(mod); + kobject_put(mod->mkobj.drivers_dir); + kobject_put(mod->holders_dir); + mod_sysfs_fini(mod); +} + +void init_param_lock(struct module *mod) +{ + mutex_init(&mod->param_lock); +} diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c new file mode 100644 index 0000000000..16742d1c63 --- /dev/null +++ b/kernel/module/tracking.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module taint unload tracking support + * + * Copyright (C) 2022 Aaron Tomlin + */ + +#include <linux/module.h> +#include <linux/string.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/rculist.h> +#include "internal.h" + +static LIST_HEAD(unloaded_tainted_modules); +extern struct dentry *mod_debugfs_root; + +int try_add_tainted_module(struct module *mod) +{ + struct mod_unload_taint *mod_taint; + + module_assert_mutex_or_preempt(); + + if (!mod->taints) + goto out; + + list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules, list, + lockdep_is_held(&module_mutex)) { + if (!strcmp(mod_taint->name, mod->name) && + mod_taint->taints & mod->taints) { + mod_taint->count++; + goto out; + } + } + + mod_taint = kmalloc(sizeof(*mod_taint), GFP_KERNEL); + if (unlikely(!mod_taint)) + return -ENOMEM; + strscpy(mod_taint->name, mod->name, MODULE_NAME_LEN); + mod_taint->taints = mod->taints; + list_add_rcu(&mod_taint->list, &unloaded_tainted_modules); + mod_taint->count = 1; +out: + return 0; +} + +void print_unloaded_tainted_modules(void) +{ + struct 
mod_unload_taint *mod_taint; + char buf[MODULE_FLAGS_BUF_SIZE]; + + if (!list_empty(&unloaded_tainted_modules)) { + printk(KERN_DEFAULT "Unloaded tainted modules:"); + list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules, + list) { + size_t l; + + l = module_flags_taint(mod_taint->taints, buf); + buf[l++] = '\0'; + pr_cont(" %s(%s):%llu", mod_taint->name, buf, + mod_taint->count); + } + } +} + +#ifdef CONFIG_DEBUG_FS +static void *unloaded_tainted_modules_seq_start(struct seq_file *m, loff_t *pos) + __acquires(rcu) +{ + rcu_read_lock(); + return seq_list_start_rcu(&unloaded_tainted_modules, *pos); +} + +static void *unloaded_tainted_modules_seq_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next_rcu(p, &unloaded_tainted_modules, pos); +} + +static void unloaded_tainted_modules_seq_stop(struct seq_file *m, void *p) + __releases(rcu) +{ + rcu_read_unlock(); +} + +static int unloaded_tainted_modules_seq_show(struct seq_file *m, void *p) +{ + struct mod_unload_taint *mod_taint; + char buf[MODULE_FLAGS_BUF_SIZE]; + size_t l; + + mod_taint = list_entry(p, struct mod_unload_taint, list); + l = module_flags_taint(mod_taint->taints, buf); + buf[l++] = '\0'; + + seq_printf(m, "%s (%s) %llu", mod_taint->name, buf, mod_taint->count); + seq_puts(m, "\n"); + + return 0; +} + +static const struct seq_operations unloaded_tainted_modules_seq_ops = { + .start = unloaded_tainted_modules_seq_start, + .next = unloaded_tainted_modules_seq_next, + .stop = unloaded_tainted_modules_seq_stop, + .show = unloaded_tainted_modules_seq_show, +}; + +static int unloaded_tainted_modules_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &unloaded_tainted_modules_seq_ops); +} + +static const struct file_operations unloaded_tainted_modules_fops = { + .open = unloaded_tainted_modules_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init unloaded_tainted_modules_init(void) +{ + debugfs_create_file("unloaded_tainted", 0444, mod_debugfs_root, NULL, + &unloaded_tainted_modules_fops); + return 0; +} +module_init(unloaded_tainted_modules_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c new file mode 100644 index 0000000000..277197977d --- /dev/null +++ b/kernel/module/tree_lookup.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Modules tree lookup + * + * Copyright (C) 2015 Peter Zijlstra + * Copyright (C) 2015 Rusty Russell + */ + +#include <linux/module.h> +#include <linux/rbtree_latch.h> +#include "internal.h" + +/* + * Use a latched RB-tree for __module_address(); this allows us to use + * RCU-sched lookups of the address from any context. + * + * This is conditional on PERF_EVENTS || TRACING because those can really hit + * __module_address() hard by doing a lot of stack unwinding; potentially from + * NMI context. 
+ */ + +static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) +{ + struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node); + + return (unsigned long)mod_mem->base; +} + +static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) +{ + struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node); + + return (unsigned long)mod_mem->size; +} + +static __always_inline bool +mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) +{ + return __mod_tree_val(a) < __mod_tree_val(b); +} + +static __always_inline int +mod_tree_comp(void *key, struct latch_tree_node *n) +{ + unsigned long val = (unsigned long)key; + unsigned long start, end; + + start = __mod_tree_val(n); + if (val < start) + return -1; + + end = start + __mod_tree_size(n); + if (val >= end) + return 1; + + return 0; +} + +static const struct latch_tree_ops mod_tree_ops = { + .less = mod_tree_less, + .comp = mod_tree_comp, +}; + +static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree) +{ + latch_tree_insert(&node->node, &tree->root, &mod_tree_ops); +} + +static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree) +{ + latch_tree_erase(&node->node, &tree->root, &mod_tree_ops); +} + +/* + * These modifications: insert, remove_init and remove; are serialized by the + * module_mutex. + */ +void mod_tree_insert(struct module *mod) +{ + for_each_mod_mem_type(type) { + mod->mem[type].mtn.mod = mod; + if (mod->mem[type].size) + __mod_tree_insert(&mod->mem[type].mtn, &mod_tree); + } +} + +void mod_tree_remove_init(struct module *mod) +{ + for_class_mod_mem_type(type, init) { + if (mod->mem[type].size) + __mod_tree_remove(&mod->mem[type].mtn, &mod_tree); + } +} + +void mod_tree_remove(struct module *mod) +{ + for_each_mod_mem_type(type) { + if (mod->mem[type].size) + __mod_tree_remove(&mod->mem[type].mtn, &mod_tree); + } +} + +struct module *mod_find(unsigned long addr, struct mod_tree_root *tree) +{ + struct latch_tree_node *ltn; + + ltn = latch_tree_find((void *)addr, &tree->root, &mod_tree_ops); + if (!ltn) + return NULL; + + return container_of(ltn, struct mod_tree_node, node)->mod; +} diff --git a/kernel/module/version.c b/kernel/module/version.c new file mode 100644 index 0000000000..53f43ac5a7 --- /dev/null +++ b/kernel/module/version.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module version support + * + * Copyright (C) 2008 Rusty Russell + */ + +#include <linux/module.h> +#include <linux/string.h> +#include <linux/printk.h> +#include "internal.h" + +int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + Elf_Shdr *sechdrs = info->sechdrs; + unsigned int versindex = info->index.vers; + unsigned int i, num_versions; + struct modversion_info *versions; + + /* Exporting module didn't supply crcs? OK, we're already tainted. */ + if (!crc) + return 1; + + /* No versions at all? modprobe --force does this. 
*/ + if (versindex == 0) + return try_to_force_load(mod, symname) == 0; + + versions = (void *)sechdrs[versindex].sh_addr; + num_versions = sechdrs[versindex].sh_size + / sizeof(struct modversion_info); + + for (i = 0; i < num_versions; i++) { + u32 crcval; + + if (strcmp(versions[i].name, symname) != 0) + continue; + + crcval = *crc; + if (versions[i].crc == crcval) + return 1; + pr_debug("Found checksum %X vs module %lX\n", + crcval, versions[i].crc); + goto bad_version; + } + + /* Broken toolchain. Warn once, then let it go.. */ + pr_warn_once("%s: no symbol version for %s\n", info->name, symname); + return 1; + +bad_version: + pr_warn("%s: disagrees about version of symbol %s\n", info->name, symname); + return 0; +} + +int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + struct find_symbol_arg fsa = { + .name = "module_layout", + .gplok = true, + }; + + /* + * Since this should be found in kernel (which can't be removed), no + * locking is necessary -- use preempt_disable() to placate lockdep. + */ + preempt_disable(); + if (!find_symbol(&fsa)) { + preempt_enable(); + BUG(); + } + preempt_enable(); + return check_version(info, "module_layout", mod, fsa.crc); +} + +/* First part is kernel version, which we ignore if module has crcs. */ +int same_magic(const char *amagic, const char *bmagic, + bool has_crcs) +{ + if (has_crcs) { + amagic += strcspn(amagic, " "); + bmagic += strcspn(bmagic, " "); + } + return strcmp(amagic, bmagic) == 0; +} + +/* + * Generate the signature for all relevant module structures here. + * If these change, we don't want to try to parse the module. + */ +void module_layout(struct module *mod, + struct modversion_info *ver, + struct kernel_param *kp, + struct kernel_symbol *ks, + struct tracepoint * const *tp) +{ +} +EXPORT_SYMBOL(module_layout); diff --git a/kernel/module_signature.c b/kernel/module_signature.c new file mode 100644 index 0000000000..00132d1248 --- /dev/null +++ b/kernel/module_signature.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Module signature checker + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/errno.h> +#include <linux/printk.h> +#include <linux/module_signature.h> +#include <asm/byteorder.h> + +/** + * mod_check_sig - check that the given signature is sane + * + * @ms: Signature to check. + * @file_len: Size of the file to which @ms is appended. + * @name: What is being checked. Used for error messages. + */ +int mod_check_sig(const struct module_signature *ms, size_t file_len, + const char *name) +{ + if (be32_to_cpu(ms->sig_len) >= file_len - sizeof(*ms)) + return -EBADMSG; + + if (ms->id_type != PKEY_ID_PKCS7) { + pr_err("%s: not signed with expected PKCS#7 message\n", + name); + return -ENOPKG; + } + + if (ms->algo != 0 || + ms->hash != 0 || + ms->signer_len != 0 || + ms->key_id_len != 0 || + ms->__pad[0] != 0 || + ms->__pad[1] != 0 || + ms->__pad[2] != 0) { + pr_err("%s: PKCS#7 signature info has unexpected non-zero params\n", + name); + return -EBADMSG; + } + + return 0; +} |
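
For reference, here is a minimal userspace sketch (not part of the patch above) of how the signature trailer validated by mod_check_sig() can be located and parsed. It assumes the layout used by the kernel's signing tooling: the file ends with the literal marker "~Module signature appended~\n", preceded by a struct module_signature, preceded by sig_len bytes of PKCS#7 data. The struct definition, the marker string, and the PKEY_ID_PKCS7 value (assumed to be 2) come from include/linux/module_signature.h and scripts/sign-file, not from this diff.

/*
 * modsig_peek.c - illustrative userspace sketch only.
 *
 * Assumed layout at the end of a signed module file:
 *   [ module data ][ PKCS#7 blob ][ struct module_signature ][ marker ]
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl(): sig_len is stored big-endian */

struct module_signature {
	uint8_t  algo;		/* Public-key crypto algorithm [0] */
	uint8_t  hash;		/* Digest algorithm [0] */
	uint8_t  id_type;	/* Key identifier type [PKEY_ID_PKCS7, assumed 2] */
	uint8_t  signer_len;	/* Length of signer's name [0] */
	uint8_t  key_id_len;	/* Length of key identifier [0] */
	uint8_t  __pad[3];
	uint32_t sig_len;	/* Length of the PKCS#7 blob, big-endian */
};

static const char marker[] = "~Module signature appended~\n";

int main(int argc, char **argv)
{
	const long mlen = sizeof(marker) - 1;
	struct module_signature ms;
	char tail[sizeof(marker)];
	long flen;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	flen = ftell(f);

	/* Is the marker string present at the very end of the file? */
	if (flen < mlen + (long)sizeof(ms) ||
	    fseek(f, flen - mlen, SEEK_SET) ||
	    fread(tail, 1, mlen, f) != (size_t)mlen ||
	    memcmp(tail, marker, mlen)) {
		printf("%s: no appended signature\n", argv[1]);
		fclose(f);
		return 0;
	}

	/* struct module_signature sits immediately before the marker. */
	fseek(f, flen - mlen - (long)sizeof(ms), SEEK_SET);
	if (fread(&ms, 1, sizeof(ms), f) == sizeof(ms))
		printf("%s: id_type=%u sig_len=%u bytes\n",
		       argv[1], ms.id_type, ntohl(ms.sig_len));
	fclose(f);
	return 0;
}

Built with a plain C compiler (for example, cc -o modsig_peek modsig_peek.c), running it against a signed .ko should report the id_type and signature length that mod_check_sig() later sanity-checks in the kernel; running it against an unsigned module should simply report that no signature is appended.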