From cfc1d277891eb499b3b5354df33b30f598683e90 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:31 +0000 Subject: module: Move all into module/ No functional changes. This patch moves all module related code into a separate directory, modifies each file name and creates a new Makefile. Note: this effort is in preparation to refactor core module code. Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 12 + kernel/module/decompress.c | 273 +++ kernel/module/internal.h | 50 + kernel/module/main.c | 4810 ++++++++++++++++++++++++++++++++++++++++++++ kernel/module/signing.c | 45 + 5 files changed, 5190 insertions(+) create mode 100644 kernel/module/Makefile create mode 100644 kernel/module/decompress.c create mode 100644 kernel/module/internal.h create mode 100644 kernel/module/main.c create mode 100644 kernel/module/signing.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile new file mode 100644 index 000000000000..cdd5c61b8c7f --- /dev/null +++ b/kernel/module/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for linux kernel module support +# + +# These are called from save_stack_trace() on slub debug path, +# and produce insane amounts of uninteresting coverage. +KCOV_INSTRUMENT_module.o := n + +obj-y += main.o +obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o +obj-$(CONFIG_MODULE_SIG) += signing.o diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c new file mode 100644 index 000000000000..d14d6443225a --- /dev/null +++ b/kernel/module/decompress.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2021 Google LLC. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +static int module_extend_max_pages(struct load_info *info, unsigned int extent) +{ + struct page **new_pages; + + new_pages = kvmalloc_array(info->max_pages + extent, + sizeof(info->pages), GFP_KERNEL); + if (!new_pages) + return -ENOMEM; + + memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages)); + kvfree(info->pages); + info->pages = new_pages; + info->max_pages += extent; + + return 0; +} + +static struct page *module_get_next_page(struct load_info *info) +{ + struct page *page; + int error; + + if (info->max_pages == info->used_pages) { + error = module_extend_max_pages(info, info->used_pages); + if (error) + return ERR_PTR(error); + } + + page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + if (!page) + return ERR_PTR(-ENOMEM); + + info->pages[info->used_pages++] = page; + return page; +} + +#ifdef CONFIG_MODULE_COMPRESS_GZIP +#include +#define MODULE_COMPRESSION gzip +#define MODULE_DECOMPRESS_FN module_gzip_decompress + +/* + * Calculate length of the header which consists of signature, header + * flags, time stamp and operating system ID (10 bytes total), plus + * an optional filename. + */ +static size_t module_gzip_header_len(const u8 *buf, size_t size) +{ + const u8 signature[] = { 0x1f, 0x8b, 0x08 }; + size_t len = 10; + + if (size < len || memcmp(buf, signature, sizeof(signature))) + return 0; + + if (buf[3] & 0x08) { + do { + /* + * If we can't find the end of the file name we must + * be dealing with a corrupted file. 
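+			 * For example (illustrative member name), a
+			 * file stored with the FNAME flag and the name
+			 * "foo.ko" has 10 fixed header bytes plus 7
+			 * name bytes (including the NUL), so len ends
+			 * up as 17; reaching len == size before a NUL
+			 * means the name never terminates.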
+ */ + if (len == size) + return 0; + } while (buf[len++] != '\0'); + } + + return len; +} + +static ssize_t module_gzip_decompress(struct load_info *info, + const void *buf, size_t size) +{ + struct z_stream_s s = { 0 }; + size_t new_size = 0; + size_t gzip_hdr_len; + ssize_t retval; + int rc; + + gzip_hdr_len = module_gzip_header_len(buf, size); + if (!gzip_hdr_len) { + pr_err("not a gzip compressed module\n"); + return -EINVAL; + } + + s.next_in = buf + gzip_hdr_len; + s.avail_in = size - gzip_hdr_len; + + s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); + if (!s.workspace) + return -ENOMEM; + + rc = zlib_inflateInit2(&s, -MAX_WBITS); + if (rc != Z_OK) { + pr_err("failed to initialize decompressor: %d\n", rc); + retval = -EINVAL; + goto out; + } + + do { + struct page *page = module_get_next_page(info); + if (!page) { + retval = -ENOMEM; + goto out_inflate_end; + } + + s.next_out = kmap(page); + s.avail_out = PAGE_SIZE; + rc = zlib_inflate(&s, 0); + kunmap(page); + + new_size += PAGE_SIZE - s.avail_out; + } while (rc == Z_OK); + + if (rc != Z_STREAM_END) { + pr_err("decompression failed with status %d\n", rc); + retval = -EINVAL; + goto out_inflate_end; + } + + retval = new_size; + +out_inflate_end: + zlib_inflateEnd(&s); +out: + kfree(s.workspace); + return retval; +} +#elif CONFIG_MODULE_COMPRESS_XZ +#include +#define MODULE_COMPRESSION xz +#define MODULE_DECOMPRESS_FN module_xz_decompress + +static ssize_t module_xz_decompress(struct load_info *info, + const void *buf, size_t size) +{ + static const u8 signature[] = { 0xfd, '7', 'z', 'X', 'Z', 0 }; + struct xz_dec *xz_dec; + struct xz_buf xz_buf; + enum xz_ret xz_ret; + size_t new_size = 0; + ssize_t retval; + + if (size < sizeof(signature) || + memcmp(buf, signature, sizeof(signature))) { + pr_err("not an xz compressed module\n"); + return -EINVAL; + } + + xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1); + if (!xz_dec) + return -ENOMEM; + + xz_buf.in_size = size; + xz_buf.in = buf; + xz_buf.in_pos = 0; + + do { + struct page *page = module_get_next_page(info); + if (!page) { + retval = -ENOMEM; + goto out; + } + + xz_buf.out = kmap(page); + xz_buf.out_pos = 0; + xz_buf.out_size = PAGE_SIZE; + xz_ret = xz_dec_run(xz_dec, &xz_buf); + kunmap(page); + + new_size += xz_buf.out_pos; + } while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK); + + if (xz_ret != XZ_STREAM_END) { + pr_err("decompression failed with status %d\n", xz_ret); + retval = -EINVAL; + goto out; + } + + retval = new_size; + + out: + xz_dec_end(xz_dec); + return retval; +} +#else +#error "Unexpected configuration for CONFIG_MODULE_DECOMPRESS" +#endif + +int module_decompress(struct load_info *info, const void *buf, size_t size) +{ + unsigned int n_pages; + ssize_t data_size; + int error; + + /* + * Start with number of pages twice as big as needed for + * compressed data. 
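+	 * For example, a 300 KiB compressed module with 4 KiB pages
+	 * preallocates DIV_ROUND_UP(307200, 4096) * 2 == 150 pages;
+	 * should the payload expand beyond that, module_get_next_page()
+	 * grows max_pages again via module_extend_max_pages().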
+ */ + n_pages = DIV_ROUND_UP(size, PAGE_SIZE) * 2; + error = module_extend_max_pages(info, n_pages); + + data_size = MODULE_DECOMPRESS_FN(info, buf, size); + if (data_size < 0) { + error = data_size; + goto err; + } + + info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL); + if (!info->hdr) { + error = -ENOMEM; + goto err; + } + + info->len = data_size; + return 0; + +err: + module_decompress_cleanup(info); + return error; +} + +void module_decompress_cleanup(struct load_info *info) +{ + int i; + + if (info->hdr) + vunmap(info->hdr); + + for (i = 0; i < info->used_pages; i++) + __free_page(info->pages[i]); + + kvfree(info->pages); + + info->pages = NULL; + info->max_pages = info->used_pages = 0; +} + +#ifdef CONFIG_SYSFS +static ssize_t compression_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%s\n", __stringify(MODULE_COMPRESSION)); +} +static struct kobj_attribute module_compression_attr = __ATTR_RO(compression); + +static int __init module_decompress_sysfs_init(void) +{ + int error; + + error = sysfs_create_file(&module_kset->kobj, + &module_compression_attr.attr); + if (error) + pr_warn("Failed to create 'compression' attribute"); + + return 0; +} +late_initcall(module_decompress_sysfs_init); +#endif diff --git a/kernel/module/internal.h b/kernel/module/internal.h new file mode 100644 index 000000000000..8c381c99062f --- /dev/null +++ b/kernel/module/internal.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Module internals + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include + +struct load_info { + const char *name; + /* pointer to module in temporary copy, freed at end of load_module() */ + struct module *mod; + Elf_Ehdr *hdr; + unsigned long len; + Elf_Shdr *sechdrs; + char *secstrings, *strtab; + unsigned long symoffs, stroffs, init_typeoffs, core_typeoffs; + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; +#ifdef CONFIG_KALLSYMS + unsigned long mod_kallsyms_init_off; +#endif +#ifdef CONFIG_MODULE_DECOMPRESS + struct page **pages; + unsigned int max_pages; + unsigned int used_pages; +#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +}; + +extern int mod_verify_sig(const void *mod, struct load_info *info); + +#ifdef CONFIG_MODULE_DECOMPRESS +int module_decompress(struct load_info *info, const void *buf, size_t size); +void module_decompress_cleanup(struct load_info *info); +#else +static inline int module_decompress(struct load_info *info, + const void *buf, size_t size) +{ + return -EOPNOTSUPP; +} +static inline void module_decompress_cleanup(struct load_info *info) +{ +} +#endif diff --git a/kernel/module/main.c b/kernel/module/main.c new file mode 100644 index 000000000000..1a17f02f69a0 --- /dev/null +++ b/kernel/module/main.c @@ -0,0 +1,4810 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2002 Richard Henderson + * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. 
+ */ + +#define INCLUDE_VERMAGIC + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +#define CREATE_TRACE_POINTS +#include + +#ifndef ARCH_SHF_SMALL +#define ARCH_SHF_SMALL 0 +#endif + +/* + * Modules' sections will be aligned on page boundaries + * to ensure complete separation of code and data, but + * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y + */ +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX +# define debug_align(X) ALIGN(X, PAGE_SIZE) +#else +# define debug_align(X) (X) +#endif + +/* If this is set, the section belongs in the init part of the module */ +#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) + +/* + * Mutex protects: + * 1) List of modules (also safely readable with preempt_disable), + * 2) module_use links, + * 3) module_addr_min/module_addr_max. + * (delete and add uses RCU list operations). + */ +static DEFINE_MUTEX(module_mutex); +static LIST_HEAD(modules); + +/* Work queue for freeing init sections in success case */ +static void do_free_init(struct work_struct *w); +static DECLARE_WORK(init_free_wq, do_free_init); +static LLIST_HEAD(init_free_list); + +#ifdef CONFIG_MODULES_TREE_LOOKUP + +/* + * Use a latched RB-tree for __module_address(); this allows us to use + * RCU-sched lookups of the address from any context. + * + * This is conditional on PERF_EVENTS || TRACING because those can really hit + * __module_address() hard by doing a lot of stack unwinding; potentially from + * NMI context. 
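+ *
+ * A read-side lookup is then just (sketch of the usual calling pattern):
+ *
+ *	preempt_disable();			// RCU-sched read section
+ *	mod = __module_address(addr);		// ends up in mod_find()
+ *	preempt_enable();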
+ */ + +static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) +{ + struct module_layout *layout = container_of(n, struct module_layout, mtn.node); + + return (unsigned long)layout->base; +} + +static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) +{ + struct module_layout *layout = container_of(n, struct module_layout, mtn.node); + + return (unsigned long)layout->size; +} + +static __always_inline bool +mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) +{ + return __mod_tree_val(a) < __mod_tree_val(b); +} + +static __always_inline int +mod_tree_comp(void *key, struct latch_tree_node *n) +{ + unsigned long val = (unsigned long)key; + unsigned long start, end; + + start = __mod_tree_val(n); + if (val < start) + return -1; + + end = start + __mod_tree_size(n); + if (val >= end) + return 1; + + return 0; +} + +static const struct latch_tree_ops mod_tree_ops = { + .less = mod_tree_less, + .comp = mod_tree_comp, +}; + +static struct mod_tree_root { + struct latch_tree_root root; + unsigned long addr_min; + unsigned long addr_max; +} mod_tree __cacheline_aligned = { + .addr_min = -1UL, +}; + +#define module_addr_min mod_tree.addr_min +#define module_addr_max mod_tree.addr_max + +static noinline void __mod_tree_insert(struct mod_tree_node *node) +{ + latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops); +} + +static void __mod_tree_remove(struct mod_tree_node *node) +{ + latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops); +} + +/* + * These modifications: insert, remove_init and remove; are serialized by the + * module_mutex. + */ +static void mod_tree_insert(struct module *mod) +{ + mod->core_layout.mtn.mod = mod; + mod->init_layout.mtn.mod = mod; + + __mod_tree_insert(&mod->core_layout.mtn); + if (mod->init_layout.size) + __mod_tree_insert(&mod->init_layout.mtn); +} + +static void mod_tree_remove_init(struct module *mod) +{ + if (mod->init_layout.size) + __mod_tree_remove(&mod->init_layout.mtn); +} + +static void mod_tree_remove(struct module *mod) +{ + __mod_tree_remove(&mod->core_layout.mtn); + mod_tree_remove_init(mod); +} + +static struct module *mod_find(unsigned long addr) +{ + struct latch_tree_node *ltn; + + ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops); + if (!ltn) + return NULL; + + return container_of(ltn, struct mod_tree_node, node)->mod; +} + +#else /* MODULES_TREE_LOOKUP */ + +static unsigned long module_addr_min = -1UL, module_addr_max = 0; + +static void mod_tree_insert(struct module *mod) { } +static void mod_tree_remove_init(struct module *mod) { } +static void mod_tree_remove(struct module *mod) { } + +static struct module *mod_find(unsigned long addr) +{ + struct module *mod; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + if (within_module(addr, mod)) + return mod; + } + + return NULL; +} + +#endif /* MODULES_TREE_LOOKUP */ + +/* + * Bounds of module text, for speeding up __module_address. + * Protected by module_mutex. 
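+ *
+ * These bounds let __module_address() reject most addresses without
+ * touching the tree or list at all (sketch):
+ *
+ *	if (addr < module_addr_min || addr > module_addr_max)
+ *		return NULL;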
+ */ +static void __mod_update_bounds(void *base, unsigned int size) +{ + unsigned long min = (unsigned long)base; + unsigned long max = min + size; + + if (min < module_addr_min) + module_addr_min = min; + if (max > module_addr_max) + module_addr_max = max; +} + +static void mod_update_bounds(struct module *mod) +{ + __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); + if (mod->init_layout.size) + __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); +} + +#ifdef CONFIG_KGDB_KDB +struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ +#endif /* CONFIG_KGDB_KDB */ + +static void module_assert_mutex_or_preempt(void) +{ +#ifdef CONFIG_LOCKDEP + if (unlikely(!debug_locks)) + return; + + WARN_ON_ONCE(!rcu_read_lock_sched_held() && + !lockdep_is_held(&module_mutex)); +#endif +} + +#ifdef CONFIG_MODULE_SIG +static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); +module_param(sig_enforce, bool_enable_only, 0644); + +void set_module_sig_enforced(void) +{ + sig_enforce = true; +} +#else +#define sig_enforce false +#endif + +/* + * Export sig_enforce kernel cmdline parameter to allow other subsystems rely + * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. + */ +bool is_module_sig_enforced(void) +{ + return sig_enforce; +} +EXPORT_SYMBOL(is_module_sig_enforced); + +/* Block module loading/unloading? */ +int modules_disabled = 0; +core_param(nomodule, modules_disabled, bint, 0); + +/* Waiting for a module to finish initializing? */ +static DECLARE_WAIT_QUEUE_HEAD(module_wq); + +static BLOCKING_NOTIFIER_HEAD(module_notify_list); + +int register_module_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&module_notify_list, nb); +} +EXPORT_SYMBOL(register_module_notifier); + +int unregister_module_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&module_notify_list, nb); +} +EXPORT_SYMBOL(unregister_module_notifier); + +/* + * We require a truly strong try_module_get(): 0 means success. + * Otherwise an error is returned due to ongoing or failed + * initialization etc. + */ +static inline int strong_try_module_get(struct module *mod) +{ + BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); + if (mod && mod->state == MODULE_STATE_COMING) + return -EBUSY; + if (try_module_get(mod)) + return 0; + else + return -ENOENT; +} + +static inline void add_taint_module(struct module *mod, unsigned flag, + enum lockdep_ok lockdep_ok) +{ + add_taint(flag, lockdep_ok); + set_bit(flag, &mod->taints); +} + +/* + * A thread that wants to hold a reference to a module only while it + * is running can call this to safely exit. + */ +void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) +{ + module_put(mod); + kthread_exit(code); +} +EXPORT_SYMBOL(__module_put_and_kthread_exit); + +/* Find a module section: 0 means not found. */ +static unsigned int find_sec(const struct load_info *info, const char *name) +{ + unsigned int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + /* Alloc bit cleared means "ignore it." */ + if ((shdr->sh_flags & SHF_ALLOC) + && strcmp(info->secstrings + shdr->sh_name, name) == 0) + return i; + } + return 0; +} + +/* Find a module section, or NULL. */ +static void *section_addr(const struct load_info *info, const char *name) +{ + /* Section 0 has sh_addr 0. */ + return (void *)info->sechdrs[find_sec(info, name)].sh_addr; +} + +/* Find a module section, or NULL. Fill in number of "objects" in section. 
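+ * E.g. picking up a module's parameters during load, as
+ * find_module_sections() does later in this file:
+ *
+ *	mod->kp = section_objs(info, "__param",
+ *			       sizeof(*mod->kp), &mod->num_kp);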
*/ +static void *section_objs(const struct load_info *info, + const char *name, + size_t object_size, + unsigned int *num) +{ + unsigned int sec = find_sec(info, name); + + /* Section 0 has sh_addr 0 and sh_size 0. */ + *num = info->sechdrs[sec].sh_size / object_size; + return (void *)info->sechdrs[sec].sh_addr; +} + +/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ +static unsigned int find_any_sec(const struct load_info *info, const char *name) +{ + unsigned int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + if (strcmp(info->secstrings + shdr->sh_name, name) == 0) + return i; + } + return 0; +} + +/* + * Find a module section, or NULL. Fill in number of "objects" in section. + * Ignores SHF_ALLOC flag. + */ +static __maybe_unused void *any_section_objs(const struct load_info *info, + const char *name, + size_t object_size, + unsigned int *num) +{ + unsigned int sec = find_any_sec(info, name); + + /* Section 0 has sh_addr 0 and sh_size 0. */ + *num = info->sechdrs[sec].sh_size / object_size; + return (void *)info->sechdrs[sec].sh_addr; +} + +/* Provided by the linker */ +extern const struct kernel_symbol __start___ksymtab[]; +extern const struct kernel_symbol __stop___ksymtab[]; +extern const struct kernel_symbol __start___ksymtab_gpl[]; +extern const struct kernel_symbol __stop___ksymtab_gpl[]; +extern const s32 __start___kcrctab[]; +extern const s32 __start___kcrctab_gpl[]; + +#ifndef CONFIG_MODVERSIONS +#define symversion(base, idx) NULL +#else +#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) +#endif + +struct symsearch { + const struct kernel_symbol *start, *stop; + const s32 *crcs; + enum mod_license { + NOT_GPL_ONLY, + GPL_ONLY, + } license; +}; + +struct find_symbol_arg { + /* Input */ + const char *name; + bool gplok; + bool warn; + + /* Output */ + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +static bool check_exported_symbol(const struct symsearch *syms, + struct module *owner, + unsigned int symnum, void *data) +{ + struct find_symbol_arg *fsa = data; + + if (!fsa->gplok && syms->license == GPL_ONLY) + return false; + fsa->owner = owner; + fsa->crc = symversion(syms->crcs, symnum); + fsa->sym = &syms->start[symnum]; + fsa->license = syms->license; + return true; +} + +static unsigned long kernel_symbol_value(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + return (unsigned long)offset_to_ptr(&sym->value_offset); +#else + return sym->value; +#endif +} + +static const char *kernel_symbol_name(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + return offset_to_ptr(&sym->name_offset); +#else + return sym->name; +#endif +} + +static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + if (!sym->namespace_offset) + return NULL; + return offset_to_ptr(&sym->namespace_offset); +#else + return sym->namespace; +#endif +} + +static int cmp_name(const void *name, const void *sym) +{ + return strcmp(name, kernel_symbol_name(sym)); +} + +static bool find_exported_symbol_in_section(const struct symsearch *syms, + struct module *owner, + void *data) +{ + struct find_symbol_arg *fsa = data; + struct kernel_symbol *sym; + + sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, + sizeof(struct kernel_symbol), cmp_name); + + if (sym != NULL && check_exported_symbol(syms, owner, + sym - syms->start, data)) + 
return true; + + return false; +} + +/* + * Find an exported symbol and return it, along with, (optional) crc and + * (optional) module which owns it. Needs preempt disabled or module_mutex. + */ +static bool find_symbol(struct find_symbol_arg *fsa) +{ + static const struct symsearch arr[] = { + { __start___ksymtab, __stop___ksymtab, __start___kcrctab, + NOT_GPL_ONLY }, + { __start___ksymtab_gpl, __stop___ksymtab_gpl, + __start___kcrctab_gpl, + GPL_ONLY }, + }; + struct module *mod; + unsigned int i; + + module_assert_mutex_or_preempt(); + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) + return true; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + struct symsearch arr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, + mod->gpl_crcs, + GPL_ONLY }, + }; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], mod, fsa)) + return true; + } + + pr_debug("Failed to find symbol %s\n", fsa->name); + return false; +} + +/* + * Search for module by name: must hold module_mutex (or preempt disabled + * for read-only access). + */ +static struct module *find_module_all(const char *name, size_t len, + bool even_unformed) +{ + struct module *mod; + + module_assert_mutex_or_preempt(); + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) + continue; + if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) + return mod; + } + return NULL; +} + +struct module *find_module(const char *name) +{ + return find_module_all(name, strlen(name), false); +} + +#ifdef CONFIG_SMP + +static inline void __percpu *mod_percpu(struct module *mod) +{ + return mod->percpu; +} + +static int percpu_modalloc(struct module *mod, struct load_info *info) +{ + Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; + unsigned long align = pcpusec->sh_addralign; + + if (!pcpusec->sh_size) + return 0; + + if (align > PAGE_SIZE) { + pr_warn("%s: per-cpu alignment %li > %li\n", + mod->name, align, PAGE_SIZE); + align = PAGE_SIZE; + } + + mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); + if (!mod->percpu) { + pr_warn("%s: Could not allocate %lu bytes percpu data\n", + mod->name, (unsigned long)pcpusec->sh_size); + return -ENOMEM; + } + mod->percpu_size = pcpusec->sh_size; + return 0; +} + +static void percpu_modfree(struct module *mod) +{ + free_percpu(mod->percpu); +} + +static unsigned int find_pcpusec(struct load_info *info) +{ + return find_sec(info, ".data..percpu"); +} + +static void percpu_modcopy(struct module *mod, + const void *from, unsigned long size) +{ + int cpu; + + for_each_possible_cpu(cpu) + memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); +} + +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + struct module *mod; + unsigned int cpu; + + preempt_disable(); + + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (!mod->percpu_size) + continue; + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(mod->percpu, cpu); + void *va = (void *)addr; + + if (va >= start && va < start + mod->percpu_size) { + if (can_addr) { + *can_addr = (unsigned long) (va - start); + *can_addr += (unsigned long) + per_cpu_ptr(mod->percpu, + get_boot_cpu_id()); + } + 
preempt_enable(); + return true; + } + } + } + + preempt_enable(); + return false; +} + +/** + * is_module_percpu_address() - test whether address is from module static percpu + * @addr: address to test + * + * Test whether @addr belongs to module static percpu area. + * + * Return: %true if @addr is from module static percpu area + */ +bool is_module_percpu_address(unsigned long addr) +{ + return __is_module_percpu_address(addr, NULL); +} + +#else /* ... !CONFIG_SMP */ + +static inline void __percpu *mod_percpu(struct module *mod) +{ + return NULL; +} +static int percpu_modalloc(struct module *mod, struct load_info *info) +{ + /* UP modules shouldn't have this section: ENOMEM isn't quite right */ + if (info->sechdrs[info->index.pcpu].sh_size != 0) + return -ENOMEM; + return 0; +} +static inline void percpu_modfree(struct module *mod) +{ +} +static unsigned int find_pcpusec(struct load_info *info) +{ + return 0; +} +static inline void percpu_modcopy(struct module *mod, + const void *from, unsigned long size) +{ + /* pcpusec should be 0, and size of that section should be 0. */ + BUG_ON(size != 0); +} +bool is_module_percpu_address(unsigned long addr) +{ + return false; +} + +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + +#endif /* CONFIG_SMP */ + +#define MODINFO_ATTR(field) \ +static void setup_modinfo_##field(struct module *mod, const char *s) \ +{ \ + mod->field = kstrdup(s, GFP_KERNEL); \ +} \ +static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ + struct module_kobject *mk, char *buffer) \ +{ \ + return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ +} \ +static int modinfo_##field##_exists(struct module *mod) \ +{ \ + return mod->field != NULL; \ +} \ +static void free_modinfo_##field(struct module *mod) \ +{ \ + kfree(mod->field); \ + mod->field = NULL; \ +} \ +static struct module_attribute modinfo_##field = { \ + .attr = { .name = __stringify(field), .mode = 0444 }, \ + .show = show_modinfo_##field, \ + .setup = setup_modinfo_##field, \ + .test = modinfo_##field##_exists, \ + .free = free_modinfo_##field, \ +}; + +MODINFO_ATTR(version); +MODINFO_ATTR(srcversion); + +static char last_unloaded_module[MODULE_NAME_LEN+1]; + +#ifdef CONFIG_MODULE_UNLOAD + +EXPORT_TRACEPOINT_SYMBOL(module_get); + +/* MODULE_REF_BASE is the base reference count by kmodule loader. */ +#define MODULE_REF_BASE 1 + +/* Init the unload section of the module. */ +static int module_unload_init(struct module *mod) +{ + /* + * Initialize reference counter to MODULE_REF_BASE. + * refcnt == 0 means module is going. + */ + atomic_set(&mod->refcnt, MODULE_REF_BASE); + + INIT_LIST_HEAD(&mod->source_list); + INIT_LIST_HEAD(&mod->target_list); + + /* Hold reference count during initialization. */ + atomic_inc(&mod->refcnt); + + return 0; +} + +/* Does a already use b? */ +static int already_uses(struct module *a, struct module *b) +{ + struct module_use *use; + + list_for_each_entry(use, &b->source_list, source_list) { + if (use->source == a) { + pr_debug("%s uses %s!\n", a->name, b->name); + return 1; + } + } + pr_debug("%s does not use %s!\n", a->name, b->name); + return 0; +} + +/* + * Module a uses b + * - we add 'a' as a "source", 'b' as a "target" of module use + * - the module_use is added to the list of 'b' sources (so + * 'b' can walk the list to see who sourced them), and of 'a' + * targets (so 'a' can see what modules it targets). 
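+ *
+ * For instance, when ext4 resolves a symbol exported by mbcache
+ * (names purely illustrative here), the new link is wired up as:
+ *
+ *	use->source = ext4;   use->target = mbcache;
+ *	list_add(&use->source_list, &mbcache->source_list);
+ *	list_add(&use->target_list, &ext4->target_list);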
+ */ +static int add_module_usage(struct module *a, struct module *b) +{ + struct module_use *use; + + pr_debug("Allocating new usage for %s.\n", a->name); + use = kmalloc(sizeof(*use), GFP_ATOMIC); + if (!use) + return -ENOMEM; + + use->source = a; + use->target = b; + list_add(&use->source_list, &b->source_list); + list_add(&use->target_list, &a->target_list); + return 0; +} + +/* Module a uses b: caller needs module_mutex() */ +static int ref_module(struct module *a, struct module *b) +{ + int err; + + if (b == NULL || already_uses(a, b)) + return 0; + + /* If module isn't available, we fail. */ + err = strong_try_module_get(b); + if (err) + return err; + + err = add_module_usage(a, b); + if (err) { + module_put(b); + return err; + } + return 0; +} + +/* Clear the unload stuff of the module. */ +static void module_unload_free(struct module *mod) +{ + struct module_use *use, *tmp; + + mutex_lock(&module_mutex); + list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { + struct module *i = use->target; + pr_debug("%s unusing %s\n", mod->name, i->name); + module_put(i); + list_del(&use->source_list); + list_del(&use->target_list); + kfree(use); + } + mutex_unlock(&module_mutex); +} + +#ifdef CONFIG_MODULE_FORCE_UNLOAD +static inline int try_force_unload(unsigned int flags) +{ + int ret = (flags & O_TRUNC); + if (ret) + add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); + return ret; +} +#else +static inline int try_force_unload(unsigned int flags) +{ + return 0; +} +#endif /* CONFIG_MODULE_FORCE_UNLOAD */ + +/* Try to release refcount of module, 0 means success. */ +static int try_release_module_ref(struct module *mod) +{ + int ret; + + /* Try to decrement refcnt which we set at loading */ + ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); + BUG_ON(ret < 0); + if (ret) + /* Someone can put this right now, recover with checking */ + ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); + + return ret; +} + +static int try_stop_module(struct module *mod, int flags, int *forced) +{ + /* If it's not unused, quit unless we're forcing. */ + if (try_release_module_ref(mod) != 0) { + *forced = try_force_unload(flags); + if (!(*forced)) + return -EWOULDBLOCK; + } + + /* Mark it as dying. */ + mod->state = MODULE_STATE_GOING; + + return 0; +} + +/** + * module_refcount() - return the refcount or -1 if unloading + * @mod: the module we're checking + * + * Return: + * -1 if the module is in the process of unloading + * otherwise the number of references in the kernel to the module + */ +int module_refcount(struct module *mod) +{ + return atomic_read(&mod->refcnt) - MODULE_REF_BASE; +} +EXPORT_SYMBOL(module_refcount); + +/* This exists whether we can unload or not */ +static void free_module(struct module *mod); + +SYSCALL_DEFINE2(delete_module, const char __user *, name_user, + unsigned int, flags) +{ + struct module *mod; + char name[MODULE_NAME_LEN]; + int ret, forced = 0; + + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + + if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) + return -EFAULT; + name[MODULE_NAME_LEN-1] = '\0'; + + audit_log_kern_module(name); + + if (mutex_lock_interruptible(&module_mutex) != 0) + return -EINTR; + + mod = find_module(name); + if (!mod) { + ret = -ENOENT; + goto out; + } + + if (!list_empty(&mod->source_list)) { + /* Other modules depend on us: get rid of them first. */ + ret = -EWOULDBLOCK; + goto out; + } + + /* Doing init or already dying? 
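+	 * Both MODULE_STATE_COMING and MODULE_STATE_GOING land here:
+	 * e.g. an rmmod issued while module_init() is still running is
+	 * refused with -EBUSY rather than racing the initialization.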
*/ + if (mod->state != MODULE_STATE_LIVE) { + /* FIXME: if (force), slam module count damn the torpedoes */ + pr_debug("%s already dying\n", mod->name); + ret = -EBUSY; + goto out; + } + + /* If it has an init func, it must have an exit func to unload */ + if (mod->init && !mod->exit) { + forced = try_force_unload(flags); + if (!forced) { + /* This module can't be removed */ + ret = -EBUSY; + goto out; + } + } + + ret = try_stop_module(mod, flags, &forced); + if (ret != 0) + goto out; + + mutex_unlock(&module_mutex); + /* Final destruction now no one is using it. */ + if (mod->exit != NULL) + mod->exit(); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + ftrace_release_mod(mod); + + async_synchronize_full(); + + /* Store the name of the last unloaded module for diagnostic purposes */ + strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); + + free_module(mod); + /* someone could wait for the module in add_unformed_module() */ + wake_up_all(&module_wq); + return 0; +out: + mutex_unlock(&module_mutex); + return ret; +} + +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + struct module_use *use; + int printed_something = 0; + + seq_printf(m, " %i ", module_refcount(mod)); + + /* + * Always include a trailing , so userspace can differentiate + * between this and the old multi-field proc format. + */ + list_for_each_entry(use, &mod->source_list, source_list) { + printed_something = 1; + seq_printf(m, "%s,", use->source->name); + } + + if (mod->init != NULL && mod->exit == NULL) { + printed_something = 1; + seq_puts(m, "[permanent],"); + } + + if (!printed_something) + seq_puts(m, "-"); +} + +void __symbol_put(const char *symbol) +{ + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + }; + + preempt_disable(); + BUG_ON(!find_symbol(&fsa)); + module_put(fsa.owner); + preempt_enable(); +} +EXPORT_SYMBOL(__symbol_put); + +/* Note this assumes addr is a function, which it currently always is. */ +void symbol_put_addr(void *addr) +{ + struct module *modaddr; + unsigned long a = (unsigned long)dereference_function_descriptor(addr); + + if (core_kernel_text(a)) + return; + + /* + * Even though we hold a reference on the module; we still need to + * disable preemption in order to safely traverse the data structure. 
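+	 * __module_text_address() walks the module list / latch tree,
+	 * whose readers rely on RCU-sched (see the mod_tree comment
+	 * above); preempt_disable() is what provides that read-side
+	 * critical section here.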
+ */ + preempt_disable(); + modaddr = __module_text_address(a); + BUG_ON(!modaddr); + module_put(modaddr); + preempt_enable(); +} +EXPORT_SYMBOL_GPL(symbol_put_addr); + +static ssize_t show_refcnt(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + return sprintf(buffer, "%i\n", module_refcount(mk->mod)); +} + +static struct module_attribute modinfo_refcnt = + __ATTR(refcnt, 0444, show_refcnt, NULL); + +void __module_get(struct module *module) +{ + if (module) { + preempt_disable(); + atomic_inc(&module->refcnt); + trace_module_get(module, _RET_IP_); + preempt_enable(); + } +} +EXPORT_SYMBOL(__module_get); + +bool try_module_get(struct module *module) +{ + bool ret = true; + + if (module) { + preempt_disable(); + /* Note: here, we can fail to get a reference */ + if (likely(module_is_live(module) && + atomic_inc_not_zero(&module->refcnt) != 0)) + trace_module_get(module, _RET_IP_); + else + ret = false; + + preempt_enable(); + } + return ret; +} +EXPORT_SYMBOL(try_module_get); + +void module_put(struct module *module) +{ + int ret; + + if (module) { + preempt_disable(); + ret = atomic_dec_if_positive(&module->refcnt); + WARN_ON(ret < 0); /* Failed to put refcount */ + trace_module_put(module, _RET_IP_); + preempt_enable(); + } +} +EXPORT_SYMBOL(module_put); + +#else /* !CONFIG_MODULE_UNLOAD */ +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + /* We don't know the usage count, or what modules are using. */ + seq_puts(m, " - -"); +} + +static inline void module_unload_free(struct module *mod) +{ +} + +static int ref_module(struct module *a, struct module *b) +{ + return strong_try_module_get(b); +} + +static inline int module_unload_init(struct module *mod) +{ + return 0; +} +#endif /* CONFIG_MODULE_UNLOAD */ + +static size_t module_flags_taint(struct module *mod, char *buf) +{ + size_t l = 0; + int i; + + for (i = 0; i < TAINT_FLAGS_COUNT; i++) { + if (taint_flags[i].module && test_bit(i, &mod->taints)) + buf[l++] = taint_flags[i].c_true; + } + + return l; +} + +static ssize_t show_initstate(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + const char *state = "unknown"; + + switch (mk->mod->state) { + case MODULE_STATE_LIVE: + state = "live"; + break; + case MODULE_STATE_COMING: + state = "coming"; + break; + case MODULE_STATE_GOING: + state = "going"; + break; + default: + BUG(); + } + return sprintf(buffer, "%s\n", state); +} + +static struct module_attribute modinfo_initstate = + __ATTR(initstate, 0444, show_initstate, NULL); + +static ssize_t store_uevent(struct module_attribute *mattr, + struct module_kobject *mk, + const char *buffer, size_t count) +{ + int rc; + + rc = kobject_synth_uevent(&mk->kobj, buffer, count); + return rc ? 
rc : count; +} + +struct module_attribute module_uevent = + __ATTR(uevent, 0200, NULL, store_uevent); + +static ssize_t show_coresize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + return sprintf(buffer, "%u\n", mk->mod->core_layout.size); +} + +static struct module_attribute modinfo_coresize = + __ATTR(coresize, 0444, show_coresize, NULL); + +static ssize_t show_initsize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + return sprintf(buffer, "%u\n", mk->mod->init_layout.size); +} + +static struct module_attribute modinfo_initsize = + __ATTR(initsize, 0444, show_initsize, NULL); + +static ssize_t show_taint(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + size_t l; + + l = module_flags_taint(mk->mod, buffer); + buffer[l++] = '\n'; + return l; +} + +static struct module_attribute modinfo_taint = + __ATTR(taint, 0444, show_taint, NULL); + +static struct module_attribute *modinfo_attrs[] = { + &module_uevent, + &modinfo_version, + &modinfo_srcversion, + &modinfo_initstate, + &modinfo_coresize, + &modinfo_initsize, + &modinfo_taint, +#ifdef CONFIG_MODULE_UNLOAD + &modinfo_refcnt, +#endif + NULL, +}; + +static const char vermagic[] = VERMAGIC_STRING; + +static int try_to_force_load(struct module *mod, const char *reason) +{ +#ifdef CONFIG_MODULE_FORCE_LOAD + if (!test_taint(TAINT_FORCED_MODULE)) + pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); + add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); + return 0; +#else + return -ENOEXEC; +#endif +} + +#ifdef CONFIG_MODVERSIONS + +static u32 resolve_rel_crc(const s32 *crc) +{ + return *(u32 *)((void *)crc + *crc); +} + +static int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + Elf_Shdr *sechdrs = info->sechdrs; + unsigned int versindex = info->index.vers; + unsigned int i, num_versions; + struct modversion_info *versions; + + /* Exporting module didn't supply crcs? OK, we're already tainted. */ + if (!crc) + return 1; + + /* No versions at all? modprobe --force does this. */ + if (versindex == 0) + return try_to_force_load(mod, symname) == 0; + + versions = (void *) sechdrs[versindex].sh_addr; + num_versions = sechdrs[versindex].sh_size + / sizeof(struct modversion_info); + + for (i = 0; i < num_versions; i++) { + u32 crcval; + + if (strcmp(versions[i].name, symname) != 0) + continue; + + if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) + crcval = resolve_rel_crc(crc); + else + crcval = *crc; + if (versions[i].crc == crcval) + return 1; + pr_debug("Found checksum %X vs module %lX\n", + crcval, versions[i].crc); + goto bad_version; + } + + /* Broken toolchain. Warn once, then let it go.. */ + pr_warn_once("%s: no symbol version for %s\n", info->name, symname); + return 1; + +bad_version: + pr_warn("%s: disagrees about version of symbol %s\n", + info->name, symname); + return 0; +} + +static inline int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + struct find_symbol_arg fsa = { + .name = "module_layout", + .gplok = true, + }; + + /* + * Since this should be found in kernel (which can't be removed), no + * locking is necessary -- use preempt_disable() to placate lockdep. + */ + preempt_disable(); + if (!find_symbol(&fsa)) { + preempt_enable(); + BUG(); + } + preempt_enable(); + return check_version(info, "module_layout", mod, fsa.crc); +} + +/* First part is kernel version, which we ignore if module has crcs. 
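+ * E.g. with the (illustrative) strings "5.17.1 SMP mod_unload " and
+ * "5.17.2 SMP mod_unload ": when CRCs are present, strcspn() advances
+ * both pointers past the release number to the first space, so only
+ * the trailing feature flags are compared.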
*/ +static inline int same_magic(const char *amagic, const char *bmagic, + bool has_crcs) +{ + if (has_crcs) { + amagic += strcspn(amagic, " "); + bmagic += strcspn(bmagic, " "); + } + return strcmp(amagic, bmagic) == 0; +} +#else +static inline int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + return 1; +} + +static inline int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + return 1; +} + +static inline int same_magic(const char *amagic, const char *bmagic, + bool has_crcs) +{ + return strcmp(amagic, bmagic) == 0; +} +#endif /* CONFIG_MODVERSIONS */ + +static char *get_modinfo(const struct load_info *info, const char *tag); +static char *get_next_modinfo(const struct load_info *info, const char *tag, + char *prev); + +static int verify_namespace_is_imported(const struct load_info *info, + const struct kernel_symbol *sym, + struct module *mod) +{ + const char *namespace; + char *imported_namespace; + + namespace = kernel_symbol_namespace(sym); + if (namespace && namespace[0]) { + imported_namespace = get_modinfo(info, "import_ns"); + while (imported_namespace) { + if (strcmp(namespace, imported_namespace) == 0) + return 0; + imported_namespace = get_next_modinfo( + info, "import_ns", imported_namespace); + } +#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + pr_warn( +#else + pr_err( +#endif + "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", + mod->name, kernel_symbol_name(sym), namespace); +#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + return -EINVAL; +#endif + } + return 0; +} + +static bool inherit_taint(struct module *mod, struct module *owner) +{ + if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) + return true; + + if (mod->using_gplonly_symbols) { + pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n", + mod->name, owner->name); + return false; + } + + if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { + pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n", + mod->name, owner->name); + set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); + } + return true; +} + +/* Resolve a symbol for this module. I.e. if we find one, record usage. */ +static const struct kernel_symbol *resolve_symbol(struct module *mod, + const struct load_info *info, + const char *name, + char ownername[]) +{ + struct find_symbol_arg fsa = { + .name = name, + .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), + .warn = true, + }; + int err; + + /* + * The module_mutex should not be a heavily contended lock; + * if we get the occasional sleep here, we'll go an extra iteration + * in the wait_event_interruptible(), which is harmless. + */ + sched_annotate_sleep(); + mutex_lock(&module_mutex); + if (!find_symbol(&fsa)) + goto unlock; + + if (fsa.license == GPL_ONLY) + mod->using_gplonly_symbols = true; + + if (!inherit_taint(mod, fsa.owner)) { + fsa.sym = NULL; + goto getname; + } + + if (!check_version(info, name, mod, fsa.crc)) { + fsa.sym = ERR_PTR(-EINVAL); + goto getname; + } + + err = verify_namespace_is_imported(info, fsa.sym, mod); + if (err) { + fsa.sym = ERR_PTR(err); + goto getname; + } + + err = ref_module(mod, fsa.owner); + if (err) { + fsa.sym = ERR_PTR(err); + goto getname; + } + +getname: + /* We must make copy under the lock if we failed to get ref. 
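+	 * Once module_mutex is dropped, fsa.owner may be unloaded and
+	 * freed, so resolve_symbol_wait() could not safely dereference
+	 * it for its warning; copying the name into ownername[] while
+	 * still locked keeps that later pr_warn() safe.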
*/ + strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); +unlock: + mutex_unlock(&module_mutex); + return fsa.sym; +} + +static const struct kernel_symbol * +resolve_symbol_wait(struct module *mod, + const struct load_info *info, + const char *name) +{ + const struct kernel_symbol *ksym; + char owner[MODULE_NAME_LEN]; + + if (wait_event_interruptible_timeout(module_wq, + !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) + || PTR_ERR(ksym) != -EBUSY, + 30 * HZ) <= 0) { + pr_warn("%s: gave up waiting for init of module %s.\n", + mod->name, owner); + } + return ksym; +} + +#ifdef CONFIG_KALLSYMS +static inline bool sect_empty(const Elf_Shdr *sect) +{ + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; +} +#endif + +/* + * /sys/module/foo/sections stuff + * J. Corbet + */ +#ifdef CONFIG_SYSFS + +#ifdef CONFIG_KALLSYMS +struct module_sect_attr { + struct bin_attribute battr; + unsigned long address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[]; +}; + +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) +{ + struct module_sect_attr *sattr = + container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; + + if (pos != 0) + return -EINVAL; + + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? (void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; +} + +static void free_sect_attrs(struct module_sect_attrs *sect_attrs) +{ + unsigned int section; + + for (section = 0; section < sect_attrs->nsections; section++) + kfree(sect_attrs->attrs[section].battr.attr.name); + kfree(sect_attrs); +} + +static void add_sect_attrs(struct module *mod, const struct load_info *info) +{ + unsigned int nloaded = 0, i, size[2]; + struct module_sect_attrs *sect_attrs; + struct module_sect_attr *sattr; + struct bin_attribute **gattr; + + /* Count loaded sections and allocate structures */ + for (i = 0; i < info->hdr->e_shnum; i++) + if (!sect_empty(&info->sechdrs[i])) + nloaded++; + size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), + sizeof(sect_attrs->grp.bin_attrs[0])); + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); + sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); + if (sect_attrs == NULL) + return; + + /* Setup section attributes. 
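+	 * Each loaded section becomes a 0400 binary file under
+	 * /sys/module/<name>/sections/, whose contents are the
+	 * section's load address (or NULL when kallsyms_show_value()
+	 * denies it to the reader).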
*/
+	sect_attrs->grp.name = "sections";
+	sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
+
+	sect_attrs->nsections = 0;
+	sattr = &sect_attrs->attrs[0];
+	gattr = &sect_attrs->grp.bin_attrs[0];
+	for (i = 0; i < info->hdr->e_shnum; i++) {
+		Elf_Shdr *sec = &info->sechdrs[i];
+		if (sect_empty(sec))
+			continue;
+		sysfs_bin_attr_init(&sattr->battr);
+		sattr->address = sec->sh_addr;
+		sattr->battr.attr.name =
+			kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+		if (sattr->battr.attr.name == NULL)
+			goto out;
+		sect_attrs->nsections++;
+		sattr->battr.read = module_sect_read;
+		sattr->battr.size = MODULE_SECT_READ_SIZE;
+		sattr->battr.attr.mode = 0400;
+		*(gattr++) = &(sattr++)->battr;
+	}
+	*gattr = NULL;
+
+	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
+		goto out;
+
+	mod->sect_attrs = sect_attrs;
+	return;
+ out:
+	free_sect_attrs(sect_attrs);
+}
+
+static void remove_sect_attrs(struct module *mod)
+{
+	if (mod->sect_attrs) {
+		sysfs_remove_group(&mod->mkobj.kobj,
+				   &mod->sect_attrs->grp);
+		/*
+		 * We are positive that no one is using any sect attrs
+		 * at this point. Deallocate immediately.
+		 */
+		free_sect_attrs(mod->sect_attrs);
+		mod->sect_attrs = NULL;
+	}
+}
+
+/*
+ * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
+ */
+
+struct module_notes_attrs {
+	struct kobject *dir;
+	unsigned int notes;
+	struct bin_attribute attrs[];
+};
+
+static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
+				 struct bin_attribute *bin_attr,
+				 char *buf, loff_t pos, size_t count)
+{
+	/*
+	 * The caller checked the pos and count against our size.
+	 */
+	memcpy(buf, bin_attr->private + pos, count);
+	return count;
+}
+
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
+			     unsigned int i)
+{
+	if (notes_attrs->dir) {
+		while (i-- > 0)
+			sysfs_remove_bin_file(notes_attrs->dir,
+					      &notes_attrs->attrs[i]);
+		kobject_put(notes_attrs->dir);
+	}
+	kfree(notes_attrs);
+}
+
+static void add_notes_attrs(struct module *mod, const struct load_info *info)
+{
+	unsigned int notes, loaded, i;
+	struct module_notes_attrs *notes_attrs;
+	struct bin_attribute *nattr;
+
+	/* failed to create section attributes, so can't create notes */
+	if (!mod->sect_attrs)
+		return;
+
+	/* Count notes sections and allocate structures.
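+	 * A typical module carries one SHT_NOTE section such as
+	 * .note.gnu.build-id; each such section is exported below as a
+	 * file under /sys/module/<name>/notes/.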
*/
+	notes = 0;
+	for (i = 0; i < info->hdr->e_shnum; i++)
+		if (!sect_empty(&info->sechdrs[i]) &&
+		    (info->sechdrs[i].sh_type == SHT_NOTE))
+			++notes;
+
+	if (notes == 0)
+		return;
+
+	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
+			      GFP_KERNEL);
+	if (notes_attrs == NULL)
+		return;
+
+	notes_attrs->notes = notes;
+	nattr = &notes_attrs->attrs[0];
+	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
+		if (sect_empty(&info->sechdrs[i]))
+			continue;
+		if (info->sechdrs[i].sh_type == SHT_NOTE) {
+			sysfs_bin_attr_init(nattr);
+			nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
+			nattr->attr.mode = S_IRUGO;
+			nattr->size = info->sechdrs[i].sh_size;
+			nattr->private = (void *) info->sechdrs[i].sh_addr;
+			nattr->read = module_notes_read;
+			++nattr;
+		}
+		++loaded;
+	}
+
+	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
+	if (!notes_attrs->dir)
+		goto out;
+
+	for (i = 0; i < notes; ++i)
+		if (sysfs_create_bin_file(notes_attrs->dir,
+					  &notes_attrs->attrs[i]))
+			goto out;
+
+	mod->notes_attrs = notes_attrs;
+	return;
+
+ out:
+	free_notes_attrs(notes_attrs, i);
+}
+
+static void remove_notes_attrs(struct module *mod)
+{
+	if (mod->notes_attrs)
+		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+}
+
+#else
+
+static inline void add_sect_attrs(struct module *mod,
+				  const struct load_info *info)
+{
+}
+
+static inline void remove_sect_attrs(struct module *mod)
+{
+}
+
+static inline void add_notes_attrs(struct module *mod,
+				   const struct load_info *info)
+{
+}
+
+static inline void remove_notes_attrs(struct module *mod)
+{
+}
+#endif /* CONFIG_KALLSYMS */
+
+static void del_usage_links(struct module *mod)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+	struct module_use *use;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(use, &mod->target_list, target_list)
+		sysfs_remove_link(use->target->holders_dir, mod->name);
+	mutex_unlock(&module_mutex);
+#endif
+}
+
+static int add_usage_links(struct module *mod)
+{
+	int ret = 0;
+#ifdef CONFIG_MODULE_UNLOAD
+	struct module_use *use;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(use, &mod->target_list, target_list) {
+		ret = sysfs_create_link(use->target->holders_dir,
+					&mod->mkobj.kobj, mod->name);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&module_mutex);
+	if (ret)
+		del_usage_links(mod);
+#endif
+	return ret;
+}
+
+static void module_remove_modinfo_attrs(struct module *mod, int end);
+
+static int module_add_modinfo_attrs(struct module *mod)
+{
+	struct module_attribute *attr;
+	struct module_attribute *temp_attr;
+	int error = 0;
+	int i;
+
+	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
+				      (ARRAY_SIZE(modinfo_attrs) + 1)),
+				     GFP_KERNEL);
+	if (!mod->modinfo_attrs)
+		return -ENOMEM;
+
+	temp_attr = mod->modinfo_attrs;
+	for (i = 0; (attr = modinfo_attrs[i]); i++) {
+		if (!attr->test || attr->test(mod)) {
+			memcpy(temp_attr, attr, sizeof(*temp_attr));
+			sysfs_attr_init(&temp_attr->attr);
+			error = sysfs_create_file(&mod->mkobj.kobj,
+						  &temp_attr->attr);
+			if (error)
+				goto error_out;
+			++temp_attr;
+		}
+	}
+
+	return 0;
+
+error_out:
+	if (i > 0)
+		module_remove_modinfo_attrs(mod, --i);
+	else
+		kfree(mod->modinfo_attrs);
+	return error;
+}
+
+static void module_remove_modinfo_attrs(struct module *mod, int end)
+{
+	struct module_attribute *attr;
+	int i;
+
+	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
+		if (end >= 0 && i > end)
+			break;
+		/* pick a field to test for end of list */
+		if (!attr->attr.name)
+			break;
+		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
+		if
(attr->free) + attr->free(mod); + } + kfree(mod->modinfo_attrs); +} + +static void mod_kobject_put(struct module *mod) +{ + DECLARE_COMPLETION_ONSTACK(c); + mod->mkobj.kobj_completion = &c; + kobject_put(&mod->mkobj.kobj); + wait_for_completion(&c); +} + +static int mod_sysfs_init(struct module *mod) +{ + int err; + struct kobject *kobj; + + if (!module_sysfs_initialized) { + pr_err("%s: module sysfs not initialized\n", mod->name); + err = -EINVAL; + goto out; + } + + kobj = kset_find_obj(module_kset, mod->name); + if (kobj) { + pr_err("%s: module is already loaded\n", mod->name); + kobject_put(kobj); + err = -EINVAL; + goto out; + } + + mod->mkobj.mod = mod; + + memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); + mod->mkobj.kobj.kset = module_kset; + err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, + "%s", mod->name); + if (err) + mod_kobject_put(mod); + +out: + return err; +} + +static int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + struct kernel_param *kparam, + unsigned int num_params) +{ + int err; + + err = mod_sysfs_init(mod); + if (err) + goto out; + + mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); + if (!mod->holders_dir) { + err = -ENOMEM; + goto out_unreg; + } + + err = module_param_sysfs_setup(mod, kparam, num_params); + if (err) + goto out_unreg_holders; + + err = module_add_modinfo_attrs(mod); + if (err) + goto out_unreg_param; + + err = add_usage_links(mod); + if (err) + goto out_unreg_modinfo_attrs; + + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); + + return 0; + +out_unreg_modinfo_attrs: + module_remove_modinfo_attrs(mod, -1); +out_unreg_param: + module_param_sysfs_remove(mod); +out_unreg_holders: + kobject_put(mod->holders_dir); +out_unreg: + mod_kobject_put(mod); +out: + return err; +} + +static void mod_sysfs_fini(struct module *mod) +{ + remove_notes_attrs(mod); + remove_sect_attrs(mod); + mod_kobject_put(mod); +} + +static void init_param_lock(struct module *mod) +{ + mutex_init(&mod->param_lock); +} +#else /* !CONFIG_SYSFS */ + +static int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static void mod_sysfs_fini(struct module *mod) +{ +} + +static void module_remove_modinfo_attrs(struct module *mod, int end) +{ +} + +static void del_usage_links(struct module *mod) +{ +} + +static void init_param_lock(struct module *mod) +{ +} +#endif /* CONFIG_SYSFS */ + +static void mod_sysfs_teardown(struct module *mod) +{ + del_usage_links(mod); + module_remove_modinfo_attrs(mod, -1); + module_param_sysfs_remove(mod); + kobject_put(mod->mkobj.drivers_dir); + kobject_put(mod->holders_dir); + mod_sysfs_fini(mod); +} + +/* + * LKM RO/NX protection: protect module's text/ro-data + * from modification and any data from execution. + * + * General layout of module is: + * [text] [read-only-data] [ro-after-init] [writable data] + * text_size -----^ ^ ^ ^ + * ro_size ------------------------| | | + * ro_after_init_size -----------------------------| | + * size -----------------------------------------------------------| + * + * These values are always page-aligned (as is base) + */ + +/* + * Since some arches are moving towards PAGE_KERNEL module allocations instead + * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the + * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of + * whether we are strict. 
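+ *
+ * Mapping the helpers below onto the layout above (all ranges are
+ * page-aligned):
+ *
+ *	frob_text():          [base, base + text_size)
+ *	frob_rodata():        [base + text_size, base + ro_size)
+ *	frob_ro_after_init(): [base + ro_size, base + ro_after_init_size)
+ *	frob_writable_data(): [base + ro_after_init_size, base + size)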
+ */ +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX +static void frob_text(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) +{ + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); + set_memory((unsigned long)layout->base, + layout->text_size >> PAGE_SHIFT); +} + +static void module_enable_x(const struct module *mod) +{ + frob_text(&mod->core_layout, set_memory_x); + frob_text(&mod->init_layout, set_memory_x); +} +#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ +static void module_enable_x(const struct module *mod) { } +#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ + +#ifdef CONFIG_STRICT_MODULE_RWX +static void frob_rodata(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) +{ + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); + set_memory((unsigned long)layout->base + layout->text_size, + (layout->ro_size - layout->text_size) >> PAGE_SHIFT); +} + +static void frob_ro_after_init(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) +{ + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); + set_memory((unsigned long)layout->base + layout->ro_size, + (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT); +} + +static void frob_writable_data(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) +{ + BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); + BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); + set_memory((unsigned long)layout->base + layout->ro_after_init_size, + (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); +} + +static void module_enable_ro(const struct module *mod, bool after_init) +{ + if (!rodata_enabled) + return; + + set_vm_flush_reset_perms(mod->core_layout.base); + set_vm_flush_reset_perms(mod->init_layout.base); + frob_text(&mod->core_layout, set_memory_ro); + + frob_rodata(&mod->core_layout, set_memory_ro); + frob_text(&mod->init_layout, set_memory_ro); + frob_rodata(&mod->init_layout, set_memory_ro); + + if (after_init) + frob_ro_after_init(&mod->core_layout, set_memory_ro); +} + +static void module_enable_nx(const struct module *mod) +{ + frob_rodata(&mod->core_layout, set_memory_nx); + frob_ro_after_init(&mod->core_layout, set_memory_nx); + frob_writable_data(&mod->core_layout, set_memory_nx); + frob_rodata(&mod->init_layout, set_memory_nx); + frob_writable_data(&mod->init_layout, set_memory_nx); +} + +static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR; + int i; + + for (i = 0; i < hdr->e_shnum; i++) { + if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) { + pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n", + mod->name, secstrings + sechdrs[i].sh_name, i); + return -ENOEXEC; + } + } + + return 0; +} + +#else /* !CONFIG_STRICT_MODULE_RWX */ +static void module_enable_nx(const struct module *mod) { } +static void module_enable_ro(const struct module *mod, bool after_init) {} +static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) 
+{ + return 0; +} +#endif /* CONFIG_STRICT_MODULE_RWX */ + +#ifdef CONFIG_LIVEPATCH +/* + * Persist Elf information about a module. Copy the Elf header, + * section header table, section string table, and symtab section + * index from info to mod->klp_info. + */ +static int copy_module_elf(struct module *mod, struct load_info *info) +{ + unsigned int size, symndx; + int ret; + + size = sizeof(*mod->klp_info); + mod->klp_info = kmalloc(size, GFP_KERNEL); + if (mod->klp_info == NULL) + return -ENOMEM; + + /* Elf header */ + size = sizeof(mod->klp_info->hdr); + memcpy(&mod->klp_info->hdr, info->hdr, size); + + /* Elf section header table */ + size = sizeof(*info->sechdrs) * info->hdr->e_shnum; + mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); + if (mod->klp_info->sechdrs == NULL) { + ret = -ENOMEM; + goto free_info; + } + + /* Elf section name string table */ + size = info->sechdrs[info->hdr->e_shstrndx].sh_size; + mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); + if (mod->klp_info->secstrings == NULL) { + ret = -ENOMEM; + goto free_sechdrs; + } + + /* Elf symbol section index */ + symndx = info->index.sym; + mod->klp_info->symndx = symndx; + + /* + * For livepatch modules, core_kallsyms.symtab is a complete + * copy of the original symbol table. Adjust sh_addr to point + * to core_kallsyms.symtab since the copy of the symtab in module + * init memory is freed at the end of do_init_module(). + */ + mod->klp_info->sechdrs[symndx].sh_addr = \ + (unsigned long) mod->core_kallsyms.symtab; + + return 0; + +free_sechdrs: + kfree(mod->klp_info->sechdrs); +free_info: + kfree(mod->klp_info); + return ret; +} + +static void free_module_elf(struct module *mod) +{ + kfree(mod->klp_info->sechdrs); + kfree(mod->klp_info->secstrings); + kfree(mod->klp_info); +} +#else /* !CONFIG_LIVEPATCH */ +static int copy_module_elf(struct module *mod, struct load_info *info) +{ + return 0; +} + +static void free_module_elf(struct module *mod) +{ +} +#endif /* CONFIG_LIVEPATCH */ + +void __weak module_memfree(void *module_region) +{ + /* + * This memory may be RO, and freeing RO memory in an interrupt is not + * supported by vmalloc. + */ + WARN_ON(in_interrupt()); + vfree(module_region); +} + +void __weak module_arch_cleanup(struct module *mod) +{ +} + +void __weak module_arch_freeing_init(struct module *mod) +{ +} + +static void cfi_cleanup(struct module *mod); + +/* Free a module, remove from lists, etc. */ +static void free_module(struct module *mod) +{ + trace_module_free(mod); + + mod_sysfs_teardown(mod); + + /* + * We leave it in list to prevent duplicate loads, but make sure + * that noone uses it while it's being deconstructed. + */ + mutex_lock(&module_mutex); + mod->state = MODULE_STATE_UNFORMED; + mutex_unlock(&module_mutex); + + /* Remove dynamic debug info */ + ddebug_remove_module(mod->name); + + /* Arch-specific cleanup. */ + module_arch_cleanup(mod); + + /* Module unload stuff */ + module_unload_free(mod); + + /* Free any allocated parameters. */ + destroy_params(mod->kp, mod->num_kp); + + if (is_livepatch_module(mod)) + free_module_elf(mod); + + /* Now we can delete it from the lists */ + mutex_lock(&module_mutex); + /* Unlink carefully: kallsyms could be walking list. */ + list_del_rcu(&mod->list); + mod_tree_remove(mod); + /* Remove this module from bug list, this uses list_del_rcu */ + module_bug_cleanup(mod); + /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. 
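+	 * (Readers that started walking the list before the unlink above may
+	 * still hold pointers into it; the grace period below waits for them.)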
*/ + synchronize_rcu(); + mutex_unlock(&module_mutex); + + /* Clean up CFI for the module. */ + cfi_cleanup(mod); + + /* This may be empty, but that's OK */ + module_arch_freeing_init(mod); + module_memfree(mod->init_layout.base); + kfree(mod->args); + percpu_modfree(mod); + + /* Free lock-classes; relies on the preceding sync_rcu(). */ + lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); + + /* Finally, free the core (containing the module structure) */ + module_memfree(mod->core_layout.base); +} + +void *__symbol_get(const char *symbol) +{ + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + .warn = true, + }; + + preempt_disable(); + if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) { + preempt_enable(); + return NULL; + } + preempt_enable(); + return (void *)kernel_symbol_value(fsa.sym); +} +EXPORT_SYMBOL_GPL(__symbol_get); + +/* + * Ensure that an exported symbol [global namespace] does not already exist + * in the kernel or in some other module's exported symbol table. + * + * You must hold the module_mutex. + */ +static int verify_exported_symbols(struct module *mod) +{ + unsigned int i; + const struct kernel_symbol *s; + struct { + const struct kernel_symbol *sym; + unsigned int num; + } arr[] = { + { mod->syms, mod->num_syms }, + { mod->gpl_syms, mod->num_gpl_syms }, + }; + + for (i = 0; i < ARRAY_SIZE(arr); i++) { + for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { + struct find_symbol_arg fsa = { + .name = kernel_symbol_name(s), + .gplok = true, + }; + if (find_symbol(&fsa)) { + pr_err("%s: exports duplicate symbol %s" + " (owned by %s)\n", + mod->name, kernel_symbol_name(s), + module_name(fsa.owner)); + return -ENOEXEC; + } + } + } + return 0; +} + +static bool ignore_undef_symbol(Elf_Half emachine, const char *name) +{ + /* + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. + * i386 has a similar problem but may not deserve a fix. + * + * If we ever have to ignore many symbols, consider refactoring the code to + * only warn if referenced by a relocation. + */ + if (emachine == EM_386 || emachine == EM_X86_64) + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); + return false; +} + +/* Change all symbols so that st_value encodes the pointer directly. */ +static int simplify_symbols(struct module *mod, const struct load_info *info) +{ + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + Elf_Sym *sym = (void *)symsec->sh_addr; + unsigned long secbase; + unsigned int i; + int ret = 0; + const struct kernel_symbol *ksym; + + for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { + const char *name = info->strtab + sym[i].st_name; + + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* Ignore common symbols */ + if (!strncmp(name, "__gnu_lto", 9)) + break; + + /* + * We compiled with -fno-common. These are not + * supposed to happen. + */ + pr_debug("Common symbol: %s\n", name); + pr_warn("%s: please compile with -fno-common\n", + mod->name); + ret = -ENOEXEC; + break; + + case SHN_ABS: + /* Don't need to do anything */ + pr_debug("Absolute symbol: 0x%08lx\n", + (long)sym[i].st_value); + break; + + case SHN_LIVEPATCH: + /* Livepatch symbols are resolved by livepatch */ + break; + + case SHN_UNDEF: + ksym = resolve_symbol_wait(mod, info, name); + /* Ok if resolved. */ + if (ksym && !IS_ERR(ksym)) { + sym[i].st_value = kernel_symbol_value(ksym); + break; + } + + /* Ok if weak or ignored. 
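+			 * A weak undefined symbol is allowed to stay
+			 * unresolved; its st_value is simply left as 0.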
*/ + if (!ksym && + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || + ignore_undef_symbol(info->hdr->e_machine, name))) + break; + + ret = PTR_ERR(ksym) ?: -ENOENT; + pr_warn("%s: Unknown symbol %s (err %d)\n", + mod->name, name, ret); + break; + + default: + /* Divert to percpu allocation if a percpu var. */ + if (sym[i].st_shndx == info->index.pcpu) + secbase = (unsigned long)mod_percpu(mod); + else + secbase = info->sechdrs[sym[i].st_shndx].sh_addr; + sym[i].st_value += secbase; + break; + } + } + + return ret; +} + +static int apply_relocations(struct module *mod, const struct load_info *info) +{ + unsigned int i; + int err = 0; + + /* Now do relocations. */ + for (i = 1; i < info->hdr->e_shnum; i++) { + unsigned int infosec = info->sechdrs[i].sh_info; + + /* Not a valid relocation section? */ + if (infosec >= info->hdr->e_shnum) + continue; + + /* Don't bother with non-allocated sections */ + if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) + continue; + + if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) + err = klp_apply_section_relocs(mod, info->sechdrs, + info->secstrings, + info->strtab, + info->index.sym, i, + NULL); + else if (info->sechdrs[i].sh_type == SHT_REL) + err = apply_relocate(info->sechdrs, info->strtab, + info->index.sym, i, mod); + else if (info->sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(info->sechdrs, info->strtab, + info->index.sym, i, mod); + if (err < 0) + break; + } + return err; +} + +/* Additional bytes needed by arch in front of individual sections */ +unsigned int __weak arch_mod_section_prepend(struct module *mod, + unsigned int section) +{ + /* default implementation just returns zero */ + return 0; +} + +/* Update size with this section: return offset. */ +static long get_offset(struct module *mod, unsigned int *size, + Elf_Shdr *sechdr, unsigned int section) +{ + long ret; + + *size += arch_mod_section_prepend(mod, section); + ret = ALIGN(*size, sechdr->sh_addralign ?: 1); + *size = ret + sechdr->sh_size; + return ret; +} + +static bool module_init_layout_section(const char *sname) +{ +#ifndef CONFIG_MODULE_UNLOAD + if (module_exit_section(sname)) + return true; +#endif + return module_init_section(sname); +} + +/* + * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld + * might -- code, read-only data, read-write data, small data. Tally + * sizes, and place the offsets into sh_entsize fields: high bit means it + * belongs in init. 
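+ * For example, an sh_entsize of (0x40 | INIT_OFFSET_MASK) means the
+ * section lands 0x40 bytes into the init region rather than the core
+ * region; move_module() masks the bit off again when copying.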
+ */ +static void layout_sections(struct module *mod, struct load_info *info) +{ + static unsigned long const masks[][2] = { + /* + * NOTE: all executable code must be the first section + * in this array; otherwise modify the text_size + * finder in the two loops below + */ + { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, + { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, + { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, + { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, + { ARCH_SHF_SMALL | SHF_ALLOC, 0 } + }; + unsigned int m, i; + + for (i = 0; i < info->hdr->e_shnum; i++) + info->sechdrs[i].sh_entsize = ~0UL; + + pr_debug("Core section allocation order:\n"); + for (m = 0; m < ARRAY_SIZE(masks); ++m) { + for (i = 0; i < info->hdr->e_shnum; ++i) { + Elf_Shdr *s = &info->sechdrs[i]; + const char *sname = info->secstrings + s->sh_name; + + if ((s->sh_flags & masks[m][0]) != masks[m][0] + || (s->sh_flags & masks[m][1]) + || s->sh_entsize != ~0UL + || module_init_layout_section(sname)) + continue; + s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); + pr_debug("\t%s\n", sname); + } + switch (m) { + case 0: /* executable */ + mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.text_size = mod->core_layout.size; + break; + case 1: /* RO: text and ro-data */ + mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.ro_size = mod->core_layout.size; + break; + case 2: /* RO after init */ + mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.ro_after_init_size = mod->core_layout.size; + break; + case 4: /* whole core */ + mod->core_layout.size = debug_align(mod->core_layout.size); + break; + } + } + + pr_debug("Init section allocation order:\n"); + for (m = 0; m < ARRAY_SIZE(masks); ++m) { + for (i = 0; i < info->hdr->e_shnum; ++i) { + Elf_Shdr *s = &info->sechdrs[i]; + const char *sname = info->secstrings + s->sh_name; + + if ((s->sh_flags & masks[m][0]) != masks[m][0] + || (s->sh_flags & masks[m][1]) + || s->sh_entsize != ~0UL + || !module_init_layout_section(sname)) + continue; + s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) + | INIT_OFFSET_MASK); + pr_debug("\t%s\n", sname); + } + switch (m) { + case 0: /* executable */ + mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.text_size = mod->init_layout.size; + break; + case 1: /* RO: text and ro-data */ + mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.ro_size = mod->init_layout.size; + break; + case 2: + /* + * RO after init doesn't apply to init_layout (only + * core_layout), so it just takes the value of ro_size. + */ + mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; + break; + case 4: /* whole init */ + mod->init_layout.size = debug_align(mod->init_layout.size); + break; + } + } +} + +static void set_license(struct module *mod, const char *license) +{ + if (!license) + license = "unspecified"; + + if (!license_is_gpl_compatible(license)) { + if (!test_taint(TAINT_PROPRIETARY_MODULE)) + pr_warn("%s: module license '%s' taints kernel.\n", + mod->name, license); + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + } +} + +/* Parse tag=value strings from .modinfo section */ +static char *next_string(char *string, unsigned long *secsize) +{ + /* Skip non-zero chars */ + while (string[0]) { + string++; + if ((*secsize)-- <= 1) + return NULL; + } + + /* Skip any zero padding. 
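+	 * (.modinfo is a run of NUL-terminated "tag=value" strings, e.g.
+	 * "license=GPL\0depends=\0", possibly with padding in between.)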
*/ + while (!string[0]) { + string++; + if ((*secsize)-- <= 1) + return NULL; + } + return string; +} + +static char *get_next_modinfo(const struct load_info *info, const char *tag, + char *prev) +{ + char *p; + unsigned int taglen = strlen(tag); + Elf_Shdr *infosec = &info->sechdrs[info->index.info]; + unsigned long size = infosec->sh_size; + + /* + * get_modinfo() calls made before rewrite_section_headers() + * must use sh_offset, as sh_addr isn't set! + */ + char *modinfo = (char *)info->hdr + infosec->sh_offset; + + if (prev) { + size -= prev - modinfo; + modinfo = next_string(prev, &size); + } + + for (p = modinfo; p; p = next_string(p, &size)) { + if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') + return p + taglen + 1; + } + return NULL; +} + +static char *get_modinfo(const struct load_info *info, const char *tag) +{ + return get_next_modinfo(info, tag, NULL); +} + +static void setup_modinfo(struct module *mod, struct load_info *info) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (attr->setup) + attr->setup(mod, get_modinfo(info, attr->attr.name)); + } +} + +static void free_modinfo(struct module *mod) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (attr->free) + attr->free(mod); + } +} + +#ifdef CONFIG_KALLSYMS + +/* Lookup exported symbol in given range of kernel_symbols */ +static const struct kernel_symbol *lookup_exported_symbol(const char *name, + const struct kernel_symbol *start, + const struct kernel_symbol *stop) +{ + return bsearch(name, start, stop - start, + sizeof(struct kernel_symbol), cmp_name); +} + +static int is_exported(const char *name, unsigned long value, + const struct module *mod) +{ + const struct kernel_symbol *ks; + if (!mod) + ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); + else + ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); + + return ks != NULL && kernel_symbol_value(ks) == value; +} + +/* As per nm */ +static char elf_type(const Elf_Sym *sym, const struct load_info *info) +{ + const Elf_Shdr *sechdrs = info->sechdrs; + + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { + if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) + return 'v'; + else + return 'w'; + } + if (sym->st_shndx == SHN_UNDEF) + return 'U'; + if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) + return 'a'; + if (sym->st_shndx >= SHN_LORESERVE) + return '?'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) + return 't'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC + && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { + if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) + return 'r'; + else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 'g'; + else + return 'd'; + } + if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { + if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 's'; + else + return 'b'; + } + if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, + ".debug")) { + return 'n'; + } + return '?'; +} + +static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, + unsigned int shnum, unsigned int pcpundx) +{ + const Elf_Shdr *sec; + + if (src->st_shndx == SHN_UNDEF + || src->st_shndx >= shnum + || !src->st_name) + return false; + +#ifdef CONFIG_KALLSYMS_ALL + if (src->st_shndx == pcpundx) + return true; +#endif + + sec = sechdrs + src->st_shndx; + if (!(sec->sh_flags & SHF_ALLOC) +#ifndef CONFIG_KALLSYMS_ALL + || !(sec->sh_flags & SHF_EXECINSTR) +#endif 
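+	    /* symbols placed in the init region are discarded after init */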
+ || (sec->sh_entsize & INIT_OFFSET_MASK)) + return false; + + return true; +} + +/* + * We only allocate and copy the strings needed by the parts of symtab + * we keep. This is simple, but has the effect of making multiple + * copies of duplicates. We could be more sophisticated, see + * linux-kernel thread starting with + * <73defb5e4bca04a6431392cc341112b1@localhost>. + */ +static void layout_symtab(struct module *mod, struct load_info *info) +{ + Elf_Shdr *symsect = info->sechdrs + info->index.sym; + Elf_Shdr *strsect = info->sechdrs + info->index.str; + const Elf_Sym *src; + unsigned int i, nsrc, ndst, strtab_size = 0; + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; + symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, + info->index.sym) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + symsect->sh_name); + + src = (void *)info->hdr + symsect->sh_offset; + nsrc = symsect->sh_size / sizeof(*src); + + /* Compute total space required for the core symbols' strtab. */ + for (ndst = i = 0; i < nsrc; i++) { + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + strtab_size += strlen(&info->strtab[src[i].st_name])+1; + ndst++; + } + } + + /* Append room for core symbols at end of core part. */ + info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); + info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); + mod->core_layout.size += strtab_size; + info->core_typeoffs = mod->core_layout.size; + mod->core_layout.size += ndst * sizeof(char); + mod->core_layout.size = debug_align(mod->core_layout.size); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; + strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + + /* We'll tack temporary mod_kallsyms on the end. */ + mod->init_layout.size = ALIGN(mod->init_layout.size, + __alignof__(struct mod_kallsyms)); + info->mod_kallsyms_init_off = mod->init_layout.size; + mod->init_layout.size += sizeof(struct mod_kallsyms); + info->init_typeoffs = mod->init_layout.size; + mod->init_layout.size += nsrc * sizeof(char); + mod->init_layout.size = debug_align(mod->init_layout.size); +} + +/* + * We use the full symtab and strtab which layout_symtab arranged to + * be appended to the init section. Later we switch to the cut-down + * core-only ones. + */ +static void add_kallsyms(struct module *mod, const struct load_info *info) +{ + unsigned int i, ndst; + const Elf_Sym *src; + Elf_Sym *dst; + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + + /* Set up to point into init section. */ + mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; + + mod->kallsyms->symtab = (void *)symsec->sh_addr; + mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; + + /* + * Now populate the cut down core kallsyms for after init + * and set types up while we still have access to sections. 
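+	 * (elf_type() consults the section headers, which are only
+	 * available while the temporary load_info copy still exists.)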
+ */ + mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; + mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; + mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; + src = mod->kallsyms->symtab; + for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + mod->kallsyms->typetab[i] = elf_type(src + i, info); + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + mod->core_kallsyms.typetab[ndst] = + mod->kallsyms->typetab[i]; + dst[ndst] = src[i]; + dst[ndst++].st_name = s - mod->core_kallsyms.strtab; + s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } + mod->core_kallsyms.num_symtab = ndst; +} +#else +static inline void layout_symtab(struct module *mod, struct load_info *info) +{ +} + +static void add_kallsyms(struct module *mod, const struct load_info *info) +{ +} +#endif /* CONFIG_KALLSYMS */ + +#if IS_ENABLED(CONFIG_KALLSYMS) && IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) +static void init_build_id(struct module *mod, const struct load_info *info) +{ + const Elf_Shdr *sechdr; + unsigned int i; + + for (i = 0; i < info->hdr->e_shnum; i++) { + sechdr = &info->sechdrs[i]; + if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE && + !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id, + sechdr->sh_size)) + break; + } +} +#else +static void init_build_id(struct module *mod, const struct load_info *info) +{ +} +#endif + +static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num) +{ + if (!debug) + return; + ddebug_add_module(debug, num, mod->name); +} + +static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug) +{ + if (debug) + ddebug_remove_module(mod->name); +} + +void * __weak module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); +} + +bool __weak module_init_section(const char *name) +{ + return strstarts(name, ".init"); +} + +bool __weak module_exit_section(const char *name) +{ + return strstarts(name, ".exit"); +} + +#ifdef CONFIG_DEBUG_KMEMLEAK +static void kmemleak_load_module(const struct module *mod, + const struct load_info *info) +{ + unsigned int i; + + /* only scan the sections containing data */ + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); + + for (i = 1; i < info->hdr->e_shnum; i++) { + /* Scan all writable sections that's not executable */ + if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || + !(info->sechdrs[i].sh_flags & SHF_WRITE) || + (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) + continue; + + kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, + info->sechdrs[i].sh_size, GFP_KERNEL); + } +} +#else +static inline void kmemleak_load_module(const struct module *mod, + const struct load_info *info) +{ +} +#endif + +#ifdef CONFIG_MODULE_SIG +static int module_sig_check(struct load_info *info, int flags) +{ + int err = -ENODATA; + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const char *reason; + const void *mod = info->hdr; + bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | + MODULE_INIT_IGNORE_VERMAGIC); + /* + * Do not allow mangled modules as a module with version information + * removed is no longer the module that was signed. 
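+	 * Such modules are simply treated as unsigned instead.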
+ */ + if (!mangled_module && + info->len > markerlen && + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { + /* We truncate the module to discard the signature */ + info->len -= markerlen; + err = mod_verify_sig(mod, info); + if (!err) { + info->sig_ok = true; + return 0; + } + } + + /* + * We don't permit modules to be loaded into the trusted kernels + * without a valid signature on them, but if we're not enforcing, + * certain errors are non-fatal. + */ + switch (err) { + case -ENODATA: + reason = "unsigned module"; + break; + case -ENOPKG: + reason = "module with unsupported crypto"; + break; + case -ENOKEY: + reason = "module with unavailable key"; + break; + + default: + /* + * All other errors are fatal, including lack of memory, + * unparseable signatures, and signature check failures -- + * even if signatures aren't required. + */ + return err; + } + + if (is_module_sig_enforced()) { + pr_notice("Loading of %s is rejected\n", reason); + return -EKEYREJECTED; + } + + return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); +} +#else /* !CONFIG_MODULE_SIG */ +static int module_sig_check(struct load_info *info, int flags) +{ + return 0; +} +#endif /* !CONFIG_MODULE_SIG */ + +static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) +{ +#if defined(CONFIG_64BIT) + unsigned long long secend; +#else + unsigned long secend; +#endif + + /* + * Check for both overflow and offset/size being + * too large. + */ + secend = shdr->sh_offset + shdr->sh_size; + if (secend < shdr->sh_offset || secend > info->len) + return -ENOEXEC; + + return 0; +} + +/* + * Sanity checks against invalid binaries, wrong arch, weird elf version. + * + * Also do basic validity checks against section offsets and sizes, the + * section name string table, and the indices used for it (sh_name). + */ +static int elf_validity_check(struct load_info *info) +{ + unsigned int i; + Elf_Shdr *shdr, *strhdr; + int err; + + if (info->len < sizeof(*(info->hdr))) { + pr_err("Invalid ELF header len %lu\n", info->len); + goto no_exec; + } + + if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { + pr_err("Invalid ELF header magic: != %s\n", ELFMAG); + goto no_exec; + } + if (info->hdr->e_type != ET_REL) { + pr_err("Invalid ELF header type: %u != %u\n", + info->hdr->e_type, ET_REL); + goto no_exec; + } + if (!elf_check_arch(info->hdr)) { + pr_err("Invalid architecture in ELF header: %u\n", + info->hdr->e_machine); + goto no_exec; + } + if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { + pr_err("Invalid ELF section header size\n"); + goto no_exec; + } + + /* + * e_shnum is 16 bits, and sizeof(Elf_Shdr) is + * known and small. So e_shnum * sizeof(Elf_Shdr) + * will not overflow unsigned long on any platform. + */ + if (info->hdr->e_shoff >= info->len + || (info->hdr->e_shnum * sizeof(Elf_Shdr) > + info->len - info->hdr->e_shoff)) { + pr_err("Invalid ELF section header overflow\n"); + goto no_exec; + } + + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + + /* + * Verify if the section name table index is valid. 
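+	 * (index 0 is the reserved null section, and anything at or
+	 * beyond e_shnum is out of range)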
+ */ + if (info->hdr->e_shstrndx == SHN_UNDEF + || info->hdr->e_shstrndx >= info->hdr->e_shnum) { + pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", + info->hdr->e_shstrndx, info->hdr->e_shstrndx, + info->hdr->e_shnum); + goto no_exec; + } + + strhdr = &info->sechdrs[info->hdr->e_shstrndx]; + err = validate_section_offset(info, strhdr); + if (err < 0) { + pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); + return err; + } + + /* + * The section name table must be NUL-terminated, as required + * by the spec. This makes strcmp and pr_* calls that access + * strings in the section safe. + */ + info->secstrings = (void *)info->hdr + strhdr->sh_offset; + if (info->secstrings[strhdr->sh_size - 1] != '\0') { + pr_err("ELF Spec violation: section name table isn't null terminated\n"); + goto no_exec; + } + + /* + * The code assumes that section 0 has a length of zero and + * an addr of zero, so check for it. + */ + if (info->sechdrs[0].sh_type != SHT_NULL + || info->sechdrs[0].sh_size != 0 + || info->sechdrs[0].sh_addr != 0) { + pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", + info->sechdrs[0].sh_type); + goto no_exec; + } + + for (i = 1; i < info->hdr->e_shnum; i++) { + shdr = &info->sechdrs[i]; + switch (shdr->sh_type) { + case SHT_NULL: + case SHT_NOBITS: + continue; + case SHT_SYMTAB: + if (shdr->sh_link == SHN_UNDEF + || shdr->sh_link >= info->hdr->e_shnum) { + pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", + shdr->sh_link, shdr->sh_link, + info->hdr->e_shnum); + goto no_exec; + } + fallthrough; + default: + err = validate_section_offset(info, shdr); + if (err < 0) { + pr_err("Invalid ELF section in module (section %u type %u)\n", + i, shdr->sh_type); + return err; + } + + if (shdr->sh_flags & SHF_ALLOC) { + if (shdr->sh_name >= strhdr->sh_size) { + pr_err("Invalid ELF section name in module (section %u type %u)\n", + i, shdr->sh_type); + return -ENOEXEC; + } + } + break; + } + } + + return 0; + +no_exec: + return -ENOEXEC; +} + +#define COPY_CHUNK_SIZE (16*PAGE_SIZE) + +static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) +{ + do { + unsigned long n = min(len, COPY_CHUNK_SIZE); + + if (copy_from_user(dst, usrc, n) != 0) + return -EFAULT; + cond_resched(); + dst += n; + usrc += n; + len -= n; + } while (len); + return 0; +} + +#ifdef CONFIG_LIVEPATCH +static int check_modinfo_livepatch(struct module *mod, struct load_info *info) +{ + if (get_modinfo(info, "livepatch")) { + mod->klp = true; + add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); + pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", + mod->name); + } + + return 0; +} +#else /* !CONFIG_LIVEPATCH */ +static int check_modinfo_livepatch(struct module *mod, struct load_info *info) +{ + if (get_modinfo(info, "livepatch")) { + pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", + mod->name); + return -ENOEXEC; + } + + return 0; +} +#endif /* CONFIG_LIVEPATCH */ + +static void check_modinfo_retpoline(struct module *mod, struct load_info *info) +{ + if (retpoline_module_ok(get_modinfo(info, "retpoline"))) + return; + + pr_warn("%s: loading module not compiled with retpoline compiler.\n", + mod->name); +} + +/* Sets info->hdr and info->len. 
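+ * The whole image is pulled from userspace into one vmalloc'd buffer;
+ * every later parsing stage works on that single copy.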
*/ +static int copy_module_from_user(const void __user *umod, unsigned long len, + struct load_info *info) +{ + int err; + + info->len = len; + if (info->len < sizeof(*(info->hdr))) + return -ENOEXEC; + + err = security_kernel_load_data(LOADING_MODULE, true); + if (err) + return err; + + /* Suck in entire file: we'll want most of it. */ + info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); + if (!info->hdr) + return -ENOMEM; + + if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { + err = -EFAULT; + goto out; + } + + err = security_kernel_post_load_data((char *)info->hdr, info->len, + LOADING_MODULE, "init_module"); +out: + if (err) + vfree(info->hdr); + + return err; +} + +static void free_copy(struct load_info *info, int flags) +{ + if (flags & MODULE_INIT_COMPRESSED_FILE) + module_decompress_cleanup(info); + else + vfree(info->hdr); +} + +static int rewrite_section_headers(struct load_info *info, int flags) +{ + unsigned int i; + + /* This should always be true, but let's be sure. */ + info->sechdrs[0].sh_addr = 0; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + + /* + * Mark all sections sh_addr with their address in the + * temporary image. + */ + shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; + + } + + /* Track but don't keep modinfo and version sections. */ + info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; + info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; + + return 0; +} + +/* + * Set up our basic convenience variables (pointers to section headers, + * search for module section index etc), and do some basic section + * verification. + * + * Set info->mod to the temporary copy of the module in info->hdr. The final one + * will be allocated in move_module(). + */ +static int setup_load_info(struct load_info *info, int flags) +{ + unsigned int i; + + /* Try to find a name early so we can log errors with a module name */ + info->index.info = find_sec(info, ".modinfo"); + if (info->index.info) + info->name = get_modinfo(info, "name"); + + /* Find internal symbols and strings. */ + for (i = 1; i < info->hdr->e_shnum; i++) { + if (info->sechdrs[i].sh_type == SHT_SYMTAB) { + info->index.sym = i; + info->index.str = info->sechdrs[i].sh_link; + info->strtab = (char *)info->hdr + + info->sechdrs[info->index.str].sh_offset; + break; + } + } + + if (info->index.sym == 0) { + pr_warn("%s: module has no symbols (stripped?)\n", + info->name ?: "(missing .modinfo section or name field)"); + return -ENOEXEC; + } + + info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); + if (!info->index.mod) { + pr_warn("%s: No module found in object\n", + info->name ?: "(missing .modinfo section or name field)"); + return -ENOEXEC; + } + /* This is temporary: point mod into copy of data. */ + info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; + + /* + * If we didn't load the .modinfo 'name' field earlier, fall back to + * on-disk struct mod 'name' field. + */ + if (!info->name) + info->name = info->mod->name; + + if (flags & MODULE_INIT_IGNORE_MODVERSIONS) + info->index.vers = 0; /* Pretend no __versions section! 
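+						 * (index 0 is the always-empty
+						 * null section, so no CRCs
+						 * will be found)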
*/ + else + info->index.vers = find_sec(info, "__versions"); + + info->index.pcpu = find_pcpusec(info); + + return 0; +} + +static int check_modinfo(struct module *mod, struct load_info *info, int flags) +{ + const char *modmagic = get_modinfo(info, "vermagic"); + int err; + + if (flags & MODULE_INIT_IGNORE_VERMAGIC) + modmagic = NULL; + + /* This is allowed: modprobe --force will invalidate it. */ + if (!modmagic) { + err = try_to_force_load(mod, "bad vermagic"); + if (err) + return err; + } else if (!same_magic(modmagic, vermagic, info->index.vers)) { + pr_err("%s: version magic '%s' should be '%s'\n", + info->name, modmagic, vermagic); + return -ENOEXEC; + } + + if (!get_modinfo(info, "intree")) { + if (!test_taint(TAINT_OOT_MODULE)) + pr_warn("%s: loading out-of-tree module taints kernel.\n", + mod->name); + add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); + } + + check_modinfo_retpoline(mod, info); + + if (get_modinfo(info, "staging")) { + add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); + pr_warn("%s: module is from the staging directory, the quality " + "is unknown, you have been warned.\n", mod->name); + } + + err = check_modinfo_livepatch(mod, info); + if (err) + return err; + + /* Set up license info based on the info section */ + set_license(mod, get_modinfo(info, "license")); + + return 0; +} + +static int find_module_sections(struct module *mod, struct load_info *info) +{ + mod->kp = section_objs(info, "__param", + sizeof(*mod->kp), &mod->num_kp); + mod->syms = section_objs(info, "__ksymtab", + sizeof(*mod->syms), &mod->num_syms); + mod->crcs = section_addr(info, "__kcrctab"); + mod->gpl_syms = section_objs(info, "__ksymtab_gpl", + sizeof(*mod->gpl_syms), + &mod->num_gpl_syms); + mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); + +#ifdef CONFIG_CONSTRUCTORS + mod->ctors = section_objs(info, ".ctors", + sizeof(*mod->ctors), &mod->num_ctors); + if (!mod->ctors) + mod->ctors = section_objs(info, ".init_array", + sizeof(*mod->ctors), &mod->num_ctors); + else if (find_sec(info, ".init_array")) { + /* + * This shouldn't happen with same compiler and binutils + * building all parts of the module. 
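+		 * (GCC traditionally emits constructors into .ctors and
+		 * newer toolchains into .init_array; a consistently built
+		 * module should only ever contain one of the two.)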
+ */ + pr_warn("%s: has both .ctors and .init_array.\n", + mod->name); + return -EINVAL; + } +#endif + + mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, + &mod->noinstr_text_size); + +#ifdef CONFIG_TRACEPOINTS + mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", + sizeof(*mod->tracepoints_ptrs), + &mod->num_tracepoints); +#endif +#ifdef CONFIG_TREE_SRCU + mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", + sizeof(*mod->srcu_struct_ptrs), + &mod->num_srcu_structs); +#endif +#ifdef CONFIG_BPF_EVENTS + mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", + sizeof(*mod->bpf_raw_events), + &mod->num_bpf_raw_events); +#endif +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); +#endif +#ifdef CONFIG_JUMP_LABEL + mod->jump_entries = section_objs(info, "__jump_table", + sizeof(*mod->jump_entries), + &mod->num_jump_entries); +#endif +#ifdef CONFIG_EVENT_TRACING + mod->trace_events = section_objs(info, "_ftrace_events", + sizeof(*mod->trace_events), + &mod->num_trace_events); + mod->trace_evals = section_objs(info, "_ftrace_eval_map", + sizeof(*mod->trace_evals), + &mod->num_trace_evals); +#endif +#ifdef CONFIG_TRACING + mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", + sizeof(*mod->trace_bprintk_fmt_start), + &mod->num_trace_bprintk_fmt); +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + /* sechdrs[0].sh_size is always zero */ + mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, + sizeof(*mod->ftrace_callsites), + &mod->num_ftrace_callsites); +#endif +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + mod->ei_funcs = section_objs(info, "_error_injection_whitelist", + sizeof(*mod->ei_funcs), + &mod->num_ei_funcs); +#endif +#ifdef CONFIG_KPROBES + mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, + &mod->kprobes_text_size); + mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", + sizeof(unsigned long), + &mod->num_kprobe_blacklist); +#endif +#ifdef CONFIG_PRINTK_INDEX + mod->printk_index_start = section_objs(info, ".printk_index", + sizeof(*mod->printk_index_start), + &mod->printk_index_size); +#endif +#ifdef CONFIG_HAVE_STATIC_CALL_INLINE + mod->static_call_sites = section_objs(info, ".static_call_sites", + sizeof(*mod->static_call_sites), + &mod->num_static_call_sites); +#endif + mod->extable = section_objs(info, "__ex_table", + sizeof(*mod->extable), &mod->num_exentries); + + if (section_addr(info, "__obsparm")) + pr_warn("%s: Ignoring obsolete parameters\n", mod->name); + + info->debug = section_objs(info, "__dyndbg", + sizeof(*info->debug), &info->num_debug); + + return 0; +} + +static int move_module(struct module *mod, struct load_info *info) +{ + int i; + void *ptr; + + /* Do the allocs. */ + ptr = module_alloc(mod->core_layout.size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a + * leak. + */ + kmemleak_not_leak(ptr); + if (!ptr) + return -ENOMEM; + + memset(ptr, 0, mod->core_layout.size); + mod->core_layout.base = ptr; + + if (mod->init_layout.size) { + ptr = module_alloc(mod->init_layout.size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. 
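+		 * (Hence kmemleak_ignore() below rather than
+		 * kmemleak_not_leak(), which would still scan the block.)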
+ */ + kmemleak_ignore(ptr); + if (!ptr) { + module_memfree(mod->core_layout.base); + return -ENOMEM; + } + memset(ptr, 0, mod->init_layout.size); + mod->init_layout.base = ptr; + } else + mod->init_layout.base = NULL; + + /* Transfer each section which specifies SHF_ALLOC */ + pr_debug("final section addresses:\n"); + for (i = 0; i < info->hdr->e_shnum; i++) { + void *dest; + Elf_Shdr *shdr = &info->sechdrs[i]; + + if (!(shdr->sh_flags & SHF_ALLOC)) + continue; + + if (shdr->sh_entsize & INIT_OFFSET_MASK) + dest = mod->init_layout.base + + (shdr->sh_entsize & ~INIT_OFFSET_MASK); + else + dest = mod->core_layout.base + shdr->sh_entsize; + + if (shdr->sh_type != SHT_NOBITS) + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); + /* Update sh_addr to point to copy in image. */ + shdr->sh_addr = (unsigned long)dest; + pr_debug("\t0x%lx %s\n", + (long)shdr->sh_addr, info->secstrings + shdr->sh_name); + } + + return 0; +} + +static int check_module_license_and_versions(struct module *mod) +{ + int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); + + /* + * ndiswrapper is under GPL by itself, but loads proprietary modules. + * Don't use add_taint_module(), as it would prevent ndiswrapper from + * using GPL-only symbols it needs. + */ + if (strcmp(mod->name, "ndiswrapper") == 0) + add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); + + /* driverloader was caught wrongly pretending to be under GPL */ + if (strcmp(mod->name, "driverloader") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + + /* lve claims to be GPL but upstream won't provide source */ + if (strcmp(mod->name, "lve") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE, + LOCKDEP_NOW_UNRELIABLE); + + if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) + pr_warn("%s: module license taints kernel.\n", mod->name); + +#ifdef CONFIG_MODVERSIONS + if ((mod->num_syms && !mod->crcs) || + (mod->num_gpl_syms && !mod->gpl_crcs)) { + return try_to_force_load(mod, + "no versions for exported symbols"); + } +#endif + return 0; +} + +static void flush_module_icache(const struct module *mod) +{ + /* + * Flush the instruction cache, since we've played with text. + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. + */ + if (mod->init_layout.base) + flush_icache_range((unsigned long)mod->init_layout.base, + (unsigned long)mod->init_layout.base + + mod->init_layout.size); + flush_icache_range((unsigned long)mod->core_layout.base, + (unsigned long)mod->core_layout.base + mod->core_layout.size); +} + +int __weak module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod) +{ + return 0; +} + +/* module_blacklist is a comma-separated list of module names */ +static char *module_blacklist; +static bool blacklisted(const char *module_name) +{ + const char *p; + size_t len; + + if (!module_blacklist) + return false; + + for (p = module_blacklist; *p; p += len) { + len = strcspn(p, ","); + if (strlen(module_name) == len && !memcmp(module_name, p, len)) + return true; + if (p[len] == ',') + len++; + } + return false; +} +core_param(module_blacklist, module_blacklist, charp, 0400); + +static struct module *layout_and_allocate(struct load_info *info, int flags) +{ + struct module *mod; + unsigned int ndx; + int err; + + err = check_modinfo(info->mod, info, flags); + if (err) + return ERR_PTR(err); + + /* Allow arches to frob section contents and sizes. 
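+	 * (On some architectures this is where, e.g., PLT sections for
+	 * long-range branches get sized.)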
*/ + err = module_frob_arch_sections(info->hdr, info->sechdrs, + info->secstrings, info->mod); + if (err < 0) + return ERR_PTR(err); + + err = module_enforce_rwx_sections(info->hdr, info->sechdrs, + info->secstrings, info->mod); + if (err < 0) + return ERR_PTR(err); + + /* We will do a special allocation for per-cpu sections later. */ + info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; + + /* + * Mark ro_after_init section with SHF_RO_AFTER_INIT so that + * layout_sections() can put it in the right place. + * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. + */ + ndx = find_sec(info, ".data..ro_after_init"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + /* + * Mark the __jump_table section as ro_after_init as well: these data + * structures are never modified, with the exception of entries that + * refer to code in the __init section, which are annotated as such + * at module load time. + */ + ndx = find_sec(info, "__jump_table"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + + /* + * Determine total sizes, and put offsets in sh_entsize. For now + * this is done generically; there doesn't appear to be any + * special cases for the architectures. + */ + layout_sections(info->mod, info); + layout_symtab(info->mod, info); + + /* Allocate and move to the final place */ + err = move_module(info->mod, info); + if (err) + return ERR_PTR(err); + + /* Module has been copied to its final place now: return it. */ + mod = (void *)info->sechdrs[info->index.mod].sh_addr; + kmemleak_load_module(mod, info); + return mod; +} + +/* mod is no longer valid after this! */ +static void module_deallocate(struct module *mod, struct load_info *info) +{ + percpu_modfree(mod); + module_arch_freeing_init(mod); + module_memfree(mod->init_layout.base); + module_memfree(mod->core_layout.base); +} + +int __weak module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + return 0; +} + +static int post_relocation(struct module *mod, const struct load_info *info) +{ + /* Sort exception table now relocations are done. */ + sort_extable(mod->extable, mod->extable + mod->num_exentries); + + /* Copy relocated percpu area over. */ + percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, + info->sechdrs[info->index.pcpu].sh_size); + + /* Setup kallsyms-specific fields. */ + add_kallsyms(mod, info); + + /* Arch-specific module finalizing. */ + return module_finalize(info->hdr, info->sechdrs, mod); +} + +/* Is this module of this name done loading? No locks held. */ +static bool finished_loading(const char *name) +{ + struct module *mod; + bool ret; + + /* + * The module_mutex should not be a heavily contended lock; + * if we get the occasional sleep here, we'll go an extra iteration + * in the wait_event_interruptible(), which is harmless. + */ + sched_annotate_sleep(); + mutex_lock(&module_mutex); + mod = find_module_all(name, strlen(name), true); + ret = !mod || mod->state == MODULE_STATE_LIVE; + mutex_unlock(&module_mutex); + + return ret; +} + +/* Call module constructors. 
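+ * These run before the module's own init routine, e.g. for gcov-style
+ * instrumentation built with CONFIG_CONSTRUCTORS.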
*/ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS + unsigned long i; + + for (i = 0; i < mod->num_ctors; i++) + mod->ctors[i](); +#endif +} + +/* For freeing module_init on success, in case kallsyms traversing */ +struct mod_initfree { + struct llist_node node; + void *module_init; +}; + +static void do_free_init(struct work_struct *w) +{ + struct llist_node *pos, *n, *list; + struct mod_initfree *initfree; + + list = llist_del_all(&init_free_list); + + synchronize_rcu(); + + llist_for_each_safe(pos, n, list) { + initfree = container_of(pos, struct mod_initfree, node); + module_memfree(initfree->module_init); + kfree(initfree); + } +} + +/* + * This is where the real work happens. + * + * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb + * helper command 'lx-symbols'. + */ +static noinline int do_init_module(struct module *mod) +{ + int ret = 0; + struct mod_initfree *freeinit; + + freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); + if (!freeinit) { + ret = -ENOMEM; + goto fail; + } + freeinit->module_init = mod->init_layout.base; + + do_mod_ctors(mod); + /* Start the module */ + if (mod->init != NULL) + ret = do_one_initcall(mod->init); + if (ret < 0) { + goto fail_free_freeinit; + } + if (ret > 0) { + pr_warn("%s: '%s'->init suspiciously returned %d, it should " + "follow 0/-E convention\n" + "%s: loading module anyway...\n", + __func__, mod->name, ret, __func__); + dump_stack(); + } + + /* Now it's a first class citizen! */ + mod->state = MODULE_STATE_LIVE; + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_LIVE, mod); + + /* Delay uevent until module has finished its init routine */ + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + + /* + * We need to finish all async code before the module init sequence + * is done. This has potential to deadlock if synchronous module + * loading is requested from async (which is not allowed!). + * + * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous + * request_module() from async workers") for more details. + */ + if (!mod->async_probe_requested) + async_synchronize_full(); + + ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + + mod->init_layout.size); + mutex_lock(&module_mutex); + /* Drop initial reference. */ + module_put(mod); + trim_init_extable(mod); +#ifdef CONFIG_KALLSYMS + /* Switch to core kallsyms now init is done: kallsyms may be walking! */ + rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); +#endif + module_enable_ro(mod, true); + mod_tree_remove_init(mod); + module_arch_freeing_init(mod); + mod->init_layout.base = NULL; + mod->init_layout.size = 0; + mod->init_layout.ro_size = 0; + mod->init_layout.ro_after_init_size = 0; + mod->init_layout.text_size = 0; +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ + mod->btf_data = NULL; +#endif + /* + * We want to free module_init, but be aware that kallsyms may be + * walking this with preempt disabled. In all the failure paths, we + * call synchronize_rcu(), but we don't want to slow down the success + * path. module_memfree() cannot be called in an interrupt, so do the + * work and call synchronize_rcu() in a work queue. + * + * Note that module_alloc() on most architectures creates W+X page + * mappings which won't be cleaned up until do_free_init() runs. 
Any + * code such as mark_rodata_ro() which depends on those mappings to + * be cleaned up needs to sync with the queued work - ie + * rcu_barrier() + */ + if (llist_add(&freeinit->node, &init_free_list)) + schedule_work(&init_free_wq); + + mutex_unlock(&module_mutex); + wake_up_all(&module_wq); + + return 0; + +fail_free_freeinit: + kfree(freeinit); +fail: + /* Try to protect us from buggy refcounters. */ + mod->state = MODULE_STATE_GOING; + synchronize_rcu(); + module_put(mod); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + ftrace_release_mod(mod); + free_module(mod); + wake_up_all(&module_wq); + return ret; +} + +static int may_init_module(void) +{ + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + + return 0; +} + +/* + * We try to place it in the list now to make sure it's unique before + * we dedicate too many resources. In particular, temporary percpu + * memory exhaustion. + */ +static int add_unformed_module(struct module *mod) +{ + int err; + struct module *old; + + mod->state = MODULE_STATE_UNFORMED; + +again: + mutex_lock(&module_mutex); + old = find_module_all(mod->name, strlen(mod->name), true); + if (old != NULL) { + if (old->state != MODULE_STATE_LIVE) { + /* Wait in case it fails to load. */ + mutex_unlock(&module_mutex); + err = wait_event_interruptible(module_wq, + finished_loading(mod->name)); + if (err) + goto out_unlocked; + goto again; + } + err = -EEXIST; + goto out; + } + mod_update_bounds(mod); + list_add_rcu(&mod->list, &modules); + mod_tree_insert(mod); + err = 0; + +out: + mutex_unlock(&module_mutex); +out_unlocked: + return err; +} + +static int complete_formation(struct module *mod, struct load_info *info) +{ + int err; + + mutex_lock(&module_mutex); + + /* Find duplicate symbols (must be called under lock). */ + err = verify_exported_symbols(mod); + if (err < 0) + goto out; + + /* This relies on module_mutex for list integrity. */ + module_bug_finalize(info->hdr, info->sechdrs, mod); + + module_enable_ro(mod, false); + module_enable_nx(mod); + module_enable_x(mod); + + /* + * Mark state as coming so strong_try_module_get() ignores us, + * but kallsyms etc. can see us. + */ + mod->state = MODULE_STATE_COMING; + mutex_unlock(&module_mutex); + + return 0; + +out: + mutex_unlock(&module_mutex); + return err; +} + +static int prepare_coming_module(struct module *mod) +{ + int err; + + ftrace_module_enable(mod); + err = klp_module_coming(mod); + if (err) + return err; + + err = blocking_notifier_call_chain_robust(&module_notify_list, + MODULE_STATE_COMING, MODULE_STATE_GOING, mod); + err = notifier_to_errno(err); + if (err) + klp_module_going(mod); + + return err; +} + +static int unknown_module_param_cb(char *param, char *val, const char *modname, + void *arg) +{ + struct module *mod = arg; + int ret; + + if (strcmp(param, "async_probe") == 0) { + mod->async_probe_requested = true; + return 0; + } + + /* Check for magic 'dyndbg' arg */ + ret = ddebug_dyndbg_module_param_cb(param, val, modname); + if (ret != 0) + pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); + return 0; +} + +static void cfi_init(struct module *mod); + +/* + * Allocate and load the module: note that size of section 0 is always + * zero, and we rely on this for optional sections. + */ +static int load_module(struct load_info *info, const char __user *uargs, + int flags) +{ + struct module *mod; + long err = 0; + char *after_dashes; + + /* + * Do the signature check (if any) first. 
All that + * the signature check needs is info->len, it does + * not need any of the section info. That can be + * set up later. This will minimize the chances + * of a corrupt module causing problems before + * we even get to the signature check. + * + * The check will also adjust info->len by stripping + * off the sig length at the end of the module, making + * checks against info->len more correct. + */ + err = module_sig_check(info, flags); + if (err) + goto free_copy; + + /* + * Do basic sanity checks against the ELF header and + * sections. + */ + err = elf_validity_check(info); + if (err) + goto free_copy; + + /* + * Everything checks out, so set up the section info + * in the info structure. + */ + err = setup_load_info(info, flags); + if (err) + goto free_copy; + + /* + * Now that we know we have the correct module name, check + * if it's blacklisted. + */ + if (blacklisted(info->name)) { + err = -EPERM; + pr_err("Module %s is blacklisted\n", info->name); + goto free_copy; + } + + err = rewrite_section_headers(info, flags); + if (err) + goto free_copy; + + /* Check module struct version now, before we try to use module. */ + if (!check_modstruct_version(info, info->mod)) { + err = -ENOEXEC; + goto free_copy; + } + + /* Figure out module layout, and allocate all the memory. */ + mod = layout_and_allocate(info, flags); + if (IS_ERR(mod)) { + err = PTR_ERR(mod); + goto free_copy; + } + + audit_log_kern_module(mod->name); + + /* Reserve our place in the list. */ + err = add_unformed_module(mod); + if (err) + goto free_module; + +#ifdef CONFIG_MODULE_SIG + mod->sig_ok = info->sig_ok; + if (!mod->sig_ok) { + pr_notice_once("%s: module verification failed: signature " + "and/or required key missing - tainting " + "kernel\n", mod->name); + add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); + } +#endif + + /* To avoid stressing percpu allocator, do this once we're unique. */ + err = percpu_modalloc(mod, info); + if (err) + goto unlink_mod; + + /* Now module is in final location, initialize linked lists, etc. */ + err = module_unload_init(mod); + if (err) + goto unlink_mod; + + init_param_lock(mod); + + /* + * Now we've got everything in the final locations, we can + * find optional sections. + */ + err = find_module_sections(mod, info); + if (err) + goto free_unload; + + err = check_module_license_and_versions(mod); + if (err) + goto free_unload; + + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, info); + + /* Fix up syms, so that st_value is a pointer to location. */ + err = simplify_symbols(mod, info); + if (err < 0) + goto free_modinfo; + + err = apply_relocations(mod, info); + if (err < 0) + goto free_modinfo; + + err = post_relocation(mod, info); + if (err < 0) + goto free_modinfo; + + flush_module_icache(mod); + + /* Setup CFI for the module. */ + cfi_init(mod); + + /* Now copy in args */ + mod->args = strndup_user(uargs, ~0UL >> 1); + if (IS_ERR(mod->args)) { + err = PTR_ERR(mod->args); + goto free_arch_cleanup; + } + + init_build_id(mod, info); + dynamic_debug_setup(mod, info->debug, info->num_debug); + + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ + ftrace_module_init(mod); + + /* Finally it's fully formed, ready to start executing. */ + err = complete_formation(mod, info); + if (err) + goto ddebug_cleanup; + + err = prepare_coming_module(mod); + if (err) + goto bug_cleanup; + + /* Module is ready to execute: parsing args may do that. 
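+	 * (A parameter's set() op is module code, so the module must
+	 * already be fully formed and in the COMING state here.)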
*/ + after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, + -32768, 32767, mod, + unknown_module_param_cb); + if (IS_ERR(after_dashes)) { + err = PTR_ERR(after_dashes); + goto coming_cleanup; + } else if (after_dashes) { + pr_warn("%s: parameters '%s' after `--' ignored\n", + mod->name, after_dashes); + } + + /* Link in to sysfs. */ + err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); + if (err < 0) + goto coming_cleanup; + + if (is_livepatch_module(mod)) { + err = copy_module_elf(mod, info); + if (err < 0) + goto sysfs_cleanup; + } + + /* Get rid of temporary copy. */ + free_copy(info, flags); + + /* Done! */ + trace_module_load(mod); + + return do_init_module(mod); + + sysfs_cleanup: + mod_sysfs_teardown(mod); + coming_cleanup: + mod->state = MODULE_STATE_GOING; + destroy_params(mod->kp, mod->num_kp); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + bug_cleanup: + mod->state = MODULE_STATE_GOING; + /* module_bug_cleanup needs module_mutex protection */ + mutex_lock(&module_mutex); + module_bug_cleanup(mod); + mutex_unlock(&module_mutex); + + ddebug_cleanup: + ftrace_release_mod(mod); + dynamic_debug_remove(mod, info->debug); + synchronize_rcu(); + kfree(mod->args); + free_arch_cleanup: + cfi_cleanup(mod); + module_arch_cleanup(mod); + free_modinfo: + free_modinfo(mod); + free_unload: + module_unload_free(mod); + unlink_mod: + mutex_lock(&module_mutex); + /* Unlink carefully: kallsyms could be walking list. */ + list_del_rcu(&mod->list); + mod_tree_remove(mod); + wake_up_all(&module_wq); + /* Wait for RCU-sched synchronizing before releasing mod->list. */ + synchronize_rcu(); + mutex_unlock(&module_mutex); + free_module: + /* Free lock-classes; relies on the preceding sync_rcu() */ + lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); + + module_deallocate(mod, info); + free_copy: + free_copy(info, flags); + return err; +} + +SYSCALL_DEFINE3(init_module, void __user *, umod, + unsigned long, len, const char __user *, uargs) +{ + int err; + struct load_info info = { }; + + err = may_init_module(); + if (err) + return err; + + pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", + umod, len, uargs); + + err = copy_module_from_user(umod, len, &info); + if (err) + return err; + + return load_module(&info, uargs, 0); +} + +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +{ + struct load_info info = { }; + void *buf = NULL; + int len; + int err; + + err = may_init_module(); + if (err) + return err; + + pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); + + if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS + |MODULE_INIT_IGNORE_VERMAGIC + |MODULE_INIT_COMPRESSED_FILE)) + return -EINVAL; + + len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL, + READING_MODULE); + if (len < 0) + return len; + + if (flags & MODULE_INIT_COMPRESSED_FILE) { + err = module_decompress(&info, buf, len); + vfree(buf); /* compressed data is no longer needed */ + if (err) + return err; + } else { + info.hdr = buf; + info.len = len; + } + + return load_module(&info, uargs, flags); +} + +static inline int within(unsigned long addr, void *start, unsigned long size) +{ + return ((void *)addr >= start && (void *)addr < start + size); +} + +#ifdef CONFIG_KALLSYMS +/* + * This ignores the intensely annoying "mapping symbols" found + * in ARM ELF files: $a, $t and $d. + */ +static inline int is_arm_mapping_symbol(const char *str) +{ + if (str[0] == '.' 
&& str[1] == 'L') + return true; + return str[0] == '$' && strchr("axtd", str[1]) + && (str[2] == '\0' || str[2] == '.'); +} + +static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) +{ + return kallsyms->strtab + kallsyms->symtab[symnum].st_name; +} + +/* + * Given a module and address, find the corresponding symbol and return its name + * while providing its size and offset if needed. + */ +static const char *find_kallsyms_symbol(struct module *mod, + unsigned long addr, + unsigned long *size, + unsigned long *offset) +{ + unsigned int i, best = 0; + unsigned long nextval, bestval; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) + nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; + else + nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; + + bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); + + /* + * Scan for closest preceding symbol, and next symbol. (ELF + * starts real symbols at 1). + */ + for (i = 1; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + unsigned long thisval = kallsyms_symbol_value(sym); + + if (sym->st_shndx == SHN_UNDEF) + continue; + + /* + * We ignore unnamed symbols: they're uninformative + * and inserted at a whim. + */ + if (*kallsyms_symbol_name(kallsyms, i) == '\0' + || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))) + continue; + + if (thisval <= addr && thisval > bestval) { + best = i; + bestval = thisval; + } + if (thisval > addr && thisval < nextval) + nextval = thisval; + } + + if (!best) + return NULL; + + if (size) + *size = nextval - bestval; + if (offset) + *offset = addr - bestval; + + return kallsyms_symbol_name(kallsyms, best); +} + +void * __weak dereference_module_function_descriptor(struct module *mod, + void *ptr) +{ + return ptr; +} + +/* + * For kallsyms to ask for address resolution. NULL means not found. Careful + * not to lock to avoid deadlock on oopses, simply disable preemption. 
+ */ +const char *module_address_lookup(unsigned long addr, + unsigned long *size, + unsigned long *offset, + char **modname, + const unsigned char **modbuildid, + char *namebuf) +{ + const char *ret = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (mod) { + if (modname) + *modname = mod->name; + if (modbuildid) { +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) + *modbuildid = mod->build_id; +#else + *modbuildid = NULL; +#endif + } + + ret = find_kallsyms_symbol(mod, addr, size, offset); + } + /* Make a copy in here where it's safe */ + if (ret) { + strncpy(namebuf, ret, KSYM_NAME_LEN - 1); + ret = namebuf; + } + preempt_enable(); + + return ret; +} + +int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (within_module(addr, mod)) { + const char *sym; + + sym = find_kallsyms_symbol(mod, addr, NULL, NULL); + if (!sym) + goto out; + + strlcpy(symname, sym, KSYM_NAME_LEN); + preempt_enable(); + return 0; + } + } +out: + preempt_enable(); + return -ERANGE; +} + +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, + unsigned long *offset, char *modname, char *name) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (within_module(addr, mod)) { + const char *sym; + + sym = find_kallsyms_symbol(mod, addr, size, offset); + if (!sym) + goto out; + if (modname) + strlcpy(modname, mod->name, MODULE_NAME_LEN); + if (name) + strlcpy(name, sym, KSYM_NAME_LEN); + preempt_enable(); + return 0; + } + } +out: + preempt_enable(); + return -ERANGE; +} + +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + struct mod_kallsyms *kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + kallsyms = rcu_dereference_sched(mod->kallsyms); + if (symnum < kallsyms->num_symtab) { + const Elf_Sym *sym = &kallsyms->symtab[symnum]; + + *value = kallsyms_symbol_value(sym); + *type = kallsyms->typetab[symnum]; + strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } + symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +} + +/* Given a module and name of symbol, find and return the symbol's value */ +static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) +{ + unsigned int i; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && + sym->st_shndx != SHN_UNDEF) + return kallsyms_symbol_value(sym); + } + return 0; +} + +/* Look for this name: can be of form module:name. */ +unsigned long module_kallsyms_lookup_name(const char *name) +{ + struct module *mod; + char *colon; + unsigned long ret = 0; + + /* Don't lock: we're in enough trouble already. 
*/ + preempt_disable(); + if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { + if ((mod = find_module_all(name, colon - name, false)) != NULL) + ret = find_kallsyms_symbol_value(mod, colon+1); + } else { + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if ((ret = find_kallsyms_symbol_value(mod, name)) != 0) + break; + } + } + preempt_enable(); + return ret; +} + +#ifdef CONFIG_LIVEPATCH +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data) +{ + struct module *mod; + unsigned int i; + int ret = 0; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) { + /* We hold module_mutex: no need for rcu_dereference_sched */ + struct mod_kallsyms *kallsyms = mod->kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (sym->st_shndx == SHN_UNDEF) + continue; + + ret = fn(data, kallsyms_symbol_name(kallsyms, i), + mod, kallsyms_symbol_value(sym)); + if (ret != 0) + goto out; + + cond_resched(); + } + } +out: + mutex_unlock(&module_mutex); + return ret; +} +#endif /* CONFIG_LIVEPATCH */ +#endif /* CONFIG_KALLSYMS */ + +static void cfi_init(struct module *mod) +{ +#ifdef CONFIG_CFI_CLANG + initcall_t *init; + exitcall_t *exit; + + rcu_read_lock_sched(); + mod->cfi_check = (cfi_check_fn) + find_kallsyms_symbol_value(mod, "__cfi_check"); + init = (initcall_t *) + find_kallsyms_symbol_value(mod, "__cfi_jt_init_module"); + exit = (exitcall_t *) + find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module"); + rcu_read_unlock_sched(); + + /* Fix init/exit functions to point to the CFI jump table */ + if (init) + mod->init = *init; +#ifdef CONFIG_MODULE_UNLOAD + if (exit) + mod->exit = *exit; +#endif + + cfi_module_add(mod, module_addr_min); +#endif +} + +static void cfi_cleanup(struct module *mod) +{ +#ifdef CONFIG_CFI_CLANG + cfi_module_remove(mod, module_addr_min); +#endif +} + +/* Maximum number of characters written by module_flags() */ +#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) + +/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ +static char *module_flags(struct module *mod, char *buf) +{ + int bx = 0; + + BUG_ON(mod->state == MODULE_STATE_UNFORMED); + if (mod->taints || + mod->state == MODULE_STATE_GOING || + mod->state == MODULE_STATE_COMING) { + buf[bx++] = '('; + bx += module_flags_taint(mod, buf + bx); + /* Show a - for module-is-being-unloaded */ + if (mod->state == MODULE_STATE_GOING) + buf[bx++] = '-'; + /* Show a + for module-is-being-loaded */ + if (mod->state == MODULE_STATE_COMING) + buf[bx++] = '+'; + buf[bx++] = ')'; + } + buf[bx] = '\0'; + + return buf; +} + +#ifdef CONFIG_PROC_FS +/* Called by the /proc file system to return a list of modules. */ +static void *m_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&module_mutex); + return seq_list_start(&modules, *pos); +} + +static void *m_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &modules, pos); +} + +static void m_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&module_mutex); +} + +static int m_show(struct seq_file *m, void *p) +{ + struct module *mod = list_entry(p, struct module, list); + char buf[MODULE_FLAGS_BUF_SIZE]; + void *value; + + /* We always ignore unformed modules. 
*/ + if (mod->state == MODULE_STATE_UNFORMED) + return 0; + + seq_printf(m, "%s %u", + mod->name, mod->init_layout.size + mod->core_layout.size); + print_unload_info(m, mod); + + /* Informative for users. */ + seq_printf(m, " %s", + mod->state == MODULE_STATE_GOING ? "Unloading" : + mod->state == MODULE_STATE_COMING ? "Loading" : + "Live"); + /* Used by oprofile and other similar tools. */ + value = m->private ? NULL : mod->core_layout.base; + seq_printf(m, " 0x%px", value); + + /* Taints info */ + if (mod->taints) + seq_printf(m, " %s", module_flags(mod, buf)); + + seq_puts(m, "\n"); + return 0; +} + +/* + * Format: modulename size refcount deps address + * + * Where refcount is a number or -, and deps is a comma-separated list + * of depends or -. + */ +static const struct seq_operations modules_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = m_show +}; + +/* + * This also sets the "private" pointer to non-NULL if the + * kernel pointers should be hidden (so you can just test + * "m->private" to see if you should keep the values private). + * + * We use the same logic as for /proc/kallsyms. + */ +static int modules_open(struct inode *inode, struct file *file) +{ + int err = seq_open(file, &modules_op); + + if (!err) { + struct seq_file *m = file->private_data; + m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul; + } + + return err; +} + +static const struct proc_ops modules_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_open = modules_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; + +static int __init proc_modules_init(void) +{ + proc_create("modules", 0, NULL, &modules_proc_ops); + return 0; +} +module_init(proc_modules_init); +#endif + +/* Given an address, look for it in the module exception tables. */ +const struct exception_table_entry *search_module_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (!mod) + goto out; + + if (!mod->num_exentries) + goto out; + + e = search_extable(mod->extable, + mod->num_exentries, + addr); +out: + preempt_enable(); + + /* + * Now, if we found one, we are running inside it now, hence + * we cannot unload the module, hence no refcnt needed. + */ + return e; +} + +/** + * is_module_address() - is this address inside a module? + * @addr: the address to check. + * + * See is_module_text_address() if you simply want to see if the address + * is code (not data). + */ +bool is_module_address(unsigned long addr) +{ + bool ret; + + preempt_disable(); + ret = __module_address(addr) != NULL; + preempt_enable(); + + return ret; +} + +/** + * __module_address() - get the module which contains an address. + * @addr: the address. + * + * Must be called with preempt disabled or module mutex held so that + * module doesn't get freed during this. + */ +struct module *__module_address(unsigned long addr) +{ + struct module *mod; + + if (addr < module_addr_min || addr > module_addr_max) + return NULL; + + module_assert_mutex_or_preempt(); + + mod = mod_find(addr); + if (mod) { + BUG_ON(!within_module(addr, mod)); + if (mod->state == MODULE_STATE_UNFORMED) + mod = NULL; + } + return mod; +} + +/** + * is_module_text_address() - is this address inside module code? + * @addr: the address to check. + * + * See is_module_address() if you simply want to see if the address is + * anywhere in a module. 
See kernel_text_address() for testing if an + * address corresponds to kernel or module code. + */ +bool is_module_text_address(unsigned long addr) +{ + bool ret; + + preempt_disable(); + ret = __module_text_address(addr) != NULL; + preempt_enable(); + + return ret; +} + +/** + * __module_text_address() - get the module whose code contains an address. + * @addr: the address. + * + * Must be called with preempt disabled or module mutex held so that + * module doesn't get freed during this. + */ +struct module *__module_text_address(unsigned long addr) +{ + struct module *mod = __module_address(addr); + if (mod) { + /* Make sure it's within the text section. */ + if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) + && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) + mod = NULL; + } + return mod; +} + +/* Don't grab lock, we're oopsing. */ +void print_modules(void) +{ + struct module *mod; + char buf[MODULE_FLAGS_BUF_SIZE]; + + printk(KERN_DEFAULT "Modules linked in:"); + /* Most callers should already have preempt disabled, but make sure */ + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + pr_cont(" %s%s", mod->name, module_flags(mod, buf)); + } + preempt_enable(); + if (last_unloaded_module[0]) + pr_cont(" [last unloaded: %s]", last_unloaded_module); + pr_cont("\n"); +} + +#ifdef CONFIG_MODVERSIONS +/* + * Generate the signature for all relevant module structures here. + * If these change, we don't want to try to parse the module. + */ +void module_layout(struct module *mod, + struct modversion_info *ver, + struct kernel_param *kp, + struct kernel_symbol *ks, + struct tracepoint * const *tp) +{ +} +EXPORT_SYMBOL(module_layout); +#endif diff --git a/kernel/module/signing.c b/kernel/module/signing.c new file mode 100644 index 000000000000..8aeb6d2ee94b --- /dev/null +++ b/kernel/module/signing.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Module signature checker + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* + * Verify the signature on a module. + */ +int mod_verify_sig(const void *mod, struct load_info *info) +{ + struct module_signature ms; + size_t sig_len, modlen = info->len; + int ret; + + pr_devel("==>%s(,%zu)\n", __func__, modlen); + + if (modlen <= sizeof(ms)) + return -EBADMSG; + + memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); + + ret = mod_check_sig(&ms, modlen, "module"); + if (ret) + return ret; + + sig_len = be32_to_cpu(ms.sig_len); + modlen -= sig_len + sizeof(ms); + info->len = modlen; + + return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); +} -- cgit v1.2.3-59-g8ed1b From 8ab4ed08a24f88359f22439e37cac65c95cf6ac2 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:32 +0000 Subject: module: Simple refactor in preparation for split No functional change. This patch makes it possible to move non-essential code out of core module code. 
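As an illustrative aside (not part of this patch; the helper below is invented
for the example), exporting module_mutex and the modules list through
internal.h, as the hunks below do, is what lets a future kernel/module/*.c
file walk core state without redeclaring it. A minimal sketch, assuming the
usual kernel headers come in via internal.h:

	#include "internal.h"

	/* Hypothetical helper: count fully formed modules. */
	static int nr_live_modules(void)
	{
		struct module *mod;
		int nr = 0;

		mutex_lock(&module_mutex);
		list_for_each_entry(mod, &modules, list) {
			if (mod->state != MODULE_STATE_UNFORMED)
				nr++;
		}
		mutex_unlock(&module_mutex);
		return nr;
	}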
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 21 +++++++++++++++++++++ kernel/module/main.c | 22 ++-------------------- 2 files changed, 23 insertions(+), 20 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 8c381c99062f..ea8c4c02614c 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -7,6 +7,27 @@ #include #include +#include + +#ifndef ARCH_SHF_SMALL +#define ARCH_SHF_SMALL 0 +#endif + +/* If this is set, the section belongs in the init part of the module */ +#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG - 1)) +/* Maximum number of characters written by module_flags() */ +#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) + +extern struct mutex module_mutex; +extern struct list_head modules; + +/* Provided by the linker */ +extern const struct kernel_symbol __start___ksymtab[]; +extern const struct kernel_symbol __stop___ksymtab[]; +extern const struct kernel_symbol __start___ksymtab_gpl[]; +extern const struct kernel_symbol __stop___ksymtab_gpl[]; +extern const s32 __start___kcrctab[]; +extern const s32 __start___kcrctab_gpl[]; struct load_info { const char *name; diff --git a/kernel/module/main.c b/kernel/module/main.c index 1a17f02f69a0..5898a1af41a9 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -63,10 +63,6 @@ #define CREATE_TRACE_POINTS #include -#ifndef ARCH_SHF_SMALL -#define ARCH_SHF_SMALL 0 -#endif - /* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but @@ -78,9 +74,6 @@ # define debug_align(X) (X) #endif -/* If this is set, the section belongs in the init part of the module */ -#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) - /* * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), @@ -88,8 +81,8 @@ * 3) module_addr_min/module_addr_max. * (delete and add uses RCU list operations). */ -static DEFINE_MUTEX(module_mutex); -static LIST_HEAD(modules); +DEFINE_MUTEX(module_mutex); +LIST_HEAD(modules); /* Work queue for freeing init sections in success case */ static void do_free_init(struct work_struct *w); @@ -408,14 +401,6 @@ static __maybe_unused void *any_section_objs(const struct load_info *info, return (void *)info->sechdrs[sec].sh_addr; } -/* Provided by the linker */ -extern const struct kernel_symbol __start___ksymtab[]; -extern const struct kernel_symbol __stop___ksymtab[]; -extern const struct kernel_symbol __start___ksymtab_gpl[]; -extern const struct kernel_symbol __stop___ksymtab_gpl[]; -extern const s32 __start___kcrctab[]; -extern const s32 __start___kcrctab_gpl[]; - #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL #else @@ -4542,9 +4527,6 @@ static void cfi_cleanup(struct module *mod) #endif } -/* Maximum number of characters written by module_flags() */ -#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) - /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! 
*/ static char *module_flags(struct module *mod, char *buf) { -- cgit v1.2.3-59-g8ed1b From 5aff4dfdb4ae2741cfff759d917f597f2c7f70aa Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:33 +0000 Subject: module: Make internal.h and decompress.c more compliant This patch will address the following warning and style violations generated by ./scripts/checkpatch.pl in strict mode: WARNING: Use #include instead of #10: FILE: kernel/module/internal.h:10: +#include CHECK: spaces preferred around that '-' (ctx:VxV) #18: FILE: kernel/module/internal.h:18: +#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) CHECK: Please use a blank line after function/struct/union/enum declarations #69: FILE: kernel/module/internal.h:69: +} +static inline void module_decompress_cleanup(struct load_info *info) ^ CHECK: extern prototypes should be avoided in .h files #84: FILE: kernel/module/internal.h:84: +extern int mod_verify_sig(const void *mod, struct load_info *info); WARNING: Missing a blank line after declarations #116: FILE: kernel/module/decompress.c:116: + struct page *page = module_get_next_page(info); + if (!page) { WARNING: Missing a blank line after declarations #174: FILE: kernel/module/decompress.c:174: + struct page *page = module_get_next_page(info); + if (!page) { CHECK: Please use a blank line after function/struct/union/enum declarations #258: FILE: kernel/module/decompress.c:258: +} +static struct kobj_attribute module_compression_attr = __ATTR_RO(compression); Note: Fortunately, the multiple-include optimisation found in include/linux/module.h will prevent duplication/or inclusion more than once. Fixes: f314dfea16a0 ("modsign: log module name in the event of an error") Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/decompress.c | 3 +++ kernel/module/internal.h | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c index d14d6443225a..2fc7081dd7c1 100644 --- a/kernel/module/decompress.c +++ b/kernel/module/decompress.c @@ -113,6 +113,7 @@ static ssize_t module_gzip_decompress(struct load_info *info, do { struct page *page = module_get_next_page(info); + if (!page) { retval = -ENOMEM; goto out_inflate_end; @@ -171,6 +172,7 @@ static ssize_t module_xz_decompress(struct load_info *info, do { struct page *page = module_get_next_page(info); + if (!page) { retval = -ENOMEM; goto out; @@ -256,6 +258,7 @@ static ssize_t compression_show(struct kobject *kobj, { return sysfs_emit(buf, "%s\n", __stringify(MODULE_COMPRESSION)); } + static struct kobj_attribute module_compression_attr = __ATTR_RO(compression); static int __init module_decompress_sysfs_init(void) diff --git a/kernel/module/internal.h b/kernel/module/internal.h index ea8c4c02614c..e0775e66bcf7 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -6,7 +6,8 @@ */ #include -#include +#include +#include #include #ifndef ARCH_SHF_SMALL @@ -54,7 +55,7 @@ struct load_info { } index; }; -extern int mod_verify_sig(const void *mod, struct load_info *info); +int mod_verify_sig(const void *mod, struct load_info *info); #ifdef CONFIG_MODULE_DECOMPRESS int module_decompress(struct load_info *info, const void *buf, size_t size); @@ -65,6 +66,7 @@ static inline int module_decompress(struct load_info *info, { return -EOPNOTSUPP; } + static inline void module_decompress_cleanup(struct load_info *info) { } -- cgit v1.2.3-59-g8ed1b From 
1be9473e31ab87ad1b6ecf9fd11df461930ede85 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:34 +0000 Subject: module: Move livepatch support to a separate file No functional change. This patch migrates livepatch support (i.e. used during module add/or load and remove/or deletion) from core module code into kernel/module/livepatch.c. At the moment it contains code to persist Elf information about a given livepatch module, only. The new file was added to MAINTAINERS. Reviewed-by: Petr Mladek Tested-by: Petr Mladek Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- MAINTAINERS | 1 + include/linux/module.h | 9 ++-- kernel/module/Makefile | 1 + kernel/module/internal.h | 22 ++++++++++ kernel/module/livepatch.c | 74 +++++++++++++++++++++++++++++++++ kernel/module/main.c | 102 +++++----------------------------------------- 6 files changed, 111 insertions(+), 98 deletions(-) create mode 100644 kernel/module/livepatch.c (limited to 'kernel/module') diff --git a/MAINTAINERS b/MAINTAINERS index 5e7778cd437f..6dcd93fb3a96 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11360,6 +11360,7 @@ F: arch/s390/include/asm/livepatch.h F: arch/x86/include/asm/livepatch.h F: include/linux/livepatch.h F: kernel/livepatch/ +F: kernel/module/livepatch.c F: lib/livepatch/ F: samples/livepatch/ F: tools/testing/selftests/livepatch/ diff --git a/include/linux/module.h b/include/linux/module.h index 1e135fd5c076..7ec9715de7dc 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -663,17 +663,14 @@ static inline bool module_requested_async_probing(struct module *module) return module && module->async_probe_requested; } -#ifdef CONFIG_LIVEPATCH static inline bool is_livepatch_module(struct module *mod) { +#ifdef CONFIG_LIVEPATCH return mod->klp; -} -#else /* !CONFIG_LIVEPATCH */ -static inline bool is_livepatch_module(struct module *mod) -{ +#else return false; +#endif } -#endif /* CONFIG_LIVEPATCH */ bool is_module_sig_enforced(void); void set_module_sig_enforced(void); diff --git a/kernel/module/Makefile b/kernel/module/Makefile index cdd5c61b8c7f..ed3aacb04f17 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -10,3 +10,4 @@ KCOV_INSTRUMENT_module.o := n obj-y += main.o obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o obj-$(CONFIG_MODULE_SIG) += signing.o +obj-$(CONFIG_LIVEPATCH) += livepatch.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index e0775e66bcf7..ad7a444253ed 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -57,6 +57,28 @@ struct load_info { int mod_verify_sig(const void *mod, struct load_info *info); +#ifdef CONFIG_LIVEPATCH +int copy_module_elf(struct module *mod, struct load_info *info); +void free_module_elf(struct module *mod); +#else /* !CONFIG_LIVEPATCH */ +static inline int copy_module_elf(struct module *mod, struct load_info *info) +{ + return 0; +} + +static inline void free_module_elf(struct module *mod) { } +#endif /* CONFIG_LIVEPATCH */ + +static inline bool set_livepatch_module(struct module *mod) +{ +#ifdef CONFIG_LIVEPATCH + mod->klp = true; + return true; +#else + return false; +#endif +} + #ifdef CONFIG_MODULE_DECOMPRESS int module_decompress(struct load_info *info, const void *buf, size_t size); void module_decompress_cleanup(struct load_info *info); diff --git a/kernel/module/livepatch.c b/kernel/module/livepatch.c new file mode 100644 index 000000000000..486d4ff92719 --- /dev/null +++ b/kernel/module/livepatch.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * 
Module livepatch support + * + * Copyright (C) 2016 Jessica Yu + */ + +#include +#include +#include +#include "internal.h" + +/* + * Persist Elf information about a module. Copy the Elf header, + * section header table, section string table, and symtab section + * index from info to mod->klp_info. + */ +int copy_module_elf(struct module *mod, struct load_info *info) +{ + unsigned int size, symndx; + int ret; + + size = sizeof(*mod->klp_info); + mod->klp_info = kmalloc(size, GFP_KERNEL); + if (!mod->klp_info) + return -ENOMEM; + + /* Elf header */ + size = sizeof(mod->klp_info->hdr); + memcpy(&mod->klp_info->hdr, info->hdr, size); + + /* Elf section header table */ + size = sizeof(*info->sechdrs) * info->hdr->e_shnum; + mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); + if (!mod->klp_info->sechdrs) { + ret = -ENOMEM; + goto free_info; + } + + /* Elf section name string table */ + size = info->sechdrs[info->hdr->e_shstrndx].sh_size; + mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); + if (!mod->klp_info->secstrings) { + ret = -ENOMEM; + goto free_sechdrs; + } + + /* Elf symbol section index */ + symndx = info->index.sym; + mod->klp_info->symndx = symndx; + + /* + * For livepatch modules, core_kallsyms.symtab is a complete + * copy of the original symbol table. Adjust sh_addr to point + * to core_kallsyms.symtab since the copy of the symtab in module + * init memory is freed at the end of do_init_module(). + */ + mod->klp_info->sechdrs[symndx].sh_addr = (unsigned long)mod->core_kallsyms.symtab; + + return 0; + +free_sechdrs: + kfree(mod->klp_info->sechdrs); +free_info: + kfree(mod->klp_info); + return ret; +} + +void free_module_elf(struct module *mod) +{ + kfree(mod->klp_info->sechdrs); + kfree(mod->klp_info->secstrings); + kfree(mod->klp_info); +} diff --git a/kernel/module/main.c b/kernel/module/main.c index 5898a1af41a9..915143827069 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2043,81 +2043,6 @@ static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, } #endif /* CONFIG_STRICT_MODULE_RWX */ -#ifdef CONFIG_LIVEPATCH -/* - * Persist Elf information about a module. Copy the Elf header, - * section header table, section string table, and symtab section - * index from info to mod->klp_info. - */ -static int copy_module_elf(struct module *mod, struct load_info *info) -{ - unsigned int size, symndx; - int ret; - - size = sizeof(*mod->klp_info); - mod->klp_info = kmalloc(size, GFP_KERNEL); - if (mod->klp_info == NULL) - return -ENOMEM; - - /* Elf header */ - size = sizeof(mod->klp_info->hdr); - memcpy(&mod->klp_info->hdr, info->hdr, size); - - /* Elf section header table */ - size = sizeof(*info->sechdrs) * info->hdr->e_shnum; - mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); - if (mod->klp_info->sechdrs == NULL) { - ret = -ENOMEM; - goto free_info; - } - - /* Elf section name string table */ - size = info->sechdrs[info->hdr->e_shstrndx].sh_size; - mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); - if (mod->klp_info->secstrings == NULL) { - ret = -ENOMEM; - goto free_sechdrs; - } - - /* Elf symbol section index */ - symndx = info->index.sym; - mod->klp_info->symndx = symndx; - - /* - * For livepatch modules, core_kallsyms.symtab is a complete - * copy of the original symbol table. Adjust sh_addr to point - * to core_kallsyms.symtab since the copy of the symtab in module - * init memory is freed at the end of do_init_module(). 
- */ - mod->klp_info->sechdrs[symndx].sh_addr = \ - (unsigned long) mod->core_kallsyms.symtab; - - return 0; - -free_sechdrs: - kfree(mod->klp_info->sechdrs); -free_info: - kfree(mod->klp_info); - return ret; -} - -static void free_module_elf(struct module *mod) -{ - kfree(mod->klp_info->sechdrs); - kfree(mod->klp_info->secstrings); - kfree(mod->klp_info); -} -#else /* !CONFIG_LIVEPATCH */ -static int copy_module_elf(struct module *mod, struct load_info *info) -{ - return 0; -} - -static void free_module_elf(struct module *mod) -{ -} -#endif /* CONFIG_LIVEPATCH */ - void __weak module_memfree(void *module_region) { /* @@ -3092,30 +3017,23 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l return 0; } -#ifdef CONFIG_LIVEPATCH static int check_modinfo_livepatch(struct module *mod, struct load_info *info) { - if (get_modinfo(info, "livepatch")) { - mod->klp = true; + if (!get_modinfo(info, "livepatch")) + /* Nothing more to do */ + return 0; + + if (set_livepatch_module(mod)) { add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", - mod->name); - } - - return 0; -} -#else /* !CONFIG_LIVEPATCH */ -static int check_modinfo_livepatch(struct module *mod, struct load_info *info) -{ - if (get_modinfo(info, "livepatch")) { - pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", - mod->name); - return -ENOEXEC; + mod->name); + return 0; } - return 0; + pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", + mod->name); + return -ENOEXEC; } -#endif /* CONFIG_LIVEPATCH */ static void check_modinfo_retpoline(struct module *mod, struct load_info *info) { -- cgit v1.2.3-59-g8ed1b From 58d208de3e8d87dbe196caf0b57cc58c7a3836ca Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:35 +0000 Subject: module: Move latched RB-tree support to a separate file No functional change. This patch migrates module latched RB-tree support (e.g. see __module_address()) from core module code into kernel/module/tree_lookup.c. 
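For context, the moved lookup is what makes address-to-module resolution safe
from any context: readers hold an RCU-sched read side (preemption disabled)
and walk the latched tree, so no lock is taken even on unwinding or NMI paths.
A minimal sketch of a caller (illustrative only; __module_address() is the
real entry point, the wrapper name is invented):

	static bool example_addr_in_module(unsigned long addr)
	{
		bool ret;

		preempt_disable();			/* RCU-sched read side */
		ret = __module_address(addr) != NULL;	/* mod_find() under the hood */
		preempt_enable();

		return ret;
	}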
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 33 +++++++++++ kernel/module/main.c | 130 ++------------------------------------------ kernel/module/tree_lookup.c | 109 +++++++++++++++++++++++++++++++++++++ 4 files changed, 147 insertions(+), 126 deletions(-) create mode 100644 kernel/module/tree_lookup.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index ed3aacb04f17..88774e386276 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -11,3 +11,4 @@ obj-y += main.o obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o obj-$(CONFIG_MODULE_SIG) += signing.o obj-$(CONFIG_LIVEPATCH) += livepatch.o +obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index ad7a444253ed..f1682e3677be 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -9,6 +9,7 @@ #include #include #include +#include #ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 @@ -93,3 +94,35 @@ static inline void module_decompress_cleanup(struct load_info *info) { } #endif + +#ifdef CONFIG_MODULES_TREE_LOOKUP +struct mod_tree_root { + struct latch_tree_root root; + unsigned long addr_min; + unsigned long addr_max; +}; + +extern struct mod_tree_root mod_tree; + +void mod_tree_insert(struct module *mod); +void mod_tree_remove_init(struct module *mod); +void mod_tree_remove(struct module *mod); +struct module *mod_find(unsigned long addr); +#else /* !CONFIG_MODULES_TREE_LOOKUP */ + +static inline void mod_tree_insert(struct module *mod) { } +static inline void mod_tree_remove_init(struct module *mod) { } +static inline void mod_tree_remove(struct module *mod) { } +static inline struct module *mod_find(unsigned long addr) +{ + struct module *mod; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + if (within_module(addr, mod)) + return mod; + } + + return NULL; +} +#endif /* CONFIG_MODULES_TREE_LOOKUP */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 915143827069..0d0fdd82359b 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -90,138 +90,16 @@ static DECLARE_WORK(init_free_wq, do_free_init); static LLIST_HEAD(init_free_list); #ifdef CONFIG_MODULES_TREE_LOOKUP - -/* - * Use a latched RB-tree for __module_address(); this allows us to use - * RCU-sched lookups of the address from any context. - * - * This is conditional on PERF_EVENTS || TRACING because those can really hit - * __module_address() hard by doing a lot of stack unwinding; potentially from - * NMI context. 
- */ - -static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) -{ - struct module_layout *layout = container_of(n, struct module_layout, mtn.node); - - return (unsigned long)layout->base; -} - -static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) -{ - struct module_layout *layout = container_of(n, struct module_layout, mtn.node); - - return (unsigned long)layout->size; -} - -static __always_inline bool -mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) -{ - return __mod_tree_val(a) < __mod_tree_val(b); -} - -static __always_inline int -mod_tree_comp(void *key, struct latch_tree_node *n) -{ - unsigned long val = (unsigned long)key; - unsigned long start, end; - - start = __mod_tree_val(n); - if (val < start) - return -1; - - end = start + __mod_tree_size(n); - if (val >= end) - return 1; - - return 0; -} - -static const struct latch_tree_ops mod_tree_ops = { - .less = mod_tree_less, - .comp = mod_tree_comp, -}; - -static struct mod_tree_root { - struct latch_tree_root root; - unsigned long addr_min; - unsigned long addr_max; -} mod_tree __cacheline_aligned = { +struct mod_tree_root mod_tree __cacheline_aligned = { .addr_min = -1UL, }; #define module_addr_min mod_tree.addr_min #define module_addr_max mod_tree.addr_max -static noinline void __mod_tree_insert(struct mod_tree_node *node) -{ - latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops); -} - -static void __mod_tree_remove(struct mod_tree_node *node) -{ - latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops); -} - -/* - * These modifications: insert, remove_init and remove; are serialized by the - * module_mutex. - */ -static void mod_tree_insert(struct module *mod) -{ - mod->core_layout.mtn.mod = mod; - mod->init_layout.mtn.mod = mod; - - __mod_tree_insert(&mod->core_layout.mtn); - if (mod->init_layout.size) - __mod_tree_insert(&mod->init_layout.mtn); -} - -static void mod_tree_remove_init(struct module *mod) -{ - if (mod->init_layout.size) - __mod_tree_remove(&mod->init_layout.mtn); -} - -static void mod_tree_remove(struct module *mod) -{ - __mod_tree_remove(&mod->core_layout.mtn); - mod_tree_remove_init(mod); -} - -static struct module *mod_find(unsigned long addr) -{ - struct latch_tree_node *ltn; - - ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops); - if (!ltn) - return NULL; - - return container_of(ltn, struct mod_tree_node, node)->mod; -} - -#else /* MODULES_TREE_LOOKUP */ - -static unsigned long module_addr_min = -1UL, module_addr_max = 0; - -static void mod_tree_insert(struct module *mod) { } -static void mod_tree_remove_init(struct module *mod) { } -static void mod_tree_remove(struct module *mod) { } - -static struct module *mod_find(unsigned long addr) -{ - struct module *mod; - - list_for_each_entry_rcu(mod, &modules, list, - lockdep_is_held(&module_mutex)) { - if (within_module(addr, mod)) - return mod; - } - - return NULL; -} - -#endif /* MODULES_TREE_LOOKUP */ +#else /* !CONFIG_MODULES_TREE_LOOKUP */ +static unsigned long module_addr_min = -1UL, module_addr_max; +#endif /* CONFIG_MODULES_TREE_LOOKUP */ /* * Bounds of module text, for speeding up __module_address. 
diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c
new file mode 100644
index 000000000000..0bc4ec3b22ce
--- /dev/null
+++ b/kernel/module/tree_lookup.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Modules tree lookup
+ *
+ * Copyright (C) 2015 Peter Zijlstra
+ * Copyright (C) 2015 Rusty Russell
+ */
+
+#include
+#include
+#include "internal.h"
+
+/*
+ * Use a latched RB-tree for __module_address(); this allows us to use
+ * RCU-sched lookups of the address from any context.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
+ */
+
+static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
+{
+	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+
+	return (unsigned long)layout->base;
+}
+
+static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
+{
+	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+
+	return (unsigned long)layout->size;
+}
+
+static __always_inline bool
+mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
+{
+	return __mod_tree_val(a) < __mod_tree_val(b);
+}
+
+static __always_inline int
+mod_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	unsigned long start, end;
+
+	start = __mod_tree_val(n);
+	if (val < start)
+		return -1;
+
+	end = start + __mod_tree_size(n);
+	if (val >= end)
+		return 1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops mod_tree_ops = {
+	.less = mod_tree_less,
+	.comp = mod_tree_comp,
+};
+
+static noinline void __mod_tree_insert(struct mod_tree_node *node)
+{
+	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+static void __mod_tree_remove(struct mod_tree_node *node)
+{
+	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+/*
+ * These modifications: insert, remove_init and remove; are serialized by the
+ * module_mutex.
+ */
+void mod_tree_insert(struct module *mod)
+{
+	mod->core_layout.mtn.mod = mod;
+	mod->init_layout.mtn.mod = mod;
+
+	__mod_tree_insert(&mod->core_layout.mtn);
+	if (mod->init_layout.size)
+		__mod_tree_insert(&mod->init_layout.mtn);
+}
+
+void mod_tree_remove_init(struct module *mod)
+{
+	if (mod->init_layout.size)
+		__mod_tree_remove(&mod->init_layout.mtn);
+}
+
+void mod_tree_remove(struct module *mod)
+{
+	__mod_tree_remove(&mod->core_layout.mtn);
+	mod_tree_remove_init(mod);
+}
+
+struct module *mod_find(unsigned long addr)
+{
+	struct latch_tree_node *ltn;
+
+	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
+	if (!ltn)
+		return NULL;
+
+	return container_of(ltn, struct mod_tree_node, node)->mod;
+}
--
cgit v1.2.3-59-g8ed1b


From b33465fe9c52a3719f013deeca261bd82af235ee Mon Sep 17 00:00:00 2001
From: Aaron Tomlin
Date: Tue, 22 Mar 2022 14:03:36 +0000
Subject: module: Move strict rwx support to a separate file

No functional change.

This patch migrates code that makes module text and rodata memory
read-only and non-text memory non-executable from core module code into
kernel/module/strict_rwx.c.
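A short sketch of the page arithmetic these helpers share (assuming
page-aligned layout offsets, which the BUG_ON()s in strict_rwx.c enforce;
the function below is illustrative and not part of the patch): each frob_*()
helper turns one region of a struct module_layout into a start address plus
a page count for a set_memory_*() call.

	/* Example: make the rodata region [text_size, ro_size) read-only. */
	static void example_protect_rodata(const struct module_layout *layout)
	{
		unsigned long start = (unsigned long)layout->base + layout->text_size;
		int nr_pages = (layout->ro_size - layout->text_size) >> PAGE_SHIFT;

		set_memory_ro(start, nr_pages);
	}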
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 32 +++++++++++++++ kernel/module/main.c | 99 +--------------------------------------------- kernel/module/strict_rwx.c | 85 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 120 insertions(+), 97 deletions(-) create mode 100644 kernel/module/strict_rwx.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 88774e386276..d313c8472cb3 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o obj-$(CONFIG_MODULE_SIG) += signing.o obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o +obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index f1682e3677be..a6895bb5598a 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -20,6 +20,17 @@ /* Maximum number of characters written by module_flags() */ #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) +/* + * Modules' sections will be aligned on page boundaries + * to ensure complete separation of code and data, but + * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y + */ +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX +# define debug_align(X) PAGE_ALIGN(X) +#else +# define debug_align(X) (X) +#endif + extern struct mutex module_mutex; extern struct list_head modules; @@ -126,3 +137,24 @@ static inline struct module *mod_find(unsigned long addr) return NULL; } #endif /* CONFIG_MODULES_TREE_LOOKUP */ + +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX +void frob_text(const struct module_layout *layout, int (*set_memory)(unsigned long start, + int num_pages)); +#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ + +#ifdef CONFIG_STRICT_MODULE_RWX +void module_enable_ro(const struct module *mod, bool after_init); +void module_enable_nx(const struct module *mod); +int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod); + +#else /* !CONFIG_STRICT_MODULE_RWX */ +static inline void module_enable_nx(const struct module *mod) { } +static inline void module_enable_ro(const struct module *mod, bool after_init) {} +static inline int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + return 0; +} +#endif /* CONFIG_STRICT_MODULE_RWX */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 0d0fdd82359b..d55a2a8338a1 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -63,17 +63,6 @@ #define CREATE_TRACE_POINTS #include -/* - * Modules' sections will be aligned on page boundaries - * to ensure complete separation of code and data, but - * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y - */ -#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX -# define debug_align(X) ALIGN(X, PAGE_SIZE) -#else -# define debug_align(X) (X) -#endif - /* * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), @@ -1819,8 +1808,8 @@ static void mod_sysfs_teardown(struct module *mod) * whether we are strict. 
*/ #ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX -static void frob_text(const struct module_layout *layout, - int (*set_memory)(unsigned long start, int num_pages)) +void frob_text(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) { BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); @@ -1837,90 +1826,6 @@ static void module_enable_x(const struct module *mod) static void module_enable_x(const struct module *mod) { } #endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ -#ifdef CONFIG_STRICT_MODULE_RWX -static void frob_rodata(const struct module_layout *layout, - int (*set_memory)(unsigned long start, int num_pages)) -{ - BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); - set_memory((unsigned long)layout->base + layout->text_size, - (layout->ro_size - layout->text_size) >> PAGE_SHIFT); -} - -static void frob_ro_after_init(const struct module_layout *layout, - int (*set_memory)(unsigned long start, int num_pages)) -{ - BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); - set_memory((unsigned long)layout->base + layout->ro_size, - (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT); -} - -static void frob_writable_data(const struct module_layout *layout, - int (*set_memory)(unsigned long start, int num_pages)) -{ - BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); - set_memory((unsigned long)layout->base + layout->ro_after_init_size, - (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); -} - -static void module_enable_ro(const struct module *mod, bool after_init) -{ - if (!rodata_enabled) - return; - - set_vm_flush_reset_perms(mod->core_layout.base); - set_vm_flush_reset_perms(mod->init_layout.base); - frob_text(&mod->core_layout, set_memory_ro); - - frob_rodata(&mod->core_layout, set_memory_ro); - frob_text(&mod->init_layout, set_memory_ro); - frob_rodata(&mod->init_layout, set_memory_ro); - - if (after_init) - frob_ro_after_init(&mod->core_layout, set_memory_ro); -} - -static void module_enable_nx(const struct module *mod) -{ - frob_rodata(&mod->core_layout, set_memory_nx); - frob_ro_after_init(&mod->core_layout, set_memory_nx); - frob_writable_data(&mod->core_layout, set_memory_nx); - frob_rodata(&mod->init_layout, set_memory_nx); - frob_writable_data(&mod->init_layout, set_memory_nx); -} - -static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *mod) -{ - const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR; - int i; - - for (i = 0; i < hdr->e_shnum; i++) { - if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) { - pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n", - mod->name, secstrings + sechdrs[i].sh_name, i); - return -ENOEXEC; - } - } - - return 0; -} - -#else /* !CONFIG_STRICT_MODULE_RWX */ -static void module_enable_nx(const struct module *mod) { } -static void module_enable_ro(const struct module *mod, bool after_init) {} -static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *mod) -{ - return 0; -} -#endif /* CONFIG_STRICT_MODULE_RWX */ - void __weak module_memfree(void *module_region) { 
 	/*
diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c
new file mode 100644
index 000000000000..7949dfd449c2
--- /dev/null
+++ b/kernel/module/strict_rwx.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Module strict rwx
+ *
+ * Copyright (C) 2015 Rusty Russell
+ */
+
+#include
+#include
+#include
+#include
+#include "internal.h"
+
+static void frob_rodata(const struct module_layout *layout,
+			int (*set_memory)(unsigned long start, int num_pages))
+{
+	BUG_ON(!PAGE_ALIGNED(layout->base));
+	BUG_ON(!PAGE_ALIGNED(layout->text_size));
+	BUG_ON(!PAGE_ALIGNED(layout->ro_size));
+	set_memory((unsigned long)layout->base + layout->text_size,
+		   (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
+}
+
+static void frob_ro_after_init(const struct module_layout *layout,
+			       int (*set_memory)(unsigned long start, int num_pages))
+{
+	BUG_ON(!PAGE_ALIGNED(layout->base));
+	BUG_ON(!PAGE_ALIGNED(layout->ro_size));
+	BUG_ON(!PAGE_ALIGNED(layout->ro_after_init_size));
+	set_memory((unsigned long)layout->base + layout->ro_size,
+		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
+}
+
+static void frob_writable_data(const struct module_layout *layout,
+			       int (*set_memory)(unsigned long start, int num_pages))
+{
+	BUG_ON(!PAGE_ALIGNED(layout->base));
+	BUG_ON(!PAGE_ALIGNED(layout->ro_after_init_size));
+	BUG_ON(!PAGE_ALIGNED(layout->size));
+	set_memory((unsigned long)layout->base + layout->ro_after_init_size,
+		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
+}
+
+void module_enable_ro(const struct module *mod, bool after_init)
+{
+	if (!rodata_enabled)
+		return;
+
+	set_vm_flush_reset_perms(mod->core_layout.base);
+	set_vm_flush_reset_perms(mod->init_layout.base);
+	frob_text(&mod->core_layout, set_memory_ro);
+
+	frob_rodata(&mod->core_layout, set_memory_ro);
+	frob_text(&mod->init_layout, set_memory_ro);
+	frob_rodata(&mod->init_layout, set_memory_ro);
+
+	if (after_init)
+		frob_ro_after_init(&mod->core_layout, set_memory_ro);
+}
+
+void module_enable_nx(const struct module *mod)
+{
+	frob_rodata(&mod->core_layout, set_memory_nx);
+	frob_ro_after_init(&mod->core_layout, set_memory_nx);
+	frob_writable_data(&mod->core_layout, set_memory_nx);
+	frob_rodata(&mod->init_layout, set_memory_nx);
+	frob_writable_data(&mod->init_layout, set_memory_nx);
+}
+
+int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+				char *secstrings, struct module *mod)
+{
+	const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR;
+	int i;
+
+	for (i = 0; i < hdr->e_shnum; i++) {
+		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
+			pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
+			       mod->name, secstrings + sechdrs[i].sh_name, i);
+			return -ENOEXEC;
+		}
+	}
+
+	return 0;
+}
--
cgit v1.2.3-59-g8ed1b


From 0c1e42805c25c87eb7a6f3b18bdbf3b3b7840aff Mon Sep 17 00:00:00 2001
From: Aaron Tomlin
Date: Tue, 22 Mar 2022 14:03:37 +0000
Subject: module: Move extra signature support out of core code

No functional change.

This patch migrates additional module signature check code from core
module code into kernel/module/signing.c.
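For reference, the trailer format the moved checks parse: a signed module is
the ELF payload followed by the PKCS#7 signature blob, a struct
module_signature describing it, and the MODULE_SIG_STRING marker. A hedged
sketch of the marker test that module_sig_check() performs (the helper name
is invented for illustration; the real logic is in the hunks below):

	/* Illustrative: does this image end with the signature marker? */
	static bool example_has_sig_marker(const void *mod, unsigned long len)
	{
		const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;

		return len > markerlen &&
		       memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen) == 0;
	}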
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- include/linux/module.h | 12 ++++--- kernel/module/internal.h | 9 +++++ kernel/module/main.c | 87 ------------------------------------------------ kernel/module/signing.c | 77 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+), 92 deletions(-) (limited to 'kernel/module') diff --git a/include/linux/module.h b/include/linux/module.h index 7ec9715de7dc..5e2059f3afc7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -672,7 +672,6 @@ static inline bool is_livepatch_module(struct module *mod) #endif } -bool is_module_sig_enforced(void); void set_module_sig_enforced(void); #else /* !CONFIG_MODULES... */ @@ -799,10 +798,6 @@ static inline bool module_requested_async_probing(struct module *module) return false; } -static inline bool is_module_sig_enforced(void) -{ - return false; -} static inline void set_module_sig_enforced(void) { @@ -854,11 +849,18 @@ static inline bool retpoline_module_ok(bool has_retpoline) #endif #ifdef CONFIG_MODULE_SIG +bool is_module_sig_enforced(void); + static inline bool module_sig_ok(struct module *module) { return module->sig_ok; } #else /* !CONFIG_MODULE_SIG */ +static inline bool is_module_sig_enforced(void) +{ + return false; +} + static inline bool module_sig_ok(struct module *module) { return true; diff --git a/kernel/module/internal.h b/kernel/module/internal.h index a6895bb5598a..d6f646a5da41 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -158,3 +158,12 @@ static inline int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, return 0; } #endif /* CONFIG_STRICT_MODULE_RWX */ + +#ifdef CONFIG_MODULE_SIG +int module_sig_check(struct load_info *info, int flags); +#else /* !CONFIG_MODULE_SIG */ +static inline int module_sig_check(struct load_info *info, int flags) +{ + return 0; +} +#endif /* !CONFIG_MODULE_SIG */ diff --git a/kernel/module/main.c b/kernel/module/main.c index d55a2a8338a1..c349c91e53fa 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -127,28 +126,6 @@ static void module_assert_mutex_or_preempt(void) #endif } -#ifdef CONFIG_MODULE_SIG -static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); -module_param(sig_enforce, bool_enable_only, 0644); - -void set_module_sig_enforced(void) -{ - sig_enforce = true; -} -#else -#define sig_enforce false -#endif - -/* - * Export sig_enforce kernel cmdline parameter to allow other subsystems rely - * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. - */ -bool is_module_sig_enforced(void) -{ - return sig_enforce; -} -EXPORT_SYMBOL(is_module_sig_enforced); - /* Block module loading/unloading? */ int modules_disabled = 0; core_param(nomodule, modules_disabled, bint, 0); @@ -2569,70 +2546,6 @@ static inline void kmemleak_load_module(const struct module *mod, } #endif -#ifdef CONFIG_MODULE_SIG -static int module_sig_check(struct load_info *info, int flags) -{ - int err = -ENODATA; - const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; - const char *reason; - const void *mod = info->hdr; - bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | - MODULE_INIT_IGNORE_VERMAGIC); - /* - * Do not allow mangled modules as a module with version information - * removed is no longer the module that was signed. 
- */ - if (!mangled_module && - info->len > markerlen && - memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { - /* We truncate the module to discard the signature */ - info->len -= markerlen; - err = mod_verify_sig(mod, info); - if (!err) { - info->sig_ok = true; - return 0; - } - } - - /* - * We don't permit modules to be loaded into the trusted kernels - * without a valid signature on them, but if we're not enforcing, - * certain errors are non-fatal. - */ - switch (err) { - case -ENODATA: - reason = "unsigned module"; - break; - case -ENOPKG: - reason = "module with unsupported crypto"; - break; - case -ENOKEY: - reason = "module with unavailable key"; - break; - - default: - /* - * All other errors are fatal, including lack of memory, - * unparseable signatures, and signature check failures -- - * even if signatures aren't required. - */ - return err; - } - - if (is_module_sig_enforced()) { - pr_notice("Loading of %s is rejected\n", reason); - return -EKEYREJECTED; - } - - return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); -} -#else /* !CONFIG_MODULE_SIG */ -static int module_sig_check(struct load_info *info, int flags) -{ - return 0; -} -#endif /* !CONFIG_MODULE_SIG */ - static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) { #if defined(CONFIG_64BIT) diff --git a/kernel/module/signing.c b/kernel/module/signing.c index 8aeb6d2ee94b..85c8999dfecf 100644 --- a/kernel/module/signing.c +++ b/kernel/module/signing.c @@ -11,9 +11,29 @@ #include #include #include +#include #include +#include #include "internal.h" +static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); +module_param(sig_enforce, bool_enable_only, 0644); + +/* + * Export sig_enforce kernel cmdline parameter to allow other subsystems rely + * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. + */ +bool is_module_sig_enforced(void) +{ + return sig_enforce; +} +EXPORT_SYMBOL(is_module_sig_enforced); + +void set_module_sig_enforced(void) +{ + sig_enforce = true; +} + /* * Verify the signature on a module. */ @@ -43,3 +63,60 @@ int mod_verify_sig(const void *mod, struct load_info *info) VERIFYING_MODULE_SIGNATURE, NULL, NULL); } + +int module_sig_check(struct load_info *info, int flags) +{ + int err = -ENODATA; + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const char *reason; + const void *mod = info->hdr; + bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | + MODULE_INIT_IGNORE_VERMAGIC); + /* + * Do not allow mangled modules as a module with version information + * removed is no longer the module that was signed. + */ + if (!mangled_module && + info->len > markerlen && + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { + /* We truncate the module to discard the signature */ + info->len -= markerlen; + err = mod_verify_sig(mod, info); + if (!err) { + info->sig_ok = true; + return 0; + } + } + + /* + * We don't permit modules to be loaded into the trusted kernels + * without a valid signature on them, but if we're not enforcing, + * certain errors are non-fatal. + */ + switch (err) { + case -ENODATA: + reason = "unsigned module"; + break; + case -ENOPKG: + reason = "module with unsupported crypto"; + break; + case -ENOKEY: + reason = "module with unavailable key"; + break; + + default: + /* + * All other errors are fatal, including lack of memory, + * unparseable signatures, and signature check failures -- + * even if signatures aren't required. 
+ */ + return err; + } + + if (is_module_sig_enforced()) { + pr_notice("Loading of %s is rejected\n", reason); + return -EKEYREJECTED; + } + + return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); +} -- cgit v1.2.3-59-g8ed1b From 473c84d1856e83faebf059a52a8e49bdb89026d3 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:38 +0000 Subject: module: Move kmemleak support to a separate file No functional change. This patch migrates kmemleak code out of core module code into kernel/module/debug_kmemleak.c Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/debug_kmemleak.c | 30 ++++++++++++++++++++++++++++++ kernel/module/internal.h | 7 +++++++ kernel/module/main.c | 27 --------------------------- 4 files changed, 38 insertions(+), 27 deletions(-) create mode 100644 kernel/module/debug_kmemleak.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index d313c8472cb3..12388627725c 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_MODULE_SIG) += signing.o obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o +obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o diff --git a/kernel/module/debug_kmemleak.c b/kernel/module/debug_kmemleak.c new file mode 100644 index 000000000000..12a569d361e8 --- /dev/null +++ b/kernel/module/debug_kmemleak.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kmemleak support + * + * Copyright (C) 2009 Catalin Marinas + */ + +#include +#include +#include "internal.h" + +void kmemleak_load_module(const struct module *mod, + const struct load_info *info) +{ + unsigned int i; + + /* only scan the sections containing data */ + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); + + for (i = 1; i < info->hdr->e_shnum; i++) { + /* Scan all writable sections that's not executable */ + if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || + !(info->sechdrs[i].sh_flags & SHF_WRITE) || + (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) + continue; + + kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, + info->sechdrs[i].sh_size, GFP_KERNEL); + } +} diff --git a/kernel/module/internal.h b/kernel/module/internal.h index d6f646a5da41..b0c360839f63 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -167,3 +167,10 @@ static inline int module_sig_check(struct load_info *info, int flags) return 0; } #endif /* !CONFIG_MODULE_SIG */ + +#ifdef CONFIG_DEBUG_KMEMLEAK +void kmemleak_load_module(const struct module *mod, const struct load_info *info); +#else /* !CONFIG_DEBUG_KMEMLEAK */ +static inline void kmemleak_load_module(const struct module *mod, + const struct load_info *info) { } +#endif /* CONFIG_DEBUG_KMEMLEAK */ diff --git a/kernel/module/main.c b/kernel/module/main.c index c349c91e53fa..d400c477c35a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2519,33 +2519,6 @@ bool __weak module_exit_section(const char *name) return strstarts(name, ".exit"); } -#ifdef CONFIG_DEBUG_KMEMLEAK -static void kmemleak_load_module(const struct module *mod, - const struct load_info *info) -{ - unsigned int i; - - /* only scan the sections containing data */ - kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); - - for (i = 1; i < info->hdr->e_shnum; i++) { - /* Scan all writable sections that's not executable */ - if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) 
|| - !(info->sechdrs[i].sh_flags & SHF_WRITE) || - (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) - continue; - - kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, - info->sechdrs[i].sh_size, GFP_KERNEL); - } -} -#else -static inline void kmemleak_load_module(const struct module *mod, - const struct load_info *info) -{ -} -#endif - static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) { #if defined(CONFIG_64BIT) -- cgit v1.2.3-59-g8ed1b From 91fb02f31505dc22262b13a129550f470ab90a79 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:39 +0000 Subject: module: Move kallsyms support into a separate file No functional change. This patch migrates kallsyms code out of core module code kernel/module/kallsyms.c Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 29 +++ kernel/module/kallsyms.c | 502 ++++++++++++++++++++++++++++++++++++++++++++ kernel/module/main.c | 531 +---------------------------------------------- 4 files changed, 538 insertions(+), 525 deletions(-) create mode 100644 kernel/module/kallsyms.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 12388627725c..9901bed3ab5b 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o +obj-$(CONFIG_KALLSYMS) += kallsyms.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index b0c360839f63..44ca05b9eb8f 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -68,6 +68,19 @@ struct load_info { }; int mod_verify_sig(const void *mod, struct load_info *info); +struct module *find_module_all(const char *name, size_t len, bool even_unformed); +int cmp_name(const void *name, const void *sym); +long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, + unsigned int section); + +static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) +{ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + return (unsigned long)offset_to_ptr(&sym->value_offset); +#else + return sym->value; +#endif +} #ifdef CONFIG_LIVEPATCH int copy_module_elf(struct module *mod, struct load_info *info); @@ -174,3 +187,19 @@ void kmemleak_load_module(const struct module *mod, const struct load_info *info static inline void kmemleak_load_module(const struct module *mod, const struct load_info *info) { } #endif /* CONFIG_DEBUG_KMEMLEAK */ + +#ifdef CONFIG_KALLSYMS +void init_build_id(struct module *mod, const struct load_info *info); +void layout_symtab(struct module *mod, struct load_info *info); +void add_kallsyms(struct module *mod, const struct load_info *info); +unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name); + +static inline bool sect_empty(const Elf_Shdr *sect) +{ + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; +} +#else /* !CONFIG_KALLSYMS */ +static inline void init_build_id(struct module *mod, const struct load_info *info) { } +static inline void layout_symtab(struct module *mod, struct load_info *info) { } +static inline void add_kallsyms(struct module *mod, const struct load_info *info) { } +#endif /* CONFIG_KALLSYMS */ diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c new file mode 100644 index 000000000000..1b0780e20aab --- /dev/null +++ b/kernel/module/kallsyms.c @@ -0,0 
+1,502 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kallsyms support + * + * Copyright (C) 2010 Rusty Russell + */ + +#include +#include +#include +#include +#include "internal.h" + +/* Lookup exported symbol in given range of kernel_symbols */ +static const struct kernel_symbol *lookup_exported_symbol(const char *name, + const struct kernel_symbol *start, + const struct kernel_symbol *stop) +{ + return bsearch(name, start, stop - start, + sizeof(struct kernel_symbol), cmp_name); +} + +static int is_exported(const char *name, unsigned long value, + const struct module *mod) +{ + const struct kernel_symbol *ks; + + if (!mod) + ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); + else + ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); + + return ks && kernel_symbol_value(ks) == value; +} + +/* As per nm */ +static char elf_type(const Elf_Sym *sym, const struct load_info *info) +{ + const Elf_Shdr *sechdrs = info->sechdrs; + + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { + if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) + return 'v'; + else + return 'w'; + } + if (sym->st_shndx == SHN_UNDEF) + return 'U'; + if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) + return 'a'; + if (sym->st_shndx >= SHN_LORESERVE) + return '?'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) + return 't'; + if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC && + sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { + if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) + return 'r'; + else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 'g'; + else + return 'd'; + } + if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { + if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) + return 's'; + else + return 'b'; + } + if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, + ".debug")) { + return 'n'; + } + return '?'; +} + +static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, + unsigned int shnum, unsigned int pcpundx) +{ + const Elf_Shdr *sec; + + if (src->st_shndx == SHN_UNDEF || + src->st_shndx >= shnum || + !src->st_name) + return false; + +#ifdef CONFIG_KALLSYMS_ALL + if (src->st_shndx == pcpundx) + return true; +#endif + + sec = sechdrs + src->st_shndx; + if (!(sec->sh_flags & SHF_ALLOC) +#ifndef CONFIG_KALLSYMS_ALL + || !(sec->sh_flags & SHF_EXECINSTR) +#endif + || (sec->sh_entsize & INIT_OFFSET_MASK)) + return false; + + return true; +} + +/* + * We only allocate and copy the strings needed by the parts of symtab + * we keep. This is simple, but has the effect of making multiple + * copies of duplicates. We could be more sophisticated, see + * linux-kernel thread starting with + * <73defb5e4bca04a6431392cc341112b1@localhost>. + */ +void layout_symtab(struct module *mod, struct load_info *info) +{ + Elf_Shdr *symsect = info->sechdrs + info->index.sym; + Elf_Shdr *strsect = info->sechdrs + info->index.str; + const Elf_Sym *src; + unsigned int i, nsrc, ndst, strtab_size = 0; + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; + symsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, symsect, + info->index.sym) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + symsect->sh_name); + + src = (void *)info->hdr + symsect->sh_offset; + nsrc = symsect->sh_size / sizeof(*src); + + /* Compute total space required for the core symbols' strtab. 
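lookup_exported_symbol() at the top of this new file relies on the export tables being sorted by name, so cmp_name() plus bsearch() give O(log n) lookup. The same pattern in plain userspace C (the struct and table contents are stand-ins for the real kernel_symbol layout):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sym {
        const char *name;
        unsigned long value;
};

static int cmp_name(const void *key, const void *elem)
{
        return strcmp(key, ((const struct sym *)elem)->name);
}

int main(void)
{
        /* Must be sorted by name, like the kernel's ksymtab sections. */
        static const struct sym tab[] = {
                { "alpha", 0x1000 }, { "beta", 0x2000 }, { "gamma", 0x3000 },
        };
        const struct sym *s = bsearch("beta", tab, 3, sizeof(tab[0]), cmp_name);

        if (s)
                printf("%s -> %#lx\n", s->name, s->value);
        return 0;
}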
*/ + for (ndst = i = 0; i < nsrc; i++) { + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + strtab_size += strlen(&info->strtab[src[i].st_name]) + 1; + ndst++; + } + } + + /* Append room for core symbols at end of core part. */ + info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); + info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); + mod->core_layout.size += strtab_size; + info->core_typeoffs = mod->core_layout.size; + mod->core_layout.size += ndst * sizeof(char); + mod->core_layout.size = debug_align(mod->core_layout.size); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; + strsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + + /* We'll tack temporary mod_kallsyms on the end. */ + mod->init_layout.size = ALIGN(mod->init_layout.size, + __alignof__(struct mod_kallsyms)); + info->mod_kallsyms_init_off = mod->init_layout.size; + mod->init_layout.size += sizeof(struct mod_kallsyms); + info->init_typeoffs = mod->init_layout.size; + mod->init_layout.size += nsrc * sizeof(char); + mod->init_layout.size = debug_align(mod->init_layout.size); +} + +/* + * We use the full symtab and strtab which layout_symtab arranged to + * be appended to the init section. Later we switch to the cut-down + * core-only ones. + */ +void add_kallsyms(struct module *mod, const struct load_info *info) +{ + unsigned int i, ndst; + const Elf_Sym *src; + Elf_Sym *dst; + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + + /* Set up to point into init section. */ + mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; + + /* The following is safe since this pointer cannot change */ + mod->kallsyms->symtab = (void *)symsec->sh_addr; + mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; + + /* + * Now populate the cut down core kallsyms for after init + * and set types up while we still have access to sections. 
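The add_kallsyms() copy loop further below does the complementary step to this sizing pass: each kept name is appended to the fresh core strtab and the symbol's st_name is rewritten to the offset where the string landed. The packing step in isolation (the caller sizes the table first, exactly as layout_symtab() does):

#include <stdio.h>
#include <string.h>

/*
 * Append a name to a string table at *off and return the offset the
 * symbol should record; the caller guarantees the table is big enough.
 */
static unsigned long strtab_add(char *tab, unsigned long *off, const char *name)
{
        unsigned long at = *off;

        strcpy(tab + at, name);
        *off += strlen(name) + 1;
        return at;
}

int main(void)
{
        char tab[64];
        unsigned long off = 0;

        printf("%lu\n", strtab_add(tab, &off, "init_once"));    /* 0 */
        printf("%lu\n", strtab_add(tab, &off, "do_work"));      /* 10 */
        return 0;
}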
+ */ + mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; + mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; + mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; + src = mod->kallsyms->symtab; + for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + mod->kallsyms->typetab[i] = elf_type(src + i, info); + if (i == 0 || is_livepatch_module(mod) || + is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, + info->index.pcpu)) { + mod->core_kallsyms.typetab[ndst] = + mod->kallsyms->typetab[i]; + dst[ndst] = src[i]; + dst[ndst++].st_name = s - mod->core_kallsyms.strtab; + s += strscpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } + mod->core_kallsyms.num_symtab = ndst; +} + +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) +void init_build_id(struct module *mod, const struct load_info *info) +{ + const Elf_Shdr *sechdr; + unsigned int i; + + for (i = 0; i < info->hdr->e_shnum; i++) { + sechdr = &info->sechdrs[i]; + if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE && + !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id, + sechdr->sh_size)) + break; + } +} +#else +void init_build_id(struct module *mod, const struct load_info *info) +{ +} +#endif + +/* + * This ignores the intensely annoying "mapping symbols" found + * in ARM ELF files: $a, $t and $d. + */ +static inline int is_arm_mapping_symbol(const char *str) +{ + if (str[0] == '.' && str[1] == 'L') + return true; + return str[0] == '$' && strchr("axtd", str[1]) && + (str[2] == '\0' || str[2] == '.'); +} + +static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) +{ + return kallsyms->strtab + kallsyms->symtab[symnum].st_name; +} + +/* + * Given a module and address, find the corresponding symbol and return its name + * while providing its size and offset if needed. + */ +static const char *find_kallsyms_symbol(struct module *mod, + unsigned long addr, + unsigned long *size, + unsigned long *offset) +{ + unsigned int i, best = 0; + unsigned long nextval, bestval; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) + nextval = (unsigned long)mod->init_layout.base + mod->init_layout.text_size; + else + nextval = (unsigned long)mod->core_layout.base + mod->core_layout.text_size; + + bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); + + /* + * Scan for closest preceding symbol, and next symbol. (ELF + * starts real symbols at 1). + */ + for (i = 1; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + unsigned long thisval = kallsyms_symbol_value(sym); + + if (sym->st_shndx == SHN_UNDEF) + continue; + + /* + * We ignore unnamed symbols: they're uninformative + * and inserted at a whim. + */ + if (*kallsyms_symbol_name(kallsyms, i) == '\0' || + is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))) + continue; + + if (thisval <= addr && thisval > bestval) { + best = i; + bestval = thisval; + } + if (thisval > addr && thisval < nextval) + nextval = thisval; + } + + if (!best) + return NULL; + + if (size) + *size = nextval - bestval; + if (offset) + *offset = addr - bestval; + + return kallsyms_symbol_name(kallsyms, best); +} + +void * __weak dereference_module_function_descriptor(struct module *mod, + void *ptr) +{ + return ptr; +} + +/* + * For kallsyms to ask for address resolution. NULL means not found. 
Careful + * not to lock to avoid deadlock on oopses, simply disable preemption. + */ +const char *module_address_lookup(unsigned long addr, + unsigned long *size, + unsigned long *offset, + char **modname, + const unsigned char **modbuildid, + char *namebuf) +{ + const char *ret = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (mod) { + if (modname) + *modname = mod->name; + if (modbuildid) { +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) + *modbuildid = mod->build_id; +#else + *modbuildid = NULL; +#endif + } + + ret = find_kallsyms_symbol(mod, addr, size, offset); + } + /* Make a copy in here where it's safe */ + if (ret) { + strncpy(namebuf, ret, KSYM_NAME_LEN - 1); + ret = namebuf; + } + preempt_enable(); + + return ret; +} + +int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (within_module(addr, mod)) { + const char *sym; + + sym = find_kallsyms_symbol(mod, addr, NULL, NULL); + if (!sym) + goto out; + + strscpy(symname, sym, KSYM_NAME_LEN); + preempt_enable(); + return 0; + } + } +out: + preempt_enable(); + return -ERANGE; +} + +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, + unsigned long *offset, char *modname, char *name) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (within_module(addr, mod)) { + const char *sym; + + sym = find_kallsyms_symbol(mod, addr, size, offset); + if (!sym) + goto out; + if (modname) + strscpy(modname, mod->name, MODULE_NAME_LEN); + if (name) + strscpy(name, sym, KSYM_NAME_LEN); + preempt_enable(); + return 0; + } + } +out: + preempt_enable(); + return -ERANGE; +} + +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported) +{ + struct module *mod; + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { + struct mod_kallsyms *kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + kallsyms = rcu_dereference_sched(mod->kallsyms); + if (symnum < kallsyms->num_symtab) { + const Elf_Sym *sym = &kallsyms->symtab[symnum]; + + *value = kallsyms_symbol_value(sym); + *type = kallsyms->typetab[symnum]; + strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); + strscpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } + symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +} + +/* Given a module and name of symbol, find and return the symbol's value */ +unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) +{ + unsigned int i; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && + sym->st_shndx != SHN_UNDEF) + return kallsyms_symbol_value(sym); + } + return 0; +} + +/* Look for this name: can be of form module:name. */ +unsigned long module_kallsyms_lookup_name(const char *name) +{ + struct module *mod; + char *colon; + unsigned long ret = 0; + + /* Don't lock: we're in enough trouble already. 
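module_kallsyms_lookup_name(), continued just below, accepts both "symbol" and "module:symbol" forms; the colon search is bounded by MODULE_NAME_LEN exactly as its strnchr() call is. A userspace sketch of the same parse (split_symbol() and its buffer handling are this illustration's, not kernel API):

#include <stdio.h>
#include <string.h>

#define MODULE_NAME_LEN 56      /* (64 - sizeof(unsigned long)) on 64-bit */

/* Split "mod:sym"; returns the symbol part, copies any module part. */
static const char *split_symbol(const char *name, char *modname, size_t modlen)
{
        const char *colon = memchr(name, ':', strnlen(name, MODULE_NAME_LEN));

        if (!colon) {
                modname[0] = '\0';
                return name;
        }
        snprintf(modname, modlen, "%.*s", (int)(colon - name), name);
        return colon + 1;
}

int main(void)
{
        char mod[MODULE_NAME_LEN];
        const char *sym = split_symbol("ext4:ext4_mount", mod, sizeof(mod));

        printf("module='%s' symbol='%s'\n", mod, sym);
        return 0;
}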
*/ + preempt_disable(); + if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { + if ((mod = find_module_all(name, colon - name, false)) != NULL) + ret = find_kallsyms_symbol_value(mod, colon + 1); + } else { + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if ((ret = find_kallsyms_symbol_value(mod, name)) != 0) + break; + } + } + preempt_enable(); + return ret; +} + +#ifdef CONFIG_LIVEPATCH +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data) +{ + struct module *mod; + unsigned int i; + int ret = 0; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) { + /* We hold module_mutex: no need for rcu_dereference_sched */ + struct mod_kallsyms *kallsyms = mod->kallsyms; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (sym->st_shndx == SHN_UNDEF) + continue; + + ret = fn(data, kallsyms_symbol_name(kallsyms, i), + mod, kallsyms_symbol_value(sym)); + if (ret != 0) + goto out; + } + } +out: + mutex_unlock(&module_mutex); + return ret; +} +#endif /* CONFIG_LIVEPATCH */ diff --git a/kernel/module/main.c b/kernel/module/main.c index d400c477c35a..4d74988e1814 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -288,15 +288,6 @@ static bool check_exported_symbol(const struct symsearch *syms, return true; } -static unsigned long kernel_symbol_value(const struct kernel_symbol *sym) -{ -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS - return (unsigned long)offset_to_ptr(&sym->value_offset); -#else - return sym->value; -#endif -} - static const char *kernel_symbol_name(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -317,7 +308,7 @@ static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) #endif } -static int cmp_name(const void *name, const void *sym) +int cmp_name(const void *name, const void *sym) { return strcmp(name, kernel_symbol_name(sym)); } @@ -387,8 +378,8 @@ static bool find_symbol(struct find_symbol_arg *fsa) * Search for module by name: must hold module_mutex (or preempt disabled * for read-only access). */ -static struct module *find_module_all(const char *name, size_t len, - bool even_unformed) +struct module *find_module_all(const char *name, size_t len, + bool even_unformed) { struct module *mod; @@ -1294,13 +1285,6 @@ resolve_symbol_wait(struct module *mod, return ksym; } -#ifdef CONFIG_KALLSYMS -static inline bool sect_empty(const Elf_Shdr *sect) -{ - return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; -} -#endif - /* * /sys/module/foo/sections stuff * J. Corbet @@ -2065,7 +2049,7 @@ unsigned int __weak arch_mod_section_prepend(struct module *mod, } /* Update size with this section: return offset. 
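The get_offset() to module_get_offset() rename below is worth pausing on, since kallsyms.c now calls it for the symtab and strtab sections: the helper is one bump-allocator step that aligns the running layout size, hands back that offset, and grows the total. Its core logic, minus the module-specific types and the arch prepend hook:

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

/*
 * Reserve len bytes at the given alignment inside a growing region;
 * returns the offset the section should record.
 */
static unsigned long reserve(unsigned long *size, unsigned long len,
                             unsigned long align)
{
        unsigned long off = ALIGN_UP(*size, align ? align : 1);

        *size = off + len;
        return off;
}

int main(void)
{
        unsigned long size = 0;

        printf("%lu\n", reserve(&size, 100, 8));        /* 0, size now 100 */
        printf("%lu\n", reserve(&size, 10, 64));        /* 128, size now 138 */
        printf("%lu\n", reserve(&size, 4, 4));          /* 140, size now 144 */
        return 0;
}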
*/ -static long get_offset(struct module *mod, unsigned int *size, +long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section) { long ret; @@ -2121,7 +2105,7 @@ static void layout_sections(struct module *mod, struct load_info *info) || s->sh_entsize != ~0UL || module_init_layout_section(sname)) continue; - s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); + s->sh_entsize = module_get_offset(mod, &mod->core_layout.size, s, i); pr_debug("\t%s\n", sname); } switch (m) { @@ -2154,7 +2138,7 @@ static void layout_sections(struct module *mod, struct load_info *info) || s->sh_entsize != ~0UL || !module_init_layout_section(sname)) continue; - s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) + s->sh_entsize = (module_get_offset(mod, &mod->init_layout.size, s, i) | INIT_OFFSET_MASK); pr_debug("\t%s\n", sname); } @@ -2267,228 +2251,6 @@ static void free_modinfo(struct module *mod) } } -#ifdef CONFIG_KALLSYMS - -/* Lookup exported symbol in given range of kernel_symbols */ -static const struct kernel_symbol *lookup_exported_symbol(const char *name, - const struct kernel_symbol *start, - const struct kernel_symbol *stop) -{ - return bsearch(name, start, stop - start, - sizeof(struct kernel_symbol), cmp_name); -} - -static int is_exported(const char *name, unsigned long value, - const struct module *mod) -{ - const struct kernel_symbol *ks; - if (!mod) - ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); - else - ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); - - return ks != NULL && kernel_symbol_value(ks) == value; -} - -/* As per nm */ -static char elf_type(const Elf_Sym *sym, const struct load_info *info) -{ - const Elf_Shdr *sechdrs = info->sechdrs; - - if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { - if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) - return 'v'; - else - return 'w'; - } - if (sym->st_shndx == SHN_UNDEF) - return 'U'; - if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) - return 'a'; - if (sym->st_shndx >= SHN_LORESERVE) - return '?'; - if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) - return 't'; - if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC - && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { - if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) - return 'r'; - else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) - return 'g'; - else - return 'd'; - } - if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { - if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) - return 's'; - else - return 'b'; - } - if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, - ".debug")) { - return 'n'; - } - return '?'; -} - -static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, - unsigned int shnum, unsigned int pcpundx) -{ - const Elf_Shdr *sec; - - if (src->st_shndx == SHN_UNDEF - || src->st_shndx >= shnum - || !src->st_name) - return false; - -#ifdef CONFIG_KALLSYMS_ALL - if (src->st_shndx == pcpundx) - return true; -#endif - - sec = sechdrs + src->st_shndx; - if (!(sec->sh_flags & SHF_ALLOC) -#ifndef CONFIG_KALLSYMS_ALL - || !(sec->sh_flags & SHF_EXECINSTR) -#endif - || (sec->sh_entsize & INIT_OFFSET_MASK)) - return false; - - return true; -} - -/* - * We only allocate and copy the strings needed by the parts of symtab - * we keep. This is simple, but has the effect of making multiple - * copies of duplicates. 
We could be more sophisticated, see - * linux-kernel thread starting with - * <73defb5e4bca04a6431392cc341112b1@localhost>. - */ -static void layout_symtab(struct module *mod, struct load_info *info) -{ - Elf_Shdr *symsect = info->sechdrs + info->index.sym; - Elf_Shdr *strsect = info->sechdrs + info->index.str; - const Elf_Sym *src; - unsigned int i, nsrc, ndst, strtab_size = 0; - - /* Put symbol section at end of init part of module. */ - symsect->sh_flags |= SHF_ALLOC; - symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, - info->index.sym) | INIT_OFFSET_MASK; - pr_debug("\t%s\n", info->secstrings + symsect->sh_name); - - src = (void *)info->hdr + symsect->sh_offset; - nsrc = symsect->sh_size / sizeof(*src); - - /* Compute total space required for the core symbols' strtab. */ - for (ndst = i = 0; i < nsrc; i++) { - if (i == 0 || is_livepatch_module(mod) || - is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, - info->index.pcpu)) { - strtab_size += strlen(&info->strtab[src[i].st_name])+1; - ndst++; - } - } - - /* Append room for core symbols at end of core part. */ - info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); - info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); - mod->core_layout.size += strtab_size; - info->core_typeoffs = mod->core_layout.size; - mod->core_layout.size += ndst * sizeof(char); - mod->core_layout.size = debug_align(mod->core_layout.size); - - /* Put string table section at end of init part of module. */ - strsect->sh_flags |= SHF_ALLOC; - strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, - info->index.str) | INIT_OFFSET_MASK; - pr_debug("\t%s\n", info->secstrings + strsect->sh_name); - - /* We'll tack temporary mod_kallsyms on the end. */ - mod->init_layout.size = ALIGN(mod->init_layout.size, - __alignof__(struct mod_kallsyms)); - info->mod_kallsyms_init_off = mod->init_layout.size; - mod->init_layout.size += sizeof(struct mod_kallsyms); - info->init_typeoffs = mod->init_layout.size; - mod->init_layout.size += nsrc * sizeof(char); - mod->init_layout.size = debug_align(mod->init_layout.size); -} - -/* - * We use the full symtab and strtab which layout_symtab arranged to - * be appended to the init section. Later we switch to the cut-down - * core-only ones. - */ -static void add_kallsyms(struct module *mod, const struct load_info *info) -{ - unsigned int i, ndst; - const Elf_Sym *src; - Elf_Sym *dst; - char *s; - Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; - - /* Set up to point into init section. */ - mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; - - mod->kallsyms->symtab = (void *)symsec->sh_addr; - mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); - /* Make sure we get permanent strtab: don't use info->strtab. */ - mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; - mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; - - /* - * Now populate the cut down core kallsyms for after init - * and set types up while we still have access to sections. 
- */ - mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; - mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; - mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; - src = mod->kallsyms->symtab; - for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { - mod->kallsyms->typetab[i] = elf_type(src + i, info); - if (i == 0 || is_livepatch_module(mod) || - is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, - info->index.pcpu)) { - mod->core_kallsyms.typetab[ndst] = - mod->kallsyms->typetab[i]; - dst[ndst] = src[i]; - dst[ndst++].st_name = s - mod->core_kallsyms.strtab; - s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], - KSYM_NAME_LEN) + 1; - } - } - mod->core_kallsyms.num_symtab = ndst; -} -#else -static inline void layout_symtab(struct module *mod, struct load_info *info) -{ -} - -static void add_kallsyms(struct module *mod, const struct load_info *info) -{ -} -#endif /* CONFIG_KALLSYMS */ - -#if IS_ENABLED(CONFIG_KALLSYMS) && IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) -static void init_build_id(struct module *mod, const struct load_info *info) -{ - const Elf_Shdr *sechdr; - unsigned int i; - - for (i = 0; i < info->hdr->e_shnum; i++) { - sechdr = &info->sechdrs[i]; - if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE && - !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id, - sechdr->sh_size)) - break; - } -} -#else -static void init_build_id(struct module *mod, const struct load_info *info) -{ -} -#endif - static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num) { if (!debug) @@ -3799,287 +3561,6 @@ static inline int within(unsigned long addr, void *start, unsigned long size) return ((void *)addr >= start && (void *)addr < start + size); } -#ifdef CONFIG_KALLSYMS -/* - * This ignores the intensely annoying "mapping symbols" found - * in ARM ELF files: $a, $t and $d. - */ -static inline int is_arm_mapping_symbol(const char *str) -{ - if (str[0] == '.' && str[1] == 'L') - return true; - return str[0] == '$' && strchr("axtd", str[1]) - && (str[2] == '\0' || str[2] == '.'); -} - -static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) -{ - return kallsyms->strtab + kallsyms->symtab[symnum].st_name; -} - -/* - * Given a module and address, find the corresponding symbol and return its name - * while providing its size and offset if needed. - */ -static const char *find_kallsyms_symbol(struct module *mod, - unsigned long addr, - unsigned long *size, - unsigned long *offset) -{ - unsigned int i, best = 0; - unsigned long nextval, bestval; - struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); - - /* At worse, next value is at end of module */ - if (within_module_init(addr, mod)) - nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; - else - nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; - - bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); - - /* - * Scan for closest preceding symbol, and next symbol. (ELF - * starts real symbols at 1). - */ - for (i = 1; i < kallsyms->num_symtab; i++) { - const Elf_Sym *sym = &kallsyms->symtab[i]; - unsigned long thisval = kallsyms_symbol_value(sym); - - if (sym->st_shndx == SHN_UNDEF) - continue; - - /* - * We ignore unnamed symbols: they're uninformative - * and inserted at a whim. 
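The filter a few lines below skips unnamed symbols and ARM "mapping symbols"; is_arm_mapping_symbol(), deleted here after moving verbatim into kallsyms.c, is compact enough to exercise standalone. Reproduced with a small self-test (test inputs chosen for illustration):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* As in the kernel: matches $a/$x/$t/$d markers and .L-local labels. */
static bool is_arm_mapping_symbol(const char *str)
{
        if (str[0] == '.' && str[1] == 'L')
                return true;
        return str[0] == '$' && strchr("axtd", str[1]) &&
               (str[2] == '\0' || str[2] == '.');
}

int main(void)
{
        assert(is_arm_mapping_symbol("$a"));
        assert(is_arm_mapping_symbol("$d.1"));
        assert(is_arm_mapping_symbol(".Lanon"));
        assert(!is_arm_mapping_symbol("$foo"));
        assert(!is_arm_mapping_symbol("printk"));
        return 0;
}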
- */ - if (*kallsyms_symbol_name(kallsyms, i) == '\0' - || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))) - continue; - - if (thisval <= addr && thisval > bestval) { - best = i; - bestval = thisval; - } - if (thisval > addr && thisval < nextval) - nextval = thisval; - } - - if (!best) - return NULL; - - if (size) - *size = nextval - bestval; - if (offset) - *offset = addr - bestval; - - return kallsyms_symbol_name(kallsyms, best); -} - -void * __weak dereference_module_function_descriptor(struct module *mod, - void *ptr) -{ - return ptr; -} - -/* - * For kallsyms to ask for address resolution. NULL means not found. Careful - * not to lock to avoid deadlock on oopses, simply disable preemption. - */ -const char *module_address_lookup(unsigned long addr, - unsigned long *size, - unsigned long *offset, - char **modname, - const unsigned char **modbuildid, - char *namebuf) -{ - const char *ret = NULL; - struct module *mod; - - preempt_disable(); - mod = __module_address(addr); - if (mod) { - if (modname) - *modname = mod->name; - if (modbuildid) { -#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) - *modbuildid = mod->build_id; -#else - *modbuildid = NULL; -#endif - } - - ret = find_kallsyms_symbol(mod, addr, size, offset); - } - /* Make a copy in here where it's safe */ - if (ret) { - strncpy(namebuf, ret, KSYM_NAME_LEN - 1); - ret = namebuf; - } - preempt_enable(); - - return ret; -} - -int lookup_module_symbol_name(unsigned long addr, char *symname) -{ - struct module *mod; - - preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) { - if (mod->state == MODULE_STATE_UNFORMED) - continue; - if (within_module(addr, mod)) { - const char *sym; - - sym = find_kallsyms_symbol(mod, addr, NULL, NULL); - if (!sym) - goto out; - - strlcpy(symname, sym, KSYM_NAME_LEN); - preempt_enable(); - return 0; - } - } -out: - preempt_enable(); - return -ERANGE; -} - -int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, - unsigned long *offset, char *modname, char *name) -{ - struct module *mod; - - preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) { - if (mod->state == MODULE_STATE_UNFORMED) - continue; - if (within_module(addr, mod)) { - const char *sym; - - sym = find_kallsyms_symbol(mod, addr, size, offset); - if (!sym) - goto out; - if (modname) - strlcpy(modname, mod->name, MODULE_NAME_LEN); - if (name) - strlcpy(name, sym, KSYM_NAME_LEN); - preempt_enable(); - return 0; - } - } -out: - preempt_enable(); - return -ERANGE; -} - -int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, - char *name, char *module_name, int *exported) -{ - struct module *mod; - - preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) { - struct mod_kallsyms *kallsyms; - - if (mod->state == MODULE_STATE_UNFORMED) - continue; - kallsyms = rcu_dereference_sched(mod->kallsyms); - if (symnum < kallsyms->num_symtab) { - const Elf_Sym *sym = &kallsyms->symtab[symnum]; - - *value = kallsyms_symbol_value(sym); - *type = kallsyms->typetab[symnum]; - strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); - strlcpy(module_name, mod->name, MODULE_NAME_LEN); - *exported = is_exported(name, *value, mod); - preempt_enable(); - return 0; - } - symnum -= kallsyms->num_symtab; - } - preempt_enable(); - return -ERANGE; -} - -/* Given a module and name of symbol, find and return the symbol's value */ -static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) -{ - unsigned int i; - struct mod_kallsyms *kallsyms = 
rcu_dereference_sched(mod->kallsyms); - - for (i = 0; i < kallsyms->num_symtab; i++) { - const Elf_Sym *sym = &kallsyms->symtab[i]; - - if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && - sym->st_shndx != SHN_UNDEF) - return kallsyms_symbol_value(sym); - } - return 0; -} - -/* Look for this name: can be of form module:name. */ -unsigned long module_kallsyms_lookup_name(const char *name) -{ - struct module *mod; - char *colon; - unsigned long ret = 0; - - /* Don't lock: we're in enough trouble already. */ - preempt_disable(); - if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { - if ((mod = find_module_all(name, colon - name, false)) != NULL) - ret = find_kallsyms_symbol_value(mod, colon+1); - } else { - list_for_each_entry_rcu(mod, &modules, list) { - if (mod->state == MODULE_STATE_UNFORMED) - continue; - if ((ret = find_kallsyms_symbol_value(mod, name)) != 0) - break; - } - } - preempt_enable(); - return ret; -} - -#ifdef CONFIG_LIVEPATCH -int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data) -{ - struct module *mod; - unsigned int i; - int ret = 0; - - mutex_lock(&module_mutex); - list_for_each_entry(mod, &modules, list) { - /* We hold module_mutex: no need for rcu_dereference_sched */ - struct mod_kallsyms *kallsyms = mod->kallsyms; - - if (mod->state == MODULE_STATE_UNFORMED) - continue; - for (i = 0; i < kallsyms->num_symtab; i++) { - const Elf_Sym *sym = &kallsyms->symtab[i]; - - if (sym->st_shndx == SHN_UNDEF) - continue; - - ret = fn(data, kallsyms_symbol_name(kallsyms, i), - mod, kallsyms_symbol_value(sym)); - if (ret != 0) - goto out; - - cond_resched(); - } - } -out: - mutex_unlock(&module_mutex); - return ret; -} -#endif /* CONFIG_LIVEPATCH */ -#endif /* CONFIG_KALLSYMS */ - static void cfi_init(struct module *mod) { #ifdef CONFIG_CFI_CLANG -- cgit v1.2.3-59-g8ed1b From 08126db5ff739fa011fc5b8af683ad759f2cba9a Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:40 +0000 Subject: module: kallsyms: Fix suspicious rcu usage No functional change. The purpose of this patch is to address the various Sparse warnings due to the incorrect dereference/or access of an __rcu pointer. Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/kallsyms.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 1b0780e20aab..a3da0686a2a6 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -171,14 +171,17 @@ void add_kallsyms(struct module *mod, const struct load_info *info) Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; /* Set up to point into init section. */ - mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; + mod->kallsyms = (void __rcu *)mod->init_layout.base + + info->mod_kallsyms_init_off; + preempt_disable(); /* The following is safe since this pointer cannot change */ - mod->kallsyms->symtab = (void *)symsec->sh_addr; - mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + rcu_dereference_sched(mod->kallsyms)->symtab = (void *)symsec->sh_addr; + rcu_dereference_sched(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym); /* Make sure we get permanent strtab: don't use info->strtab. 
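The fix below settles on one pattern throughout: enter a sched-RCU read-side section with preempt_disable(), go through rcu_dereference_sched() for every access to the __rcu pointer, then leave. Reduced to its skeleton (cfg stands in for mod->kallsyms; this sketch assumes a writer already published the pointer with rcu_assign_pointer()):

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct cfg {
        int value;
};

static struct cfg __rcu *cfg;

static int cfg_read_value(void)
{
        int v;

        preempt_disable();      /* sched-RCU read side, as in add_kallsyms() */
        v = rcu_dereference_sched(cfg)->value;
        preempt_enable();
        return v;
}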
*/ - mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; - mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; + rcu_dereference_sched(mod->kallsyms)->strtab = + (void *)info->sechdrs[info->index.str].sh_addr; + rcu_dereference_sched(mod->kallsyms)->typetab = mod->init_layout.base + info->init_typeoffs; /* * Now populate the cut down core kallsyms for after init @@ -187,20 +190,22 @@ void add_kallsyms(struct module *mod, const struct load_info *info) mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; - src = mod->kallsyms->symtab; - for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { - mod->kallsyms->typetab[i] = elf_type(src + i, info); + src = rcu_dereference_sched(mod->kallsyms)->symtab; + for (ndst = i = 0; i < rcu_dereference_sched(mod->kallsyms)->num_symtab; i++) { + rcu_dereference_sched(mod->kallsyms)->typetab[i] = elf_type(src + i, info); if (i == 0 || is_livepatch_module(mod) || is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { mod->core_kallsyms.typetab[ndst] = - mod->kallsyms->typetab[i]; + rcu_dereference_sched(mod->kallsyms)->typetab[i]; dst[ndst] = src[i]; dst[ndst++].st_name = s - mod->core_kallsyms.strtab; - s += strscpy(s, &mod->kallsyms->strtab[src[i].st_name], + s += strscpy(s, + &rcu_dereference_sched(mod->kallsyms)->strtab[src[i].st_name], KSYM_NAME_LEN) + 1; } } + preempt_enable(); mod->core_kallsyms.num_symtab = ndst; } @@ -478,11 +483,16 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, mutex_lock(&module_mutex); list_for_each_entry(mod, &modules, list) { - /* We hold module_mutex: no need for rcu_dereference_sched */ - struct mod_kallsyms *kallsyms = mod->kallsyms; + struct mod_kallsyms *kallsyms; if (mod->state == MODULE_STATE_UNFORMED) continue; + + /* Use rcu_dereference_sched() to remain compliant with the sparse tool */ + preempt_disable(); + kallsyms = rcu_dereference_sched(mod->kallsyms); + preempt_enable(); + for (i = 0; i < kallsyms->num_symtab; i++) { const Elf_Sym *sym = &kallsyms->symtab[i]; -- cgit v1.2.3-59-g8ed1b From 0ffc40f6c8ab684e694774ebc835b198398129a8 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:41 +0000 Subject: module: Move procfs support into a separate file No functional change. This patch migrates code that allows one to generate a list of loaded/or linked modules via /proc when procfs support is enabled into kernel/module/procfs.c. 
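For readers new to the seq_file interface this patch relocates: the m_start()/m_next()/m_stop()/m_show() quartet below is the standard iterator contract for multi-record files, and simpler producers can skip it entirely. A minimal out-of-tree module exposing a one-shot /proc entry through the same subsystem (module and file names are invented for the demo):

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from a seq_file demo\n");
        return 0;
}

static int __init demo_init(void)
{
        /* proc_create_single() wires show() up without explicit seq_ops. */
        if (!proc_create_single("seqfile_demo", 0444, NULL, demo_show))
                return -ENOMEM;
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("seq_file /proc demo");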
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 1 + kernel/module/main.c | 131 +------------------------------------------ kernel/module/procfs.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 145 insertions(+), 130 deletions(-) create mode 100644 kernel/module/procfs.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 9901bed3ab5b..94296c98a67f 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o obj-$(CONFIG_KALLSYMS) += kallsyms.o +obj-$(CONFIG_PROC_FS) += procfs.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 44ca05b9eb8f..6af40c2d145f 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -72,6 +72,7 @@ struct module *find_module_all(const char *name, size_t len, bool even_unformed) int cmp_name(const void *name, const void *sym); long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section); +char *module_flags(struct module *mod, char *buf); static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) { diff --git a/kernel/module/main.c b/kernel/module/main.c index 4d74988e1814..63b98452fd7d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -805,31 +804,6 @@ out: return ret; } -static inline void print_unload_info(struct seq_file *m, struct module *mod) -{ - struct module_use *use; - int printed_something = 0; - - seq_printf(m, " %i ", module_refcount(mod)); - - /* - * Always include a trailing , so userspace can differentiate - * between this and the old multi-field proc format. - */ - list_for_each_entry(use, &mod->source_list, source_list) { - printed_something = 1; - seq_printf(m, "%s,", use->source->name); - } - - if (mod->init != NULL && mod->exit == NULL) { - printed_something = 1; - seq_puts(m, "[permanent],"); - } - - if (!printed_something) - seq_puts(m, "-"); -} - void __symbol_put(const char *symbol) { struct find_symbol_arg fsa = { @@ -919,12 +893,6 @@ void module_put(struct module *module) EXPORT_SYMBOL(module_put); #else /* !CONFIG_MODULE_UNLOAD */ -static inline void print_unload_info(struct seq_file *m, struct module *mod) -{ - /* We don't know the usage count, or what modules are using. */ - seq_puts(m, " - -"); -} - static inline void module_unload_free(struct module *mod) { } @@ -3596,7 +3564,7 @@ static void cfi_cleanup(struct module *mod) } /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ -static char *module_flags(struct module *mod, char *buf) +char *module_flags(struct module *mod, char *buf) { int bx = 0; @@ -3619,103 +3587,6 @@ static char *module_flags(struct module *mod, char *buf) return buf; } -#ifdef CONFIG_PROC_FS -/* Called by the /proc file system to return a list of modules. 
*/ -static void *m_start(struct seq_file *m, loff_t *pos) -{ - mutex_lock(&module_mutex); - return seq_list_start(&modules, *pos); -} - -static void *m_next(struct seq_file *m, void *p, loff_t *pos) -{ - return seq_list_next(p, &modules, pos); -} - -static void m_stop(struct seq_file *m, void *p) -{ - mutex_unlock(&module_mutex); -} - -static int m_show(struct seq_file *m, void *p) -{ - struct module *mod = list_entry(p, struct module, list); - char buf[MODULE_FLAGS_BUF_SIZE]; - void *value; - - /* We always ignore unformed modules. */ - if (mod->state == MODULE_STATE_UNFORMED) - return 0; - - seq_printf(m, "%s %u", - mod->name, mod->init_layout.size + mod->core_layout.size); - print_unload_info(m, mod); - - /* Informative for users. */ - seq_printf(m, " %s", - mod->state == MODULE_STATE_GOING ? "Unloading" : - mod->state == MODULE_STATE_COMING ? "Loading" : - "Live"); - /* Used by oprofile and other similar tools. */ - value = m->private ? NULL : mod->core_layout.base; - seq_printf(m, " 0x%px", value); - - /* Taints info */ - if (mod->taints) - seq_printf(m, " %s", module_flags(mod, buf)); - - seq_puts(m, "\n"); - return 0; -} - -/* - * Format: modulename size refcount deps address - * - * Where refcount is a number or -, and deps is a comma-separated list - * of depends or -. - */ -static const struct seq_operations modules_op = { - .start = m_start, - .next = m_next, - .stop = m_stop, - .show = m_show -}; - -/* - * This also sets the "private" pointer to non-NULL if the - * kernel pointers should be hidden (so you can just test - * "m->private" to see if you should keep the values private). - * - * We use the same logic as for /proc/kallsyms. - */ -static int modules_open(struct inode *inode, struct file *file) -{ - int err = seq_open(file, &modules_op); - - if (!err) { - struct seq_file *m = file->private_data; - m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul; - } - - return err; -} - -static const struct proc_ops modules_proc_ops = { - .proc_flags = PROC_ENTRY_PERMANENT, - .proc_open = modules_open, - .proc_read = seq_read, - .proc_lseek = seq_lseek, - .proc_release = seq_release, -}; - -static int __init proc_modules_init(void) -{ - proc_create("modules", 0, NULL, &modules_proc_ops); - return 0; -} -module_init(proc_modules_init); -#endif - /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr) { diff --git a/kernel/module/procfs.c b/kernel/module/procfs.c new file mode 100644 index 000000000000..2717e130788e --- /dev/null +++ b/kernel/module/procfs.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module proc support + * + * Copyright (C) 2008 Alexey Dobriyan + */ + +#include +#include +#include +#include +#include +#include "internal.h" + +#ifdef CONFIG_MODULE_UNLOAD +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + struct module_use *use; + int printed_something = 0; + + seq_printf(m, " %i ", module_refcount(mod)); + + /* + * Always include a trailing , so userspace can differentiate + * between this and the old multi-field proc format. 
+ */ + list_for_each_entry(use, &mod->source_list, source_list) { + printed_something = 1; + seq_printf(m, "%s,", use->source->name); + } + + if (mod->init && !mod->exit) { + printed_something = 1; + seq_puts(m, "[permanent],"); + } + + if (!printed_something) + seq_puts(m, "-"); +} +#else /* !CONFIG_MODULE_UNLOAD */ +static inline void print_unload_info(struct seq_file *m, struct module *mod) +{ + /* We don't know the usage count, or what modules are using. */ + seq_puts(m, " - -"); +} +#endif /* CONFIG_MODULE_UNLOAD */ + +/* Called by the /proc file system to return a list of modules. */ +static void *m_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&module_mutex); + return seq_list_start(&modules, *pos); +} + +static void *m_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &modules, pos); +} + +static void m_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&module_mutex); +} + +static int m_show(struct seq_file *m, void *p) +{ + struct module *mod = list_entry(p, struct module, list); + char buf[MODULE_FLAGS_BUF_SIZE]; + void *value; + + /* We always ignore unformed modules. */ + if (mod->state == MODULE_STATE_UNFORMED) + return 0; + + seq_printf(m, "%s %u", + mod->name, mod->init_layout.size + mod->core_layout.size); + print_unload_info(m, mod); + + /* Informative for users. */ + seq_printf(m, " %s", + mod->state == MODULE_STATE_GOING ? "Unloading" : + mod->state == MODULE_STATE_COMING ? "Loading" : + "Live"); + /* Used by oprofile and other similar tools. */ + value = m->private ? NULL : mod->core_layout.base; + seq_printf(m, " 0x%px", value); + + /* Taints info */ + if (mod->taints) + seq_printf(m, " %s", module_flags(mod, buf)); + + seq_puts(m, "\n"); + return 0; +} + +/* + * Format: modulename size refcount deps address + * + * Where refcount is a number or -, and deps is a comma-separated list + * of depends or -. + */ +static const struct seq_operations modules_op = { + .start = m_start, + .next = m_next, + .stop = m_stop, + .show = m_show +}; + +/* + * This also sets the "private" pointer to non-NULL if the + * kernel pointers should be hidden (so you can just test + * "m->private" to see if you should keep the values private). + * + * We use the same logic as for /proc/kallsyms. + */ +static int modules_open(struct inode *inode, struct file *file) +{ + int err = seq_open(file, &modules_op); + + if (!err) { + struct seq_file *m = file->private_data; + + m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul; + } + + return err; +} + +static const struct proc_ops modules_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_open = modules_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; + +static int __init proc_modules_init(void) +{ + proc_create("modules", 0, NULL, &modules_proc_ops); + return 0; +} +module_init(proc_modules_init); -- cgit v1.2.3-59-g8ed1b From 44c09535de4784f31d151aa1047efcf4797ca3cd Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:42 +0000 Subject: module: Move sysfs support into a separate file No functional change. This patch migrates module sysfs support out of core code into kernel/module/sysfs.c. In addition simple code refactoring to make this possible. 
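The sysfs code split out here is what materialises /sys/module/<name>/ with its sections, notes, parameters and holders entries. The cheapest way to watch mod_sysfs_setup() at work is a module parameter, which the module loader publishes automatically under parameters/ (the demo module below is illustrative, not part of the patch):

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int level = 3;
/* Appears as /sys/module/sysfs_demo/parameters/level, mode 0644. */
module_param(level, int, 0644);
MODULE_PARM_DESC(level, "demo knob visible via sysfs");

static int __init sysfs_demo_init(void)
{
        pr_info("sysfs_demo: level=%d\n", level);
        return 0;
}

static void __exit sysfs_demo_exit(void)
{
}

module_init(sysfs_demo_init);
module_exit(sysfs_demo_exit);
MODULE_LICENSE("GPL");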
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 21 +++ kernel/module/main.c | 469 +---------------------------------------------- kernel/module/sysfs.c | 436 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 461 insertions(+), 466 deletions(-) create mode 100644 kernel/module/sysfs.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 94296c98a67f..cf8dcdc6b55f 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PROC_FS) += procfs.o +obj-$(CONFIG_SYSFS) += sysfs.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 6af40c2d145f..62d749ef695e 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -34,6 +34,9 @@ extern struct mutex module_mutex; extern struct list_head modules; +extern struct module_attribute *modinfo_attrs[]; +extern size_t modinfo_attrs_count; + /* Provided by the linker */ extern const struct kernel_symbol __start___ksymtab[]; extern const struct kernel_symbol __stop___ksymtab[]; @@ -204,3 +207,21 @@ static inline void init_build_id(struct module *mod, const struct load_info *inf static inline void layout_symtab(struct module *mod, struct load_info *info) { } static inline void add_kallsyms(struct module *mod, const struct load_info *info) { } #endif /* CONFIG_KALLSYMS */ + +#ifdef CONFIG_SYSFS +int mod_sysfs_setup(struct module *mod, const struct load_info *info, + struct kernel_param *kparam, unsigned int num_params); +void mod_sysfs_teardown(struct module *mod); +void init_param_lock(struct module *mod); +#else /* !CONFIG_SYSFS */ +static inline int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static inline void mod_sysfs_teardown(struct module *mod) { } +static inline void init_param_lock(struct module *mod) { } +#endif /* CONFIG_SYSFS */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 63b98452fd7d..0cd0590dd411 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -14,9 +14,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -989,7 +987,7 @@ static ssize_t show_taint(struct module_attribute *mattr, static struct module_attribute modinfo_taint = __ATTR(taint, 0444, show_taint, NULL); -static struct module_attribute *modinfo_attrs[] = { +struct module_attribute *modinfo_attrs[] = { &module_uevent, &modinfo_version, &modinfo_srcversion, @@ -1003,6 +1001,8 @@ static struct module_attribute *modinfo_attrs[] = { NULL, }; +size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); + static const char vermagic[] = VERMAGIC_STRING; static int try_to_force_load(struct module *mod, const char *reason) @@ -1253,469 +1253,6 @@ resolve_symbol_wait(struct module *mod, return ksym; } -/* - * /sys/module/foo/sections stuff - * J. 
Corbet - */ -#ifdef CONFIG_SYSFS - -#ifdef CONFIG_KALLSYMS -struct module_sect_attr { - struct bin_attribute battr; - unsigned long address; -}; - -struct module_sect_attrs { - struct attribute_group grp; - unsigned int nsections; - struct module_sect_attr attrs[]; -}; - -#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) -static ssize_t module_sect_read(struct file *file, struct kobject *kobj, - struct bin_attribute *battr, - char *buf, loff_t pos, size_t count) -{ - struct module_sect_attr *sattr = - container_of(battr, struct module_sect_attr, battr); - char bounce[MODULE_SECT_READ_SIZE + 1]; - size_t wrote; - - if (pos != 0) - return -EINVAL; - - /* - * Since we're a binary read handler, we must account for the - * trailing NUL byte that sprintf will write: if "buf" is - * too small to hold the NUL, or the NUL is exactly the last - * byte, the read will look like it got truncated by one byte. - * Since there is no way to ask sprintf nicely to not write - * the NUL, we have to use a bounce buffer. - */ - wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", - kallsyms_show_value(file->f_cred) - ? (void *)sattr->address : NULL); - count = min(count, wrote); - memcpy(buf, bounce, count); - - return count; -} - -static void free_sect_attrs(struct module_sect_attrs *sect_attrs) -{ - unsigned int section; - - for (section = 0; section < sect_attrs->nsections; section++) - kfree(sect_attrs->attrs[section].battr.attr.name); - kfree(sect_attrs); -} - -static void add_sect_attrs(struct module *mod, const struct load_info *info) -{ - unsigned int nloaded = 0, i, size[2]; - struct module_sect_attrs *sect_attrs; - struct module_sect_attr *sattr; - struct bin_attribute **gattr; - - /* Count loaded sections and allocate structures */ - for (i = 0; i < info->hdr->e_shnum; i++) - if (!sect_empty(&info->sechdrs[i])) - nloaded++; - size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), - sizeof(sect_attrs->grp.bin_attrs[0])); - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); - sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); - if (sect_attrs == NULL) - return; - - /* Setup section attributes. */ - sect_attrs->grp.name = "sections"; - sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; - - sect_attrs->nsections = 0; - sattr = §_attrs->attrs[0]; - gattr = §_attrs->grp.bin_attrs[0]; - for (i = 0; i < info->hdr->e_shnum; i++) { - Elf_Shdr *sec = &info->sechdrs[i]; - if (sect_empty(sec)) - continue; - sysfs_bin_attr_init(&sattr->battr); - sattr->address = sec->sh_addr; - sattr->battr.attr.name = - kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); - if (sattr->battr.attr.name == NULL) - goto out; - sect_attrs->nsections++; - sattr->battr.read = module_sect_read; - sattr->battr.size = MODULE_SECT_READ_SIZE; - sattr->battr.attr.mode = 0400; - *(gattr++) = &(sattr++)->battr; - } - *gattr = NULL; - - if (sysfs_create_group(&mod->mkobj.kobj, §_attrs->grp)) - goto out; - - mod->sect_attrs = sect_attrs; - return; - out: - free_sect_attrs(sect_attrs); -} - -static void remove_sect_attrs(struct module *mod) -{ - if (mod->sect_attrs) { - sysfs_remove_group(&mod->mkobj.kobj, - &mod->sect_attrs->grp); - /* - * We are positive that no one is using any sect attrs - * at this point. Deallocate immediately. - */ - free_sect_attrs(mod->sect_attrs); - mod->sect_attrs = NULL; - } -} - -/* - * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. 
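The notes directory populated below exports raw SHT_NOTE section contents, the same Elf_Nhdr records that readelf -n decodes. A userspace walker over one such buffer, using the standard <elf.h> layout (4-byte note alignment and little-endian byte order are assumed for the synthetic example):

#include <elf.h>
#include <stdio.h>

/* Walk a buffer of ELF notes, printing each owner, type and size. */
static void walk_notes(const unsigned char *p, size_t len)
{
        while (len >= sizeof(Elf64_Nhdr)) {
                const Elf64_Nhdr *nh = (const Elf64_Nhdr *)p;
                size_t namesz = (nh->n_namesz + 3) & ~(size_t)3;
                size_t descsz = (nh->n_descsz + 3) & ~(size_t)3;
                size_t total = sizeof(*nh) + namesz + descsz;

                if (total > len)
                        break;  /* truncated or malformed note */
                printf("owner=%.*s type=%u descsz=%u\n",
                       nh->n_namesz ? (int)nh->n_namesz - 1 : 0,
                       (const char *)(nh + 1), nh->n_type, nh->n_descsz);
                p += total;
                len -= total;
        }
}

int main(void)
{
        /* One synthetic note: owner "Demo", type 1, empty descriptor. */
        static const unsigned char buf[] = {
                5, 0, 0, 0,                     /* n_namesz = 5 ("Demo" + NUL) */
                0, 0, 0, 0,                     /* n_descsz = 0 */
                1, 0, 0, 0,                     /* n_type = 1 */
                'D', 'e', 'm', 'o', 0, 0, 0, 0, /* name, padded to 4 bytes */
        };

        walk_notes(buf, sizeof(buf));
        return 0;
}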
- */ - -struct module_notes_attrs { - struct kobject *dir; - unsigned int notes; - struct bin_attribute attrs[]; -}; - -static ssize_t module_notes_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) -{ - /* - * The caller checked the pos and count against our size. - */ - memcpy(buf, bin_attr->private + pos, count); - return count; -} - -static void free_notes_attrs(struct module_notes_attrs *notes_attrs, - unsigned int i) -{ - if (notes_attrs->dir) { - while (i-- > 0) - sysfs_remove_bin_file(notes_attrs->dir, - ¬es_attrs->attrs[i]); - kobject_put(notes_attrs->dir); - } - kfree(notes_attrs); -} - -static void add_notes_attrs(struct module *mod, const struct load_info *info) -{ - unsigned int notes, loaded, i; - struct module_notes_attrs *notes_attrs; - struct bin_attribute *nattr; - - /* failed to create section attributes, so can't create notes */ - if (!mod->sect_attrs) - return; - - /* Count notes sections and allocate structures. */ - notes = 0; - for (i = 0; i < info->hdr->e_shnum; i++) - if (!sect_empty(&info->sechdrs[i]) && - (info->sechdrs[i].sh_type == SHT_NOTE)) - ++notes; - - if (notes == 0) - return; - - notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes), - GFP_KERNEL); - if (notes_attrs == NULL) - return; - - notes_attrs->notes = notes; - nattr = ¬es_attrs->attrs[0]; - for (loaded = i = 0; i < info->hdr->e_shnum; ++i) { - if (sect_empty(&info->sechdrs[i])) - continue; - if (info->sechdrs[i].sh_type == SHT_NOTE) { - sysfs_bin_attr_init(nattr); - nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; - nattr->attr.mode = S_IRUGO; - nattr->size = info->sechdrs[i].sh_size; - nattr->private = (void *) info->sechdrs[i].sh_addr; - nattr->read = module_notes_read; - ++nattr; - } - ++loaded; - } - - notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj); - if (!notes_attrs->dir) - goto out; - - for (i = 0; i < notes; ++i) - if (sysfs_create_bin_file(notes_attrs->dir, - ¬es_attrs->attrs[i])) - goto out; - - mod->notes_attrs = notes_attrs; - return; - - out: - free_notes_attrs(notes_attrs, i); -} - -static void remove_notes_attrs(struct module *mod) -{ - if (mod->notes_attrs) - free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); -} - -#else - -static inline void add_sect_attrs(struct module *mod, - const struct load_info *info) -{ -} - -static inline void remove_sect_attrs(struct module *mod) -{ -} - -static inline void add_notes_attrs(struct module *mod, - const struct load_info *info) -{ -} - -static inline void remove_notes_attrs(struct module *mod) -{ -} -#endif /* CONFIG_KALLSYMS */ - -static void del_usage_links(struct module *mod) -{ -#ifdef CONFIG_MODULE_UNLOAD - struct module_use *use; - - mutex_lock(&module_mutex); - list_for_each_entry(use, &mod->target_list, target_list) - sysfs_remove_link(use->target->holders_dir, mod->name); - mutex_unlock(&module_mutex); -#endif -} - -static int add_usage_links(struct module *mod) -{ - int ret = 0; -#ifdef CONFIG_MODULE_UNLOAD - struct module_use *use; - - mutex_lock(&module_mutex); - list_for_each_entry(use, &mod->target_list, target_list) { - ret = sysfs_create_link(use->target->holders_dir, - &mod->mkobj.kobj, mod->name); - if (ret) - break; - } - mutex_unlock(&module_mutex); - if (ret) - del_usage_links(mod); -#endif - return ret; -} - -static void module_remove_modinfo_attrs(struct module *mod, int end); - -static int module_add_modinfo_attrs(struct module *mod) -{ - struct module_attribute *attr; - struct module_attribute 
*temp_attr; - int error = 0; - int i; - - mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * - (ARRAY_SIZE(modinfo_attrs) + 1)), - GFP_KERNEL); - if (!mod->modinfo_attrs) - return -ENOMEM; - - temp_attr = mod->modinfo_attrs; - for (i = 0; (attr = modinfo_attrs[i]); i++) { - if (!attr->test || attr->test(mod)) { - memcpy(temp_attr, attr, sizeof(*temp_attr)); - sysfs_attr_init(&temp_attr->attr); - error = sysfs_create_file(&mod->mkobj.kobj, - &temp_attr->attr); - if (error) - goto error_out; - ++temp_attr; - } - } - - return 0; - -error_out: - if (i > 0) - module_remove_modinfo_attrs(mod, --i); - else - kfree(mod->modinfo_attrs); - return error; -} - -static void module_remove_modinfo_attrs(struct module *mod, int end) -{ - struct module_attribute *attr; - int i; - - for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) { - if (end >= 0 && i > end) - break; - /* pick a field to test for end of list */ - if (!attr->attr.name) - break; - sysfs_remove_file(&mod->mkobj.kobj, &attr->attr); - if (attr->free) - attr->free(mod); - } - kfree(mod->modinfo_attrs); -} - -static void mod_kobject_put(struct module *mod) -{ - DECLARE_COMPLETION_ONSTACK(c); - mod->mkobj.kobj_completion = &c; - kobject_put(&mod->mkobj.kobj); - wait_for_completion(&c); -} - -static int mod_sysfs_init(struct module *mod) -{ - int err; - struct kobject *kobj; - - if (!module_sysfs_initialized) { - pr_err("%s: module sysfs not initialized\n", mod->name); - err = -EINVAL; - goto out; - } - - kobj = kset_find_obj(module_kset, mod->name); - if (kobj) { - pr_err("%s: module is already loaded\n", mod->name); - kobject_put(kobj); - err = -EINVAL; - goto out; - } - - mod->mkobj.mod = mod; - - memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); - mod->mkobj.kobj.kset = module_kset; - err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, - "%s", mod->name); - if (err) - mod_kobject_put(mod); - -out: - return err; -} - -static int mod_sysfs_setup(struct module *mod, - const struct load_info *info, - struct kernel_param *kparam, - unsigned int num_params) -{ - int err; - - err = mod_sysfs_init(mod); - if (err) - goto out; - - mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); - if (!mod->holders_dir) { - err = -ENOMEM; - goto out_unreg; - } - - err = module_param_sysfs_setup(mod, kparam, num_params); - if (err) - goto out_unreg_holders; - - err = module_add_modinfo_attrs(mod); - if (err) - goto out_unreg_param; - - err = add_usage_links(mod); - if (err) - goto out_unreg_modinfo_attrs; - - add_sect_attrs(mod, info); - add_notes_attrs(mod, info); - - return 0; - -out_unreg_modinfo_attrs: - module_remove_modinfo_attrs(mod, -1); -out_unreg_param: - module_param_sysfs_remove(mod); -out_unreg_holders: - kobject_put(mod->holders_dir); -out_unreg: - mod_kobject_put(mod); -out: - return err; -} - -static void mod_sysfs_fini(struct module *mod) -{ - remove_notes_attrs(mod); - remove_sect_attrs(mod); - mod_kobject_put(mod); -} - -static void init_param_lock(struct module *mod) -{ - mutex_init(&mod->param_lock); -} -#else /* !CONFIG_SYSFS */ - -static int mod_sysfs_setup(struct module *mod, - const struct load_info *info, - struct kernel_param *kparam, - unsigned int num_params) -{ - return 0; -} - -static void mod_sysfs_fini(struct module *mod) -{ -} - -static void module_remove_modinfo_attrs(struct module *mod, int end) -{ -} - -static void del_usage_links(struct module *mod) -{ -} - -static void init_param_lock(struct module *mod) -{ -} -#endif /* CONFIG_SYSFS */ - -static void mod_sysfs_teardown(struct 
module *mod) -{ - del_usage_links(mod); - module_remove_modinfo_attrs(mod, -1); - module_param_sysfs_remove(mod); - kobject_put(mod->mkobj.drivers_dir); - kobject_put(mod->holders_dir); - mod_sysfs_fini(mod); -} - /* * LKM RO/NX protection: protect module's text/ro-data * from modification and any data from execution. diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c new file mode 100644 index 000000000000..ce68f821dcd1 --- /dev/null +++ b/kernel/module/sysfs.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module sysfs support + * + * Copyright (C) 2008 Rusty Russell + */ + +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* + * /sys/module/foo/sections stuff + * J. Corbet + */ +#ifdef CONFIG_KALLSYMS +struct module_sect_attr { + struct bin_attribute battr; + unsigned long address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[]; +}; + +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) +{ + struct module_sect_attr *sattr = + container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; + + if (pos != 0) + return -EINVAL; + + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? (void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; +} + +static void free_sect_attrs(struct module_sect_attrs *sect_attrs) +{ + unsigned int section; + + for (section = 0; section < sect_attrs->nsections; section++) + kfree(sect_attrs->attrs[section].battr.attr.name); + kfree(sect_attrs); +} + +static void add_sect_attrs(struct module *mod, const struct load_info *info) +{ + unsigned int nloaded = 0, i, size[2]; + struct module_sect_attrs *sect_attrs; + struct module_sect_attr *sattr; + struct bin_attribute **gattr; + + /* Count loaded sections and allocate structures */ + for (i = 0; i < info->hdr->e_shnum; i++) + if (!sect_empty(&info->sechdrs[i])) + nloaded++; + size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), + sizeof(sect_attrs->grp.bin_attrs[0])); + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); + sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); + if (!sect_attrs) + return; + + /* Setup section attributes. 
*/ + sect_attrs->grp.name = "sections"; + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; + + sect_attrs->nsections = 0; + sattr = §_attrs->attrs[0]; + gattr = §_attrs->grp.bin_attrs[0]; + for (i = 0; i < info->hdr->e_shnum; i++) { + Elf_Shdr *sec = &info->sechdrs[i]; + + if (sect_empty(sec)) + continue; + sysfs_bin_attr_init(&sattr->battr); + sattr->address = sec->sh_addr; + sattr->battr.attr.name = + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); + if (!sattr->battr.attr.name) + goto out; + sect_attrs->nsections++; + sattr->battr.read = module_sect_read; + sattr->battr.size = MODULE_SECT_READ_SIZE; + sattr->battr.attr.mode = 0400; + *(gattr++) = &(sattr++)->battr; + } + *gattr = NULL; + + if (sysfs_create_group(&mod->mkobj.kobj, §_attrs->grp)) + goto out; + + mod->sect_attrs = sect_attrs; + return; +out: + free_sect_attrs(sect_attrs); +} + +static void remove_sect_attrs(struct module *mod) +{ + if (mod->sect_attrs) { + sysfs_remove_group(&mod->mkobj.kobj, + &mod->sect_attrs->grp); + /* + * We are positive that no one is using any sect attrs + * at this point. Deallocate immediately. + */ + free_sect_attrs(mod->sect_attrs); + mod->sect_attrs = NULL; + } +} + +/* + * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. + */ + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[]; +}; + +static ssize_t module_notes_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) +{ + /* + * The caller checked the pos and count against our size. + */ + memcpy(buf, bin_attr->private + pos, count); + return count; +} + +static void free_notes_attrs(struct module_notes_attrs *notes_attrs, + unsigned int i) +{ + if (notes_attrs->dir) { + while (i-- > 0) + sysfs_remove_bin_file(notes_attrs->dir, + ¬es_attrs->attrs[i]); + kobject_put(notes_attrs->dir); + } + kfree(notes_attrs); +} + +static void add_notes_attrs(struct module *mod, const struct load_info *info) +{ + unsigned int notes, loaded, i; + struct module_notes_attrs *notes_attrs; + struct bin_attribute *nattr; + + /* failed to create section attributes, so can't create notes */ + if (!mod->sect_attrs) + return; + + /* Count notes sections and allocate structures. 
*/ + notes = 0; + for (i = 0; i < info->hdr->e_shnum; i++) + if (!sect_empty(&info->sechdrs[i]) && + info->sechdrs[i].sh_type == SHT_NOTE) + ++notes; + + if (notes == 0) + return; + + notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes), + GFP_KERNEL); + if (!notes_attrs) + return; + + notes_attrs->notes = notes; + nattr = ¬es_attrs->attrs[0]; + for (loaded = i = 0; i < info->hdr->e_shnum; ++i) { + if (sect_empty(&info->sechdrs[i])) + continue; + if (info->sechdrs[i].sh_type == SHT_NOTE) { + sysfs_bin_attr_init(nattr); + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; + nattr->attr.mode = 0444; + nattr->size = info->sechdrs[i].sh_size; + nattr->private = (void *)info->sechdrs[i].sh_addr; + nattr->read = module_notes_read; + ++nattr; + } + ++loaded; + } + + notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj); + if (!notes_attrs->dir) + goto out; + + for (i = 0; i < notes; ++i) + if (sysfs_create_bin_file(notes_attrs->dir, + ¬es_attrs->attrs[i])) + goto out; + + mod->notes_attrs = notes_attrs; + return; + +out: + free_notes_attrs(notes_attrs, i); +} + +static void remove_notes_attrs(struct module *mod) +{ + if (mod->notes_attrs) + free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); +} + +#else /* !CONFIG_KALLSYMS */ +static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { } +static inline void remove_sect_attrs(struct module *mod) { } +static inline void add_notes_attrs(struct module *mod, const struct load_info *info) { } +static inline void remove_notes_attrs(struct module *mod) { } +#endif /* CONFIG_KALLSYMS */ + +static void del_usage_links(struct module *mod) +{ +#ifdef CONFIG_MODULE_UNLOAD + struct module_use *use; + + mutex_lock(&module_mutex); + list_for_each_entry(use, &mod->target_list, target_list) + sysfs_remove_link(use->target->holders_dir, mod->name); + mutex_unlock(&module_mutex); +#endif +} + +static int add_usage_links(struct module *mod) +{ + int ret = 0; +#ifdef CONFIG_MODULE_UNLOAD + struct module_use *use; + + mutex_lock(&module_mutex); + list_for_each_entry(use, &mod->target_list, target_list) { + ret = sysfs_create_link(use->target->holders_dir, + &mod->mkobj.kobj, mod->name); + if (ret) + break; + } + mutex_unlock(&module_mutex); + if (ret) + del_usage_links(mod); +#endif + return ret; +} + +static void module_remove_modinfo_attrs(struct module *mod, int end) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) { + if (end >= 0 && i > end) + break; + /* pick a field to test for end of list */ + if (!attr->attr.name) + break; + sysfs_remove_file(&mod->mkobj.kobj, &attr->attr); + if (attr->free) + attr->free(mod); + } + kfree(mod->modinfo_attrs); +} + +static int module_add_modinfo_attrs(struct module *mod) +{ + struct module_attribute *attr; + struct module_attribute *temp_attr; + int error = 0; + int i; + + mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * + (modinfo_attrs_count + 1)), + GFP_KERNEL); + if (!mod->modinfo_attrs) + return -ENOMEM; + + temp_attr = mod->modinfo_attrs; + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (!attr->test || attr->test(mod)) { + memcpy(temp_attr, attr, sizeof(*temp_attr)); + sysfs_attr_init(&temp_attr->attr); + error = sysfs_create_file(&mod->mkobj.kobj, + &temp_attr->attr); + if (error) + goto error_out; + ++temp_attr; + } + } + + return 0; + +error_out: + if (i > 0) + module_remove_modinfo_attrs(mod, --i); + else + kfree(mod->modinfo_attrs); + return error; +} + +static void 
mod_kobject_put(struct module *mod) +{ + DECLARE_COMPLETION_ONSTACK(c); + + mod->mkobj.kobj_completion = &c; + kobject_put(&mod->mkobj.kobj); + wait_for_completion(&c); +} + +static int mod_sysfs_init(struct module *mod) +{ + int err; + struct kobject *kobj; + + if (!module_sysfs_initialized) { + pr_err("%s: module sysfs not initialized\n", mod->name); + err = -EINVAL; + goto out; + } + + kobj = kset_find_obj(module_kset, mod->name); + if (kobj) { + pr_err("%s: module is already loaded\n", mod->name); + kobject_put(kobj); + err = -EINVAL; + goto out; + } + + mod->mkobj.mod = mod; + + memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); + mod->mkobj.kobj.kset = module_kset; + err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, + "%s", mod->name); + if (err) + mod_kobject_put(mod); + +out: + return err; +} + +int mod_sysfs_setup(struct module *mod, + const struct load_info *info, + struct kernel_param *kparam, + unsigned int num_params) +{ + int err; + + err = mod_sysfs_init(mod); + if (err) + goto out; + + mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); + if (!mod->holders_dir) { + err = -ENOMEM; + goto out_unreg; + } + + err = module_param_sysfs_setup(mod, kparam, num_params); + if (err) + goto out_unreg_holders; + + err = module_add_modinfo_attrs(mod); + if (err) + goto out_unreg_param; + + err = add_usage_links(mod); + if (err) + goto out_unreg_modinfo_attrs; + + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); + + return 0; + +out_unreg_modinfo_attrs: + module_remove_modinfo_attrs(mod, -1); +out_unreg_param: + module_param_sysfs_remove(mod); +out_unreg_holders: + kobject_put(mod->holders_dir); +out_unreg: + mod_kobject_put(mod); +out: + return err; +} + +static void mod_sysfs_fini(struct module *mod) +{ + remove_notes_attrs(mod); + remove_sect_attrs(mod); + mod_kobject_put(mod); +} + +void mod_sysfs_teardown(struct module *mod) +{ + del_usage_links(mod); + module_remove_modinfo_attrs(mod, -1); + module_param_sysfs_remove(mod); + kobject_put(mod->mkobj.drivers_dir); + kobject_put(mod->holders_dir); + mod_sysfs_fini(mod); +} + +void init_param_lock(struct module *mod) +{ + mutex_init(&mod->param_lock); +} -- cgit v1.2.3-59-g8ed1b From f64205a42046d3802c423fa2059e7fca39af127c Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:43 +0000 Subject: module: Move kdb module related code out of main kdb code No functional change. This patch migrates the kdb 'lsmod' command support out of main kdb code into its own file under kernel/module. In addition to the above, a minor style warning i.e. missing a blank line after declarations, was resolved too. The new file was added to MAINTAINERS. Finally we remove linux/module.h as it is entirely redundant. 
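To see concretely what the move removes, here is a condensed before/after sketch (assembled from the hunks below, not a verbatim quote): kdb used to reach the module list through a kdb_modules pointer that main.c exported on its behalf; now that the command lives in kernel/module/, kdb.c walks the modules list declared in internal.h directly.

/* Before: kernel/module/main.c exported an alias for kdb's benefit. */
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules;	/* kdb needs the list of modules */
#endif

/* ... and kdb iterated through that alias: */
list_for_each_entry(mod, kdb_modules, list) {
	if (mod->state == MODULE_STATE_UNFORMED)
		continue;
	/* print one row per module */
}

/* After: kernel/module/kdb.c shares "extern struct list_head modules;"
 * via internal.h and iterates the list itself, so both the exported
 * pointer and its declaration in kdb_private.h can go away. */
list_for_each_entry(mod, &modules, list) {
	if (mod->state == MODULE_STATE_UNFORMED)
		continue;
	/* print one row per module */
}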
Reviewed-by: Daniel Thompson Acked-by: Daniel Thompson Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- MAINTAINERS | 1 + include/linux/kdb.h | 1 + kernel/debug/kdb/kdb_io.c | 1 - kernel/debug/kdb/kdb_keyboard.c | 1 - kernel/debug/kdb/kdb_main.c | 49 ------------------------------------ kernel/debug/kdb/kdb_private.h | 4 --- kernel/debug/kdb/kdb_support.c | 1 - kernel/module/Makefile | 1 + kernel/module/kdb.c | 56 +++++++++++++++++++++++++++++++++++++++++ kernel/module/main.c | 4 --- 10 files changed, 59 insertions(+), 60 deletions(-) create mode 100644 kernel/module/kdb.c (limited to 'kernel/module') diff --git a/MAINTAINERS b/MAINTAINERS index 6dcd93fb3a96..87cc05c6b462 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10907,6 +10907,7 @@ F: drivers/tty/serial/kgdboc.c F: include/linux/kdb.h F: include/linux/kgdb.h F: kernel/debug/ +F: kernel/module/kdb.c KHADAS MCU MFD DRIVER M: Neil Armstrong diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ea0f5e580fac..07dfb6a20a1c 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -222,5 +222,6 @@ enum { extern int kdbgetintenv(const char *, int *); extern int kdb_set(int, const char **); +int kdb_lsmod(int argc, const char **argv); #endif /* !_KDB_H */ diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 6735ac36b718..67d3c48a1522 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -9,7 +9,6 @@ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ -#include #include #include #include diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c index f877a0a0d7cf..f87c750d3eb3 100644 --- a/kernel/debug/kdb/kdb_keyboard.c +++ b/kernel/debug/kdb/kdb_keyboard.c @@ -11,7 +11,6 @@ #include #include #include -#include #include /* Keyboard Controller Registers on normal PCs. */ diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 0852a537dad4..f3a30cd5037f 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -2004,54 +2003,6 @@ static int kdb_ef(int argc, const char **argv) return 0; } -#if defined(CONFIG_MODULES) -/* - * kdb_lsmod - This function implements the 'lsmod' command. Lists - * currently loaded kernel modules. - * Mostly taken from userland lsmod. - */ -static int kdb_lsmod(int argc, const char **argv) -{ - struct module *mod; - - if (argc != 0) - return KDB_ARGCOUNT; - - kdb_printf("Module Size modstruct Used by\n"); - list_for_each_entry(mod, kdb_modules, list) { - if (mod->state == MODULE_STATE_UNFORMED) - continue; - - kdb_printf("%-20s%8u 0x%px ", mod->name, - mod->core_layout.size, (void *)mod); -#ifdef CONFIG_MODULE_UNLOAD - kdb_printf("%4d ", module_refcount(mod)); -#endif - if (mod->state == MODULE_STATE_GOING) - kdb_printf(" (Unloading)"); - else if (mod->state == MODULE_STATE_COMING) - kdb_printf(" (Loading)"); - else - kdb_printf(" (Live)"); - kdb_printf(" 0x%px", mod->core_layout.base); - -#ifdef CONFIG_MODULE_UNLOAD - { - struct module_use *use; - kdb_printf(" [ "); - list_for_each_entry(use, &mod->source_list, - source_list) - kdb_printf("%s ", use->target->name); - kdb_printf("]\n"); - } -#endif - } - - return 0; -} - -#endif /* CONFIG_MODULES */ - /* * kdb_env - This function implements the 'env' command. Display the * current environment variables. 
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 0d2f9feea0a4..1f8c519a5f81 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -226,10 +226,6 @@ extern void kdb_kbd_cleanup_state(void); #define kdb_kbd_cleanup_state() #endif /* ! CONFIG_KDB_KEYBOARD */ -#ifdef CONFIG_MODULES -extern struct list_head *kdb_modules; -#endif /* CONFIG_MODULES */ - extern char kdb_prompt_str[]; #define KDB_WORD_SIZE ((int)sizeof(unsigned long)) diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 85cb51c4a17e..0a39497140bf 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/module/Makefile b/kernel/module/Makefile index cf8dcdc6b55f..88f5cdcdb067 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -17,3 +17,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PROC_FS) += procfs.o obj-$(CONFIG_SYSFS) += sysfs.o +obj-$(CONFIG_KGDB_KDB) += kdb.o diff --git a/kernel/module/kdb.c b/kernel/module/kdb.c new file mode 100644 index 000000000000..a446c699db0a --- /dev/null +++ b/kernel/module/kdb.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module kdb support + * + * Copyright (C) 2010 Jason Wessel + */ + +#include +#include +#include "internal.h" + +/* + * kdb_lsmod - This function implements the 'lsmod' command. Lists + * currently loaded kernel modules. + * Mostly taken from userland lsmod. + */ +int kdb_lsmod(int argc, const char **argv) +{ + struct module *mod; + + if (argc != 0) + return KDB_ARGCOUNT; + + kdb_printf("Module Size modstruct Used by\n"); + list_for_each_entry(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + kdb_printf("%-20s%8u 0x%px ", mod->name, + mod->core_layout.size, (void *)mod); +#ifdef CONFIG_MODULE_UNLOAD + kdb_printf("%4d ", module_refcount(mod)); +#endif + if (mod->state == MODULE_STATE_GOING) + kdb_printf(" (Unloading)"); + else if (mod->state == MODULE_STATE_COMING) + kdb_printf(" (Loading)"); + else + kdb_printf(" (Live)"); + kdb_printf(" 0x%px", mod->core_layout.base); + +#ifdef CONFIG_MODULE_UNLOAD + { + struct module_use *use; + + kdb_printf(" [ "); + list_for_each_entry(use, &mod->source_list, + source_list) + kdb_printf("%s ", use->target->name); + kdb_printf("]\n"); + } +#endif + } + + return 0; +} diff --git a/kernel/module/main.c b/kernel/module/main.c index 0cd0590dd411..a2dc54726621 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -108,10 +108,6 @@ static void mod_update_bounds(struct module *mod) __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); } -#ifdef CONFIG_KGDB_KDB -struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ -#endif /* CONFIG_KGDB_KDB */ - static void module_assert_mutex_or_preempt(void) { #ifdef CONFIG_LOCKDEP -- cgit v1.2.3-59-g8ed1b From 47889798da4307ed78346f04c5d95c87abbf696b Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Tue, 22 Mar 2022 14:03:44 +0000 Subject: module: Move version support into a separate file No functional change. This patch migrates module version support out of core code into kernel/module/version.c. In addition simple code refactoring to make this possible. 
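One piece of the refactoring deserves a worked example. With CONFIG_MODULE_REL_CRCS, a CRC table slot stores a signed 32-bit offset from its own location to the real value, and resolve_rel_crc() in the new version.c below turns that back into a u32. The following is a minimal userspace sketch of the same place-relative arithmetic, with made-up values; it is an illustration, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Mirrors resolve_rel_crc(): the slot holds an offset, not a pointer. */
static uint32_t resolve_rel_crc(const int32_t *crc)
{
	return *(const uint32_t *)((const char *)crc + *crc);
}

int main(void)
{
	static struct {
		int32_t rel;		/* stands in for one CRC table slot */
		uint32_t actual;	/* the value the slot refers to */
	} t = { 0, 0xdeadbeef };

	/* Record the distance from the slot to the value it describes. */
	t.rel = (int32_t)((char *)&t.actual - (char *)&t.rel);

	printf("crc = %#x\n", resolve_rel_crc(&t.rel));	/* crc = 0xdeadbeef */
	return 0;
}

Place-relative entries keep the CRC tables free of relocations, which is why they are preferred over absolute pointers on relocatable kernels.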
Reviewed-by: Christophe Leroy Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 1 + kernel/module/internal.h | 48 +++++++++++++++ kernel/module/main.c | 156 +++-------------------------------------------- kernel/module/version.c | 109 +++++++++++++++++++++++++++++++++ 4 files changed, 166 insertions(+), 148 deletions(-) create mode 100644 kernel/module/version.c (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 88f5cdcdb067..e2eff9853a28 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PROC_FS) += procfs.o obj-$(CONFIG_SYSFS) += sysfs.o obj-$(CONFIG_KGDB_KDB) += kdb.o +obj-$(CONFIG_MODVERSIONS) += version.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 62d749ef695e..3fc139d5074b 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -70,7 +70,27 @@ struct load_info { } index; }; +enum mod_license { + NOT_GPL_ONLY, + GPL_ONLY, +}; + +struct find_symbol_arg { + /* Input */ + const char *name; + bool gplok; + bool warn; + + /* Output */ + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + int mod_verify_sig(const void *mod, struct load_info *info); +int try_to_force_load(struct module *mod, const char *reason); +bool find_symbol(struct find_symbol_arg *fsa); struct module *find_module_all(const char *name, size_t len, bool even_unformed); int cmp_name(const void *name, const void *sym); long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, @@ -225,3 +245,31 @@ static inline int mod_sysfs_setup(struct module *mod, static inline void mod_sysfs_teardown(struct module *mod) { } static inline void init_param_lock(struct module *mod) { } #endif /* CONFIG_SYSFS */ + +#ifdef CONFIG_MODVERSIONS +int check_version(const struct load_info *info, + const char *symname, struct module *mod, const s32 *crc); +void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp, + struct kernel_symbol *ks, struct tracepoint * const *tp); +int check_modstruct_version(const struct load_info *info, struct module *mod); +int same_magic(const char *amagic, const char *bmagic, bool has_crcs); +#else /* !CONFIG_MODVERSIONS */ +static inline int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + return 1; +} + +static inline int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + return 1; +} + +static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) +{ + return strcmp(amagic, bmagic) == 0; +} +#endif /* CONFIG_MODVERSIONS */ diff --git a/kernel/module/main.c b/kernel/module/main.c index a2dc54726621..5f06c06df9e1 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -86,6 +86,12 @@ struct mod_tree_root mod_tree __cacheline_aligned = { static unsigned long module_addr_min = -1UL, module_addr_max; #endif /* CONFIG_MODULES_TREE_LOOKUP */ +struct symsearch { + const struct kernel_symbol *start, *stop; + const s32 *crcs; + enum mod_license license; +}; + /* * Bounds of module text, for speeding up __module_address. * Protected by module_mutex. @@ -244,28 +250,6 @@ static __maybe_unused void *any_section_objs(const struct load_info *info, #define symversion(base, idx) ((base != NULL) ? 
((base) + (idx)) : NULL) #endif -struct symsearch { - const struct kernel_symbol *start, *stop; - const s32 *crcs; - enum mod_license { - NOT_GPL_ONLY, - GPL_ONLY, - } license; -}; - -struct find_symbol_arg { - /* Input */ - const char *name; - bool gplok; - bool warn; - - /* Output */ - struct module *owner; - const s32 *crc; - const struct kernel_symbol *sym; - enum mod_license license; -}; - static bool check_exported_symbol(const struct symsearch *syms, struct module *owner, unsigned int symnum, void *data) @@ -327,7 +311,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, * Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. */ -static bool find_symbol(struct find_symbol_arg *fsa) +bool find_symbol(struct find_symbol_arg *fsa) { static const struct symsearch arr[] = { { __start___ksymtab, __stop___ksymtab, __start___kcrctab, @@ -1001,7 +985,7 @@ size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); static const char vermagic[] = VERMAGIC_STRING; -static int try_to_force_load(struct module *mod, const char *reason) +int try_to_force_load(struct module *mod, const char *reason) { #ifdef CONFIG_MODULE_FORCE_LOAD if (!test_taint(TAINT_FORCED_MODULE)) @@ -1013,115 +997,6 @@ static int try_to_force_load(struct module *mod, const char *reason) #endif } -#ifdef CONFIG_MODVERSIONS - -static u32 resolve_rel_crc(const s32 *crc) -{ - return *(u32 *)((void *)crc + *crc); -} - -static int check_version(const struct load_info *info, - const char *symname, - struct module *mod, - const s32 *crc) -{ - Elf_Shdr *sechdrs = info->sechdrs; - unsigned int versindex = info->index.vers; - unsigned int i, num_versions; - struct modversion_info *versions; - - /* Exporting module didn't supply crcs? OK, we're already tainted. */ - if (!crc) - return 1; - - /* No versions at all? modprobe --force does this. */ - if (versindex == 0) - return try_to_force_load(mod, symname) == 0; - - versions = (void *) sechdrs[versindex].sh_addr; - num_versions = sechdrs[versindex].sh_size - / sizeof(struct modversion_info); - - for (i = 0; i < num_versions; i++) { - u32 crcval; - - if (strcmp(versions[i].name, symname) != 0) - continue; - - if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) - crcval = resolve_rel_crc(crc); - else - crcval = *crc; - if (versions[i].crc == crcval) - return 1; - pr_debug("Found checksum %X vs module %lX\n", - crcval, versions[i].crc); - goto bad_version; - } - - /* Broken toolchain. Warn once, then let it go.. */ - pr_warn_once("%s: no symbol version for %s\n", info->name, symname); - return 1; - -bad_version: - pr_warn("%s: disagrees about version of symbol %s\n", - info->name, symname); - return 0; -} - -static inline int check_modstruct_version(const struct load_info *info, - struct module *mod) -{ - struct find_symbol_arg fsa = { - .name = "module_layout", - .gplok = true, - }; - - /* - * Since this should be found in kernel (which can't be removed), no - * locking is necessary -- use preempt_disable() to placate lockdep. - */ - preempt_disable(); - if (!find_symbol(&fsa)) { - preempt_enable(); - BUG(); - } - preempt_enable(); - return check_version(info, "module_layout", mod, fsa.crc); -} - -/* First part is kernel version, which we ignore if module has crcs. 
*/ -static inline int same_magic(const char *amagic, const char *bmagic, - bool has_crcs) -{ - if (has_crcs) { - amagic += strcspn(amagic, " "); - bmagic += strcspn(bmagic, " "); - } - return strcmp(amagic, bmagic) == 0; -} -#else -static inline int check_version(const struct load_info *info, - const char *symname, - struct module *mod, - const s32 *crc) -{ - return 1; -} - -static inline int check_modstruct_version(const struct load_info *info, - struct module *mod) -{ - return 1; -} - -static inline int same_magic(const char *amagic, const char *bmagic, - bool has_crcs) -{ - return strcmp(amagic, bmagic) == 0; -} -#endif /* CONFIG_MODVERSIONS */ - static char *get_modinfo(const struct load_info *info, const char *tag); static char *get_next_modinfo(const struct load_info *info, const char *tag, char *prev); @@ -3247,18 +3122,3 @@ void print_modules(void) pr_cont(" [last unloaded: %s]", last_unloaded_module); pr_cont("\n"); } - -#ifdef CONFIG_MODVERSIONS -/* - * Generate the signature for all relevant module structures here. - * If these change, we don't want to try to parse the module. - */ -void module_layout(struct module *mod, - struct modversion_info *ver, - struct kernel_param *kp, - struct kernel_symbol *ks, - struct tracepoint * const *tp) -{ -} -EXPORT_SYMBOL(module_layout); -#endif diff --git a/kernel/module/version.c b/kernel/module/version.c new file mode 100644 index 000000000000..adaedce1dc97 --- /dev/null +++ b/kernel/module/version.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module version support + * + * Copyright (C) 2008 Rusty Russell + */ + +#include +#include +#include +#include "internal.h" + +static u32 resolve_rel_crc(const s32 *crc) +{ + return *(u32 *)((void *)crc + *crc); +} + +int check_version(const struct load_info *info, + const char *symname, + struct module *mod, + const s32 *crc) +{ + Elf_Shdr *sechdrs = info->sechdrs; + unsigned int versindex = info->index.vers; + unsigned int i, num_versions; + struct modversion_info *versions; + + /* Exporting module didn't supply crcs? OK, we're already tainted. */ + if (!crc) + return 1; + + /* No versions at all? modprobe --force does this. */ + if (versindex == 0) + return try_to_force_load(mod, symname) == 0; + + versions = (void *)sechdrs[versindex].sh_addr; + num_versions = sechdrs[versindex].sh_size + / sizeof(struct modversion_info); + + for (i = 0; i < num_versions; i++) { + u32 crcval; + + if (strcmp(versions[i].name, symname) != 0) + continue; + + if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) + crcval = resolve_rel_crc(crc); + else + crcval = *crc; + if (versions[i].crc == crcval) + return 1; + pr_debug("Found checksum %X vs module %lX\n", + crcval, versions[i].crc); + goto bad_version; + } + + /* Broken toolchain. Warn once, then let it go.. */ + pr_warn_once("%s: no symbol version for %s\n", info->name, symname); + return 1; + +bad_version: + pr_warn("%s: disagrees about version of symbol %s\n", info->name, symname); + return 0; +} + +int check_modstruct_version(const struct load_info *info, + struct module *mod) +{ + struct find_symbol_arg fsa = { + .name = "module_layout", + .gplok = true, + }; + + /* + * Since this should be found in kernel (which can't be removed), no + * locking is necessary -- use preempt_disable() to placate lockdep. + */ + preempt_disable(); + if (!find_symbol(&fsa)) { + preempt_enable(); + BUG(); + } + preempt_enable(); + return check_version(info, "module_layout", mod, fsa.crc); +} + +/* First part is kernel version, which we ignore if module has crcs. 
*/ +int same_magic(const char *amagic, const char *bmagic, + bool has_crcs) +{ + if (has_crcs) { + amagic += strcspn(amagic, " "); + bmagic += strcspn(bmagic, " "); + } + return strcmp(amagic, bmagic) == 0; +} + +/* + * Generate the signature for all relevant module structures here. + * If these change, we don't want to try to parse the module. + */ +void module_layout(struct module *mod, + struct modversion_info *ver, + struct kernel_param *kp, + struct kernel_symbol *ks, + struct tracepoint * const *tp) +{ +} +EXPORT_SYMBOL(module_layout); -- cgit v1.2.3-59-g8ed1b From 0597579356fe5b6c0b99196e4743d4c2978f654a Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 10:00:58 +0100 Subject: module: Make module_enable_x() independent of CONFIG_ARCH_HAS_STRICT_MODULE_RWX module_enable_x() has nothing to do with CONFIG_ARCH_HAS_STRICT_MODULE_RWX, although by coincidence the architectures that need module_enable_x() select CONFIG_ARCH_HAS_STRICT_MODULE_RWX. Enable module_enable_x() for everyone, every time. If an architecture already has module text set executable, it's a no-op. Don't check text_size alignment. When CONFIG_STRICT_MODULE_RWX is set the verification is already done in frob_rodata(). When CONFIG_STRICT_MODULE_RWX is not set it is not a big deal to have the start of data as executable. Just make sure we entirely get the last page when the boundary is not aligned. And don't BUG on a misaligned base, as some architectures like nios2 use kmalloc() for allocating modules. So just bail out in that case. If that's a problem, a page fault will occur later anyway. Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 6 ++---- kernel/module/main.c | 12 +++++------- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 3fc139d5074b..972bc811dcd2 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -23,9 +23,9 @@ /* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but - * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y + * only when CONFIG_STRICT_MODULE_RWX=y */ -#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX +#ifdef CONFIG_STRICT_MODULE_RWX # define debug_align(X) PAGE_ALIGN(X) #else # define debug_align(X) (X) @@ -175,10 +175,8 @@ static inline struct module *mod_find(unsigned long addr) } #endif /* CONFIG_MODULES_TREE_LOOKUP */ -#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX void frob_text(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)); -#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ #ifdef CONFIG_STRICT_MODULE_RWX void module_enable_ro(const struct module *mod, bool after_init); diff --git a/kernel/module/main.c b/kernel/module/main.c index 5f06c06df9e1..2bf8c617a901 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1144,24 +1144,22 @@ resolve_symbol_wait(struct module *mod, * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of * whether we are strict.
*/ -#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX void frob_text(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { - BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); - BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); set_memory((unsigned long)layout->base, - layout->text_size >> PAGE_SHIFT); + PAGE_ALIGN(layout->text_size) >> PAGE_SHIFT); } static void module_enable_x(const struct module *mod) { + if (!PAGE_ALIGNED(mod->core_layout.base) || + !PAGE_ALIGNED(mod->init_layout.base)) + return; + frob_text(&mod->core_layout, set_memory_x); frob_text(&mod->init_layout, set_memory_x); } -#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ -static void module_enable_x(const struct module *mod) { } -#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ void __weak module_memfree(void *module_region) { -- cgit v1.2.3-59-g8ed1b From 32a08c17d8096f0fd2c6600bc5fe8464aaf68ea7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 10:00:59 +0100 Subject: module: Move module_enable_x() and frob_text() into strict_rwx.c Move module_enable_x() together with module_enable_nx() and module_enable_ro(). These three functions belong together: they are all used to set up the correct page flags on the different sections. As module_enable_x() is used independently of CONFIG_STRICT_MODULE_RWX, build strict_rwx.c all the time and use IS_ENABLED(CONFIG_STRICT_MODULE_RWX) when relevant. Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/Makefile | 3 +-- kernel/module/internal.h | 15 +-------------- kernel/module/main.c | 37 ----------------------------------- kernel/module/strict_rwx.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 53 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 88f5cdcdb067..d1ca799c12e2 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -7,12 +7,11 @@ # and produce insane amounts of uninteresting coverage.
KCOV_INSTRUMENT_module.o := n -obj-y += main.o +obj-y += main.o strict_rwx.o obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o obj-$(CONFIG_MODULE_SIG) += signing.o obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o -obj-$(CONFIG_STRICT_MODULE_RWX) += strict_rwx.o obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PROC_FS) += procfs.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 972bc811dcd2..c59473b232df 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -175,25 +175,12 @@ static inline struct module *mod_find(unsigned long addr) } #endif /* CONFIG_MODULES_TREE_LOOKUP */ -void frob_text(const struct module_layout *layout, int (*set_memory)(unsigned long start, - int num_pages)); - -#ifdef CONFIG_STRICT_MODULE_RWX void module_enable_ro(const struct module *mod, bool after_init); void module_enable_nx(const struct module *mod); +void module_enable_x(const struct module *mod); int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod); -#else /* !CONFIG_STRICT_MODULE_RWX */ -static inline void module_enable_nx(const struct module *mod) { } -static inline void module_enable_ro(const struct module *mod, bool after_init) {} -static inline int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *mod) -{ - return 0; -} -#endif /* CONFIG_STRICT_MODULE_RWX */ - #ifdef CONFIG_MODULE_SIG int module_sig_check(struct load_info *info, int flags); #else /* !CONFIG_MODULE_SIG */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 2bf8c617a901..7175b44ba88a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1124,43 +1124,6 @@ resolve_symbol_wait(struct module *mod, return ksym; } -/* - * LKM RO/NX protection: protect module's text/ro-data - * from modification and any data from execution. - * - * General layout of module is: - * [text] [read-only-data] [ro-after-init] [writable data] - * text_size -----^ ^ ^ ^ - * ro_size ------------------------| | | - * ro_after_init_size -----------------------------| | - * size -----------------------------------------------------------| - * - * These values are always page-aligned (as is base) - */ - -/* - * Since some arches are moving towards PAGE_KERNEL module allocations instead - * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the - * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of - * whether we are strict. - */ -void frob_text(const struct module_layout *layout, - int (*set_memory)(unsigned long start, int num_pages)) -{ - set_memory((unsigned long)layout->base, - PAGE_ALIGN(layout->text_size) >> PAGE_SHIFT); -} - -static void module_enable_x(const struct module *mod) -{ - if (!PAGE_ALIGNED(mod->core_layout.base) || - !PAGE_ALIGNED(mod->init_layout.base)) - return; - - frob_text(&mod->core_layout, set_memory_x); - frob_text(&mod->init_layout, set_memory_x); -} - void __weak module_memfree(void *module_region) { /* diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c index 7949dfd449c2..43332b4416b0 100644 --- a/kernel/module/strict_rwx.c +++ b/kernel/module/strict_rwx.c @@ -11,6 +11,34 @@ #include #include "internal.h" +/* + * LKM RO/NX protection: protect module's text/ro-data + * from modification and any data from execution. 
+ * + * General layout of module is: + * [text] [read-only-data] [ro-after-init] [writable data] + * text_size -----^ ^ ^ ^ + * ro_size ------------------------| | | + * ro_after_init_size -----------------------------| | + * size -----------------------------------------------------------| + * + * These values are always page-aligned (as is base) when + * CONFIG_STRICT_MODULE_RWX is set. + */ + +/* + * Since some arches are moving towards PAGE_KERNEL module allocations instead + * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() independent of + * CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we + * are strict. + */ +static void frob_text(const struct module_layout *layout, + int (*set_memory)(unsigned long start, int num_pages)) +{ + set_memory((unsigned long)layout->base, + PAGE_ALIGN(layout->text_size) >> PAGE_SHIFT); +} + static void frob_rodata(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { @@ -41,10 +69,24 @@ static void frob_writable_data(const struct module_layout *layout, (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); } +void module_enable_x(const struct module *mod) +{ + if (!PAGE_ALIGNED(mod->core_layout.base) || + !PAGE_ALIGNED(mod->init_layout.base)) + return; + + frob_text(&mod->core_layout, set_memory_x); + frob_text(&mod->init_layout, set_memory_x); +} + void module_enable_ro(const struct module *mod, bool after_init) { + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return; +#ifdef CONFIG_STRICT_MODULE_RWX if (!rodata_enabled) return; +#endif set_vm_flush_reset_perms(mod->core_layout.base); set_vm_flush_reset_perms(mod->init_layout.base); @@ -60,6 +102,9 @@ void module_enable_ro(const struct module *mod, bool after_init) void module_enable_nx(const struct module *mod) { + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return; + frob_rodata(&mod->core_layout, set_memory_nx); frob_ro_after_init(&mod->core_layout, set_memory_nx); frob_writable_data(&mod->core_layout, set_memory_nx); @@ -73,6 +118,9 @@ int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR; int i; + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return 0; + for (i = 0; i < hdr->e_shnum; i++) { if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) { pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n", -- cgit v1.2.3-59-g8ed1b From ef505058dc5524488a84423b4d5e8a7598a23a2e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 10:01:00 +0100 Subject: module: Rework layout alignment to avoid BUG_ON()s Perform layout alignment verification up front and WARN_ON() and fail module loading instead of crashing the machine. 
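The idiom this relies on is that WARN_ON(cond) evaluates to the truth of cond, so the individual alignment checks can be chained with || and the combined result handled as an ordinary error: the log gets a backtrace, complete_formation() returns -EINVAL, and the machine keeps running. A condensed sketch of the pattern (the real code in the strict_rwx.c hunk below covers all five layout fields):

/* WARN_ON() returns its condition, so misalignment checks compose. */
static bool layout_check_misalignment(const struct module_layout *layout)
{
	return WARN_ON(!PAGE_ALIGNED(layout->base)) ||
	       WARN_ON(!PAGE_ALIGNED(layout->text_size));
}

/* Caller, in complete_formation(): fail the load, don't kill the box. */
if (module_check_misalignment(mod))
	goto out_misaligned;		/* resolves to err = -EINVAL */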
Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 1 + kernel/module/main.c | 5 +++++ kernel/module/strict_rwx.c | 27 ++++++++++++++++++--------- 3 files changed, 24 insertions(+), 9 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index c59473b232df..e94defbeda00 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -180,6 +180,7 @@ void module_enable_nx(const struct module *mod); void module_enable_x(const struct module *mod); int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod); +bool module_check_misalignment(const struct module *mod); #ifdef CONFIG_MODULE_SIG int module_sig_check(struct load_info *info, int flags); diff --git a/kernel/module/main.c b/kernel/module/main.c index 7175b44ba88a..55e710bc7f46 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2550,6 +2550,9 @@ static int complete_formation(struct module *mod, struct load_info *info) /* This relies on module_mutex for list integrity. */ module_bug_finalize(info->hdr, info->sechdrs, mod); + if (module_check_misalignment(mod)) + goto out_misaligned; + module_enable_ro(mod, false); module_enable_nx(mod); module_enable_x(mod); @@ -2563,6 +2566,8 @@ static int complete_formation(struct module *mod, struct load_info *info) return 0; +out_misaligned: + err = -EINVAL; out: mutex_unlock(&module_mutex); return err; diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c index 43332b4416b0..f36ea54c1dac 100644 --- a/kernel/module/strict_rwx.c +++ b/kernel/module/strict_rwx.c @@ -42,9 +42,6 @@ static void frob_text(const struct module_layout *layout, static void frob_rodata(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { - BUG_ON(!PAGE_ALIGNED(layout->base)); - BUG_ON(!PAGE_ALIGNED(layout->text_size)); - BUG_ON(!PAGE_ALIGNED(layout->ro_size)); set_memory((unsigned long)layout->base + layout->text_size, (layout->ro_size - layout->text_size) >> PAGE_SHIFT); } @@ -52,9 +49,6 @@ static void frob_rodata(const struct module_layout *layout, static void frob_ro_after_init(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { - BUG_ON(!PAGE_ALIGNED(layout->base)); - BUG_ON(!PAGE_ALIGNED(layout->ro_size)); - BUG_ON(!PAGE_ALIGNED(layout->ro_after_init_size)); set_memory((unsigned long)layout->base + layout->ro_size, (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT); } @@ -62,13 +56,28 @@ static void frob_ro_after_init(const struct module_layout *layout, static void frob_writable_data(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { - BUG_ON(!PAGE_ALIGNED(layout->base)); - BUG_ON(!PAGE_ALIGNED(layout->ro_after_init_size)); - BUG_ON(!PAGE_ALIGNED(layout->size)); set_memory((unsigned long)layout->base + layout->ro_after_init_size, (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); } +static bool layout_check_misalignment(const struct module_layout *layout) +{ + return WARN_ON(!PAGE_ALIGNED(layout->base)) || + WARN_ON(!PAGE_ALIGNED(layout->text_size)) || + WARN_ON(!PAGE_ALIGNED(layout->ro_size)) || + WARN_ON(!PAGE_ALIGNED(layout->ro_after_init_size)) || + WARN_ON(!PAGE_ALIGNED(layout->size)); +} + +bool module_check_misalignment(const struct module *mod) +{ + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return false; + + return layout_check_misalignment(&mod->core_layout) || + 
layout_check_misalignment(&mod->init_layout); +} + void module_enable_x(const struct module *mod) { if (!PAGE_ALIGNED(mod->core_layout.base) || -- cgit v1.2.3-59-g8ed1b From 7337f929d5672e37a80c8582d357f084320f475f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 10:01:01 +0100 Subject: module: Rename debug_align() as strict_align() debug_align() was added by commit 84e1c6bb38eb ("x86: Add RO/NX protection for loadable kernel modules") At that time the config item was CONFIG_DEBUG_SET_MODULE_RONX. But nowadays it has changed to CONFIG_STRICT_MODULE_RWX and debug_align() is confusing because it has nothing to do with DEBUG. Rename it strict_align() Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 4 ++-- kernel/module/kallsyms.c | 4 ++-- kernel/module/main.c | 14 +++++++------- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index e94defbeda00..cbc268af23ae 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -26,9 +26,9 @@ * only when CONFIG_STRICT_MODULE_RWX=y */ #ifdef CONFIG_STRICT_MODULE_RWX -# define debug_align(X) PAGE_ALIGN(X) +# define strict_align(X) PAGE_ALIGN(X) #else -# define debug_align(X) (X) +# define strict_align(X) (X) #endif extern struct mutex module_mutex; diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index a3da0686a2a6..438492e103c8 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -139,7 +139,7 @@ void layout_symtab(struct module *mod, struct load_info *info) mod->core_layout.size += strtab_size; info->core_typeoffs = mod->core_layout.size; mod->core_layout.size += ndst * sizeof(char); - mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.size = strict_align(mod->core_layout.size); /* Put string table section at end of init part of module. 
*/ strsect->sh_flags |= SHF_ALLOC; @@ -154,7 +154,7 @@ void layout_symtab(struct module *mod, struct load_info *info) mod->init_layout.size += sizeof(struct mod_kallsyms); info->init_typeoffs = mod->init_layout.size; mod->init_layout.size += nsrc * sizeof(char); - mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.size = strict_align(mod->init_layout.size); } /* diff --git a/kernel/module/main.c b/kernel/module/main.c index 55e710bc7f46..e6a72c3651f6 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1447,19 +1447,19 @@ static void layout_sections(struct module *mod, struct load_info *info) } switch (m) { case 0: /* executable */ - mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.size = strict_align(mod->core_layout.size); mod->core_layout.text_size = mod->core_layout.size; break; case 1: /* RO: text and ro-data */ - mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.size = strict_align(mod->core_layout.size); mod->core_layout.ro_size = mod->core_layout.size; break; case 2: /* RO after init */ - mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.size = strict_align(mod->core_layout.size); mod->core_layout.ro_after_init_size = mod->core_layout.size; break; case 4: /* whole core */ - mod->core_layout.size = debug_align(mod->core_layout.size); + mod->core_layout.size = strict_align(mod->core_layout.size); break; } } @@ -1481,11 +1481,11 @@ static void layout_sections(struct module *mod, struct load_info *info) } switch (m) { case 0: /* executable */ - mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.size = strict_align(mod->init_layout.size); mod->init_layout.text_size = mod->init_layout.size; break; case 1: /* RO: text and ro-data */ - mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.size = strict_align(mod->init_layout.size); mod->init_layout.ro_size = mod->init_layout.size; break; case 2: @@ -1496,7 +1496,7 @@ static void layout_sections(struct module *mod, struct load_info *info) mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; break; case 4: /* whole init */ - mod->init_layout.size = debug_align(mod->init_layout.size); + mod->init_layout.size = strict_align(mod->init_layout.size); break; } } -- cgit v1.2.3-59-g8ed1b From 80b8bf4369906aefbcb63a03012aed7a1abcbd18 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 13:02:11 +0100 Subject: module: Always have struct mod_tree_root In order to separate text and data, we need to setup two rb trees. This means that struct mod_tree_root is required even without MODULES_TREE_LOOKUP. 
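It is worth noting why the structure earns its keep even without the latch tree: it carries the addr_min/addr_max pair that gives __module_address() a cheap range test before any tree or list walk. A sketch of that fast-path rejection, assuming the bounds are maintained as in __mod_update_bounds() in the next patch (the helper name here is hypothetical; the kernel open-codes the comparison):

/* Cheap filter: most kernel addresses lie outside module space. */
static bool addr_in_module_bounds(unsigned long addr,
				  const struct mod_tree_root *tree)
{
	return addr >= tree->addr_min && addr < tree->addr_max;
}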
Signed-off-by: Christophe Leroy Reviewed-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 4 +++- kernel/module/main.c | 5 ----- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index cbc268af23ae..ac64e53ac5e3 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -143,15 +143,17 @@ static inline void module_decompress_cleanup(struct load_info *info) } #endif -#ifdef CONFIG_MODULES_TREE_LOOKUP struct mod_tree_root { +#ifdef CONFIG_MODULES_TREE_LOOKUP struct latch_tree_root root; +#endif unsigned long addr_min; unsigned long addr_max; }; extern struct mod_tree_root mod_tree; +#ifdef CONFIG_MODULES_TREE_LOOKUP void mod_tree_insert(struct module *mod); void mod_tree_remove_init(struct module *mod); void mod_tree_remove(struct module *mod); diff --git a/kernel/module/main.c b/kernel/module/main.c index e6a72c3651f6..84d567c57575 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -74,7 +74,6 @@ static void do_free_init(struct work_struct *w); static DECLARE_WORK(init_free_wq, do_free_init); static LLIST_HEAD(init_free_list); -#ifdef CONFIG_MODULES_TREE_LOOKUP struct mod_tree_root mod_tree __cacheline_aligned = { .addr_min = -1UL, }; @@ -82,10 +81,6 @@ struct mod_tree_root mod_tree __cacheline_aligned = { #define module_addr_min mod_tree.addr_min #define module_addr_max mod_tree.addr_max -#else /* !CONFIG_MODULES_TREE_LOOKUP */ -static unsigned long module_addr_min = -1UL, module_addr_max; -#endif /* CONFIG_MODULES_TREE_LOOKUP */ - struct symsearch { const struct kernel_symbol *start, *stop; const s32 *crcs; -- cgit v1.2.3-59-g8ed1b From 446d55666d5599ca58c1ceac25ce4b5191e70835 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 13:02:12 +0100 Subject: module: Prepare for handling several RB trees In order to separate text and data, we need to setup two rb trees. Modify functions to give the tree as a parameter. Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 4 ++-- kernel/module/main.c | 16 ++++++++-------- kernel/module/tree_lookup.c | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index ac64e53ac5e3..0f3146347256 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -157,13 +157,13 @@ extern struct mod_tree_root mod_tree; void mod_tree_insert(struct module *mod); void mod_tree_remove_init(struct module *mod); void mod_tree_remove(struct module *mod); -struct module *mod_find(unsigned long addr); +struct module *mod_find(unsigned long addr, struct mod_tree_root *tree); #else /* !CONFIG_MODULES_TREE_LOOKUP */ static inline void mod_tree_insert(struct module *mod) { } static inline void mod_tree_remove_init(struct module *mod) { } static inline void mod_tree_remove(struct module *mod) { } -static inline struct module *mod_find(unsigned long addr) +static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *tree) { struct module *mod; diff --git a/kernel/module/main.c b/kernel/module/main.c index 84d567c57575..392ac847d90a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -91,22 +91,22 @@ struct symsearch { * Bounds of module text, for speeding up __module_address. * Protected by module_mutex. 
*/ -static void __mod_update_bounds(void *base, unsigned int size) +static void __mod_update_bounds(void *base, unsigned int size, struct mod_tree_root *tree) { unsigned long min = (unsigned long)base; unsigned long max = min + size; - if (min < module_addr_min) - module_addr_min = min; - if (max > module_addr_max) - module_addr_max = max; + if (min < tree->addr_min) + tree->addr_min = min; + if (max > tree->addr_max) + tree->addr_max = max; } static void mod_update_bounds(struct module *mod) { - __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); + __mod_update_bounds(mod->core_layout.base, mod->core_layout.size, &mod_tree); if (mod->init_layout.size) - __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); + __mod_update_bounds(mod->init_layout.base, mod->init_layout.size, &mod_tree); } static void module_assert_mutex_or_preempt(void) @@ -3017,7 +3017,7 @@ struct module *__module_address(unsigned long addr) module_assert_mutex_or_preempt(); - mod = mod_find(addr); + mod = mod_find(addr, &mod_tree); if (mod) { BUG_ON(!within_module(addr, mod)); if (mod->state == MODULE_STATE_UNFORMED) diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c index 0bc4ec3b22ce..995fe68059db 100644 --- a/kernel/module/tree_lookup.c +++ b/kernel/module/tree_lookup.c @@ -61,14 +61,14 @@ static const struct latch_tree_ops mod_tree_ops = { .comp = mod_tree_comp, }; -static noinline void __mod_tree_insert(struct mod_tree_node *node) +static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree) { - latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops); + latch_tree_insert(&node->node, &tree->root, &mod_tree_ops); } -static void __mod_tree_remove(struct mod_tree_node *node) +static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree) { - latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops); + latch_tree_erase(&node->node, &tree->root, &mod_tree_ops); } /* @@ -80,28 +80,28 @@ void mod_tree_insert(struct module *mod) mod->core_layout.mtn.mod = mod; mod->init_layout.mtn.mod = mod; - __mod_tree_insert(&mod->core_layout.mtn); + __mod_tree_insert(&mod->core_layout.mtn, &mod_tree); if (mod->init_layout.size) - __mod_tree_insert(&mod->init_layout.mtn); + __mod_tree_insert(&mod->init_layout.mtn, &mod_tree); } void mod_tree_remove_init(struct module *mod) { if (mod->init_layout.size) - __mod_tree_remove(&mod->init_layout.mtn); + __mod_tree_remove(&mod->init_layout.mtn, &mod_tree); } void mod_tree_remove(struct module *mod) { - __mod_tree_remove(&mod->core_layout.mtn); + __mod_tree_remove(&mod->core_layout.mtn, &mod_tree); mod_tree_remove_init(mod); } -struct module *mod_find(unsigned long addr) +struct module *mod_find(unsigned long addr, struct mod_tree_root *tree) { struct latch_tree_node *ltn; - ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops); + ltn = latch_tree_find((void *)addr, &tree->root, &mod_tree_ops); if (!ltn) return NULL; -- cgit v1.2.3-59-g8ed1b From 6ab9942c44b2d213a16b2620e4baf0223122222f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 13:02:13 +0100 Subject: module: Introduce data_layout In order to allow separation of data from text, add another layout, called data_layout. For architectures requesting separation of text and data, only text will go in core_layout and data will go in data_layout. 
For architectures which keep text and data together, make data_layout an alias of core_layout, so that data_layout can be used for all data manipulations, regardless of whether data is in core_layout or data_layout. Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 2 ++ kernel/module/kallsyms.c | 18 +++++++++--------- kernel/module/main.c | 20 ++++++++++++-------- kernel/module/strict_rwx.c | 10 +++++----- 4 files changed, 28 insertions(+), 22 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 0f3146347256..0aabbf5cbcd1 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -20,6 +20,8 @@ /* Maximum number of characters written by module_flags() */ #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) +#define data_layout core_layout /* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 438492e103c8..3e11523bc6f6 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -134,12 +134,12 @@ void layout_symtab(struct module *mod, struct load_info *info) } /* Append room for core symbols at end of core part. */ - info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); - info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); - mod->core_layout.size += strtab_size; - info->core_typeoffs = mod->core_layout.size; - mod->core_layout.size += ndst * sizeof(char); - mod->core_layout.size = strict_align(mod->core_layout.size); + info->symoffs = ALIGN(mod->data_layout.size, symsect->sh_addralign ?: 1); + info->stroffs = mod->data_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); + mod->data_layout.size += strtab_size; + info->core_typeoffs = mod->data_layout.size; + mod->data_layout.size += ndst * sizeof(char); + mod->data_layout.size = strict_align(mod->data_layout.size); /* Put string table section at end of init part of module. */ strsect->sh_flags |= SHF_ALLOC; @@ -187,9 +187,9 @@ void add_kallsyms(struct module *mod, const struct load_info *info) * Now populate the cut down core kallsyms for after init * and set types up while we still have access to sections. */ - mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; - mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; - mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; + mod->core_kallsyms.symtab = dst = mod->data_layout.base + info->symoffs; + mod->core_kallsyms.strtab = s = mod->data_layout.base + info->stroffs; + mod->core_kallsyms.typetab = mod->data_layout.base + info->core_typeoffs; src = rcu_dereference_sched(mod->kallsyms)->symtab; for (ndst = i = 0; i < rcu_dereference_sched(mod->kallsyms)->num_symtab; i++) { rcu_dereference_sched(mod->kallsyms)->typetab[i] = elf_type(src + i, info); diff --git a/kernel/module/main.c b/kernel/module/main.c index 392ac847d90a..78658283408d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1190,7 +1190,7 @@ static void free_module(struct module *mod) percpu_modfree(mod); /* Free lock-classes; relies on the preceding sync_rcu().
*/ - lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); + lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size); /* Finally, free the core (containing the module structure) */ module_memfree(mod->core_layout.base); @@ -1431,13 +1431,15 @@ static void layout_sections(struct module *mod, struct load_info *info) for (i = 0; i < info->hdr->e_shnum; ++i) { Elf_Shdr *s = &info->sechdrs[i]; const char *sname = info->secstrings + s->sh_name; + unsigned int *sizep; if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL || module_init_layout_section(sname)) continue; - s->sh_entsize = module_get_offset(mod, &mod->core_layout.size, s, i); + sizep = m ? &mod->data_layout.size : &mod->core_layout.size; + s->sh_entsize = module_get_offset(mod, sizep, s, i); pr_debug("\t%s\n", sname); } switch (m) { @@ -1446,15 +1448,15 @@ static void layout_sections(struct module *mod, struct load_info *info) mod->core_layout.text_size = mod->core_layout.size; break; case 1: /* RO: text and ro-data */ - mod->core_layout.size = strict_align(mod->core_layout.size); - mod->core_layout.ro_size = mod->core_layout.size; + mod->data_layout.size = strict_align(mod->data_layout.size); + mod->data_layout.ro_size = mod->data_layout.size; break; case 2: /* RO after init */ - mod->core_layout.size = strict_align(mod->core_layout.size); - mod->core_layout.ro_after_init_size = mod->core_layout.size; + mod->data_layout.size = strict_align(mod->data_layout.size); + mod->data_layout.ro_after_init_size = mod->data_layout.size; break; case 4: /* whole core */ - mod->core_layout.size = strict_align(mod->core_layout.size); + mod->data_layout.size = strict_align(mod->data_layout.size); break; } } @@ -2134,6 +2136,8 @@ static int move_module(struct module *mod, struct load_info *info) if (shdr->sh_entsize & INIT_OFFSET_MASK) dest = mod->init_layout.base + (shdr->sh_entsize & ~INIT_OFFSET_MASK); + else if (!(shdr->sh_flags & SHF_EXECINSTR)) + dest = mod->data_layout.base + shdr->sh_entsize; else dest = mod->core_layout.base + shdr->sh_entsize; @@ -2829,7 +2833,7 @@ static int load_module(struct load_info *info, const char __user *uargs, mutex_unlock(&module_mutex); free_module: /* Free lock-classes; relies on the preceding sync_rcu() */ - lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); + lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size); module_deallocate(mod, info); free_copy: diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c index f36ea54c1dac..fe3c10891407 100644 --- a/kernel/module/strict_rwx.c +++ b/kernel/module/strict_rwx.c @@ -101,12 +101,12 @@ void module_enable_ro(const struct module *mod, bool after_init) set_vm_flush_reset_perms(mod->init_layout.base); frob_text(&mod->core_layout, set_memory_ro); - frob_rodata(&mod->core_layout, set_memory_ro); + frob_rodata(&mod->data_layout, set_memory_ro); frob_text(&mod->init_layout, set_memory_ro); frob_rodata(&mod->init_layout, set_memory_ro); if (after_init) - frob_ro_after_init(&mod->core_layout, set_memory_ro); + frob_ro_after_init(&mod->data_layout, set_memory_ro); } void module_enable_nx(const struct module *mod) @@ -114,9 +114,9 @@ void module_enable_nx(const struct module *mod) if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) return; - frob_rodata(&mod->core_layout, set_memory_nx); - frob_ro_after_init(&mod->core_layout, set_memory_nx); - frob_writable_data(&mod->core_layout, set_memory_nx); + frob_rodata(&mod->data_layout, set_memory_nx); + 
frob_ro_after_init(&mod->data_layout, set_memory_nx); + frob_writable_data(&mod->data_layout, set_memory_nx); frob_rodata(&mod->init_layout, set_memory_nx); frob_writable_data(&mod->init_layout, set_memory_nx); } -- cgit v1.2.3-59-g8ed1b From 01dc0386efb769056257410ba5754558384006a7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 13:02:14 +0100 Subject: module: Add CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC Add CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC to allow architectures to request having module data in the vmalloc area instead of the module area. This is required on powerpc book3s/32 in order to set data non-executable, because executability cannot be set on a per-page basis there; it is set per 256 Mbyte segment. The module area has exec rights, while the vmalloc area is noexec. This can also be useful on other powerpc/32 platforms in order to maximize the chance of module code being close enough to the kernel core to avoid branch trampolines. Cc: Jason Wessel Acked-by: Daniel Thompson Cc: Douglas Anderson Signed-off-by: Christophe Leroy [mcgrof: rebased in light of kernel/module/kdb.c move] Signed-off-by: Luis Chamberlain --- arch/Kconfig | 6 +++++ include/linux/module.h | 8 +++++++ kernel/module/internal.h | 3 +++ kernel/module/kdb.c | 10 ++++++-- kernel/module/main.c | 58 +++++++++++++++++++++++++++++++++++++++++++-- kernel/module/procfs.c | 8 +++++-- kernel/module/strict_rwx.c | 1 + kernel/module/tree_lookup.c | 8 +++++++ 8 files changed, 96 insertions(+), 6 deletions(-) (limited to 'kernel/module') diff --git a/arch/Kconfig b/arch/Kconfig index 29b0167c088b..24945cee808b 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -888,6 +888,12 @@ config MODULES_USE_ELF_REL Modules only use ELF REL relocations. Modules with ELF RELA relocations will give an error. +config ARCH_WANTS_MODULES_DATA_IN_VMALLOC + bool + help + For architectures like powerpc/32 which have constraints on module + allocation and need to allocate module data outside of module area. + config HAVE_IRQ_EXIT_ON_IRQ_STACK bool help diff --git a/include/linux/module.h b/include/linux/module.h index 5e2059f3afc7..46d4d5f2516e 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -422,6 +422,9 @@ struct module { /* Core layout: rbtree is accessed frequently, so keep together.
*/ struct module_layout core_layout __module_layout_align; struct module_layout init_layout; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + struct module_layout data_layout; +#endif /* Arch-specific module values */ struct mod_arch_specific arch; @@ -569,6 +572,11 @@ bool is_module_text_address(unsigned long addr); static inline bool within_module_core(unsigned long addr, const struct module *mod) { +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + if ((unsigned long)mod->data_layout.base <= addr && + addr < (unsigned long)mod->data_layout.base + mod->data_layout.size) + return true; +#endif return (unsigned long)mod->core_layout.base <= addr && addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; } diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 0aabbf5cbcd1..3e23bef5884d 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -20,7 +20,9 @@ /* Maximum number of characters written by module_flags() */ #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) +#ifndef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC #define data_layout core_layout +#endif /* * Modules' sections will be aligned on page boundaries @@ -154,6 +156,7 @@ struct mod_tree_root { }; extern struct mod_tree_root mod_tree; +extern struct mod_tree_root mod_data_tree; #ifdef CONFIG_MODULES_TREE_LOOKUP void mod_tree_insert(struct module *mod); diff --git a/kernel/module/kdb.c b/kernel/module/kdb.c index a446c699db0a..f4317f92e189 100644 --- a/kernel/module/kdb.c +++ b/kernel/module/kdb.c @@ -26,8 +26,11 @@ int kdb_lsmod(int argc, const char **argv) if (mod->state == MODULE_STATE_UNFORMED) continue; - kdb_printf("%-20s%8u 0x%px ", mod->name, - mod->core_layout.size, (void *)mod); + kdb_printf("%-20s%8u", mod->name, mod->core_layout.size); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + kdb_printf("/%8u", mod->data_layout.size); +#endif + kdb_printf(" 0x%px ", (void *)mod); #ifdef CONFIG_MODULE_UNLOAD kdb_printf("%4d ", module_refcount(mod)); #endif @@ -38,6 +41,9 @@ int kdb_lsmod(int argc, const char **argv) else kdb_printf(" (Live)"); kdb_printf(" 0x%px", mod->core_layout.base); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + kdb_printf("/0x%px", mod->data_layout.base); +#endif #ifdef CONFIG_MODULE_UNLOAD { diff --git a/kernel/module/main.c b/kernel/module/main.c index 78658283408d..84b828431dcb 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -78,6 +78,12 @@ struct mod_tree_root mod_tree __cacheline_aligned = { .addr_min = -1UL, }; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC +struct mod_tree_root mod_data_tree __cacheline_aligned = { + .addr_min = -1UL, +}; +#endif + #define module_addr_min mod_tree.addr_min #define module_addr_max mod_tree.addr_max @@ -107,6 +113,9 @@ static void mod_update_bounds(struct module *mod) __mod_update_bounds(mod->core_layout.base, mod->core_layout.size, &mod_tree); if (mod->init_layout.size) __mod_update_bounds(mod->init_layout.base, mod->init_layout.size, &mod_tree); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + __mod_update_bounds(mod->data_layout.base, mod->data_layout.size, &mod_data_tree); +#endif } static void module_assert_mutex_or_preempt(void) @@ -940,6 +949,17 @@ static ssize_t show_coresize(struct module_attribute *mattr, static struct module_attribute modinfo_coresize = __ATTR(coresize, 0444, show_coresize, NULL); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC +static ssize_t show_datasize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) +{ + return sprintf(buffer, 
"%u\n", mk->mod->data_layout.size); +} + +static struct module_attribute modinfo_datasize = + __ATTR(datasize, 0444, show_datasize, NULL); +#endif + static ssize_t show_initsize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { @@ -968,6 +988,9 @@ struct module_attribute *modinfo_attrs[] = { &modinfo_srcversion, &modinfo_initstate, &modinfo_coresize, +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + &modinfo_datasize, +#endif &modinfo_initsize, &modinfo_taint, #ifdef CONFIG_MODULE_UNLOAD @@ -1194,6 +1217,9 @@ static void free_module(struct module *mod) /* Finally, free the core (containing the module structure) */ module_memfree(mod->core_layout.base); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + vfree(mod->data_layout.base); +#endif } void *__symbol_get(const char *symbol) @@ -2124,6 +2150,24 @@ static int move_module(struct module *mod, struct load_info *info) } else mod->init_layout.base = NULL; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + /* Do the allocs. */ + ptr = vmalloc(mod->data_layout.size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a + * leak. + */ + kmemleak_not_leak(ptr); + if (!ptr) { + module_memfree(mod->core_layout.base); + module_memfree(mod->init_layout.base); + return -ENOMEM; + } + + memset(ptr, 0, mod->data_layout.size); + mod->data_layout.base = ptr; +#endif /* Transfer each section which specifies SHF_ALLOC */ pr_debug("final section addresses:\n"); for (i = 0; i < info->hdr->e_shnum; i++) { @@ -2299,6 +2343,9 @@ static void module_deallocate(struct module *mod, struct load_info *info) module_arch_freeing_init(mod); module_memfree(mod->init_layout.base); module_memfree(mod->core_layout.base); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + vfree(mod->data_layout.base); +#endif } int __weak module_finalize(const Elf_Ehdr *hdr, @@ -3015,13 +3062,20 @@ bool is_module_address(unsigned long addr) struct module *__module_address(unsigned long addr) { struct module *mod; + struct mod_tree_root *tree; - if (addr < module_addr_min || addr > module_addr_max) + if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) + tree = &mod_tree; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + else if (addr >= mod_data_tree.addr_min && addr <= mod_data_tree.addr_max) + tree = &mod_data_tree; +#endif + else return NULL; module_assert_mutex_or_preempt(); - mod = mod_find(addr, &mod_tree); + mod = mod_find(addr, tree); if (mod) { BUG_ON(!within_module(addr, mod)); if (mod->state == MODULE_STATE_UNFORMED) diff --git a/kernel/module/procfs.c b/kernel/module/procfs.c index 2717e130788e..9a8f4f0f6329 100644 --- a/kernel/module/procfs.c +++ b/kernel/module/procfs.c @@ -67,13 +67,17 @@ static int m_show(struct seq_file *m, void *p) struct module *mod = list_entry(p, struct module, list); char buf[MODULE_FLAGS_BUF_SIZE]; void *value; + unsigned int size; /* We always ignore unformed modules. */ if (mod->state == MODULE_STATE_UNFORMED) return 0; - seq_printf(m, "%s %u", - mod->name, mod->init_layout.size + mod->core_layout.size); + size = mod->init_layout.size + mod->core_layout.size; +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + size += mod->data_layout.size; +#endif + seq_printf(m, "%s %u", mod->name, size); print_unload_info(m, mod); /* Informative for users. 
*/ diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c index fe3c10891407..14fbea66f12f 100644 --- a/kernel/module/strict_rwx.c +++ b/kernel/module/strict_rwx.c @@ -75,6 +75,7 @@ bool module_check_misalignment(const struct module *mod) return false; return layout_check_misalignment(&mod->core_layout) || + layout_check_misalignment(&mod->data_layout) || layout_check_misalignment(&mod->init_layout); } diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c index 995fe68059db..8ec5cfd60496 100644 --- a/kernel/module/tree_lookup.c +++ b/kernel/module/tree_lookup.c @@ -83,6 +83,11 @@ void mod_tree_insert(struct module *mod) __mod_tree_insert(&mod->core_layout.mtn, &mod_tree); if (mod->init_layout.size) __mod_tree_insert(&mod->init_layout.mtn, &mod_tree); + +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + mod->data_layout.mtn.mod = mod; + __mod_tree_insert(&mod->data_layout.mtn, &mod_data_tree); +#endif } void mod_tree_remove_init(struct module *mod) @@ -95,6 +100,9 @@ void mod_tree_remove(struct module *mod) { __mod_tree_remove(&mod->core_layout.mtn, &mod_tree); mod_tree_remove_init(mod); +#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC + __mod_tree_remove(&mod->data_layout.mtn, &mod_data_tree); +#endif } struct module *mod_find(unsigned long addr, struct mod_tree_root *tree) -- cgit v1.2.3-59-g8ed1b From 55ce556dbf92ec779b65336593d213ceef3f26f3 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Feb 2022 13:02:15 +0100 Subject: module: Remove module_addr_min and module_addr_max Replace module_addr_min and module_addr_max with mod_tree.addr_min and mod_tree.addr_max. Signed-off-by: Christophe Leroy Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index 84b828431dcb..05a42d8fcd7a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -63,7 +63,7 @@ * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), * 2) module_use links, - * 3) module_addr_min/module_addr_max. + * 3) mod_tree.addr_min/mod_tree.addr_max. * (delete and add uses RCU list operations). */ DEFINE_MUTEX(module_mutex); @@ -2972,14 +2972,14 @@ static void cfi_init(struct module *mod) mod->exit = *exit; #endif - cfi_module_add(mod, module_addr_min); + cfi_module_add(mod, mod_tree.addr_min); #endif } static void cfi_cleanup(struct module *mod) { #ifdef CONFIG_CFI_CLANG - cfi_module_remove(mod, module_addr_min); + cfi_module_remove(mod, mod_tree.addr_min); #endif } -- cgit v1.2.3-59-g8ed1b From c14e522bc76efed6e947cd0ab83a1fac7a7a3ec9 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Mon, 2 May 2022 21:51:03 +0100 Subject: module: Make module_flags_taint() accept a module's taints bitmap and usable outside core code No functional change. The purpose of this patch is to modify module_flags_taint() to accept a module's taints bitmap as a parameter and to modify all users accordingly. Furthermore, a given module's taint flags can now be rendered outside of core module code, while the function itself remains restricted to internal use. This is in preparation for module unload taint tracking support.
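A minimal sketch of the new calling convention (hypothetical caller; buffer sizing follows MODULE_FLAGS_BUF_SIZE from internal.h, and the pr_info() line is illustrative only, not part of the patch):

	char buf[MODULE_FLAGS_BUF_SIZE];
	size_t l;

	/* Render the taint characters for a raw taints bitmap. */
	l = module_flags_taint(mod->taints, buf);
	buf[l] = '\0';	/* the helper does not NUL-terminate */
	pr_info("%s taints: %s\n", mod->name, buf);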
Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 1 + kernel/module/main.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 3e23bef5884d..abbd1c5ef264 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -100,6 +100,7 @@ int cmp_name(const void *name, const void *sym); long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section); char *module_flags(struct module *mod, char *buf); +size_t module_flags_taint(unsigned long taints, char *buf); static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS diff --git a/kernel/module/main.c b/kernel/module/main.c index 05a42d8fcd7a..7dbdd098b995 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -890,13 +890,13 @@ static inline int module_unload_init(struct module *mod) } #endif /* CONFIG_MODULE_UNLOAD */ -static size_t module_flags_taint(struct module *mod, char *buf) +size_t module_flags_taint(unsigned long taints, char *buf) { size_t l = 0; int i; for (i = 0; i < TAINT_FLAGS_COUNT; i++) { - if (taint_flags[i].module && test_bit(i, &mod->taints)) + if (taint_flags[i].module && test_bit(i, &taints)) buf[l++] = taint_flags[i].c_true; } @@ -974,7 +974,7 @@ static ssize_t show_taint(struct module_attribute *mattr, { size_t l; - l = module_flags_taint(mk->mod, buffer); + l = module_flags_taint(mk->mod->taints, buffer); buffer[l++] = '\n'; return l; } @@ -2993,7 +2993,7 @@ char *module_flags(struct module *mod, char *buf) mod->state == MODULE_STATE_GOING || mod->state == MODULE_STATE_COMING) { buf[bx++] = '('; - bx += module_flags_taint(mod, buf + bx); + bx += module_flags_taint(mod->taints, buf + bx); /* Show a - for module-is-being-unloaded */ if (mod->state == MODULE_STATE_GOING) buf[bx++] = '-'; -- cgit v1.2.3-59-g8ed1b From 6fb0538d0121ffab770a505b183968d93466ad59 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Mon, 2 May 2022 21:51:04 +0100 Subject: module: Move module_assert_mutex_or_preempt() to internal.h No functional change. This patch migrates module_assert_mutex_or_preempt() to internal.h. So, the aforementioned function can be used outside of core module code (main.c) yet remains restricted to internal use only.
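The assertion's contract is unchanged by the move: callers must either hold module_mutex or be inside an RCU-sched read-side critical section (preemption disabled). A schematic, hypothetical caller in another kernel/module/ file, for illustration only:

	preempt_disable();
	module_assert_mutex_or_preempt();
	/* ... walk RCU-protected module data ... */
	preempt_enable();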
Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- kernel/module/internal.h | 12 ++++++++++++ kernel/module/main.c | 11 ----------- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/internal.h b/kernel/module/internal.h index abbd1c5ef264..0bdf64c9dfb5 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -10,6 +10,7 @@ #include #include #include +#include #ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 #endif @@ -102,6 +103,17 @@ int cmp_name(const void *name, const void *sym); long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section); char *module_flags(struct module *mod, char *buf); size_t module_flags_taint(unsigned long taints, char *buf); +static inline void module_assert_mutex_or_preempt(void) +{ +#ifdef CONFIG_LOCKDEP + if (unlikely(!debug_locks)) + return; + + WARN_ON_ONCE(!rcu_read_lock_sched_held() && + !lockdep_is_held(&module_mutex)); +#endif +} + static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS diff --git a/kernel/module/main.c b/kernel/module/main.c index 7dbdd098b995..7a0484900320 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -118,17 +118,6 @@ static void mod_update_bounds(struct module *mod) #endif } -static void module_assert_mutex_or_preempt(void) -{ -#ifdef CONFIG_LOCKDEP - if (unlikely(!debug_locks)) - return; - - WARN_ON_ONCE(!rcu_read_lock_sched_held() && - !lockdep_is_held(&module_mutex)); -#endif -} - /* Block module loading/unloading? */ int modules_disabled = 0; core_param(nomodule, modules_disabled, bint, 0); -- cgit v1.2.3-59-g8ed1b From 99bd9956551b27cb6f5b445abaced7e13b9976cd Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Mon, 2 May 2022 21:52:52 +0100 Subject: module: Introduce module unload taint tracking Currently, only the initial module that tainted the kernel is recorded e.g. when an out-of-tree module is loaded. The purpose of this patch is to allow the kernel to maintain a record of each unloaded module that tainted the kernel. So, in addition to displaying a list of linked modules (see print_modules()) e.g. in the event of a detected bad page, unloaded modules that carried a taint (or taints) are displayed too. A tainted module unload count is maintained. The number of tracked modules is not fixed. This feature is disabled by default. Signed-off-by: Aaron Tomlin Signed-off-by: Luis Chamberlain --- init/Kconfig | 11 +++++++ kernel/module/Makefile | 1 + kernel/module/internal.h | 21 +++++++++++++++++ kernel/module/main.c | 5 ++++ kernel/module/tracking.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 99 insertions(+) create mode 100644 kernel/module/tracking.c (limited to 'kernel/module') diff --git a/init/Kconfig b/init/Kconfig index ddcbefe535e9..6b30210f787d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -2118,6 +2118,17 @@ config MODULE_FORCE_UNLOAD rmmod). This is mainly for kernel developers and desperate users. If unsure, say N. +config MODULE_UNLOAD_TAINT_TRACKING + bool "Tainted module unload tracking" + depends on MODULE_UNLOAD + default n + help + This option allows you to maintain a record of each unloaded + module that tainted the kernel. In addition to displaying a + list of linked (or loaded) modules e.g. on detection of a bad + page (see bad_page()), the aforementioned details are also + shown. If unsure, say N.
+ config MODVERSIONS bool "Module versioning support" help diff --git a/kernel/module/Makefile b/kernel/module/Makefile index d1ca799c12e2..948efea81e85 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_PROC_FS) += procfs.o obj-$(CONFIG_SYSFS) += sysfs.o obj-$(CONFIG_KGDB_KDB) += kdb.o obj-$(CONFIG_MODVERSIONS) += version.o +obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 0bdf64c9dfb5..bc5507ab8450 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -145,6 +145,27 @@ static inline bool set_livepatch_module(struct module *mod) #endif } +#ifdef CONFIG_MODULE_UNLOAD_TAINT_TRACKING +struct mod_unload_taint { + struct list_head list; + char name[MODULE_NAME_LEN]; + unsigned long taints; + u64 count; +}; + +int try_add_tainted_module(struct module *mod); +void print_unloaded_tainted_modules(void); +#else /* !CONFIG_MODULE_UNLOAD_TAINT_TRACKING */ +static inline int try_add_tainted_module(struct module *mod) +{ + return 0; +} + +static inline void print_unloaded_tainted_modules(void) +{ +} +#endif /* CONFIG_MODULE_UNLOAD_TAINT_TRACKING */ + #ifdef CONFIG_MODULE_DECOMPRESS int module_decompress(struct load_info *info, const void *buf, size_t size); void module_decompress_cleanup(struct load_info *info); diff --git a/kernel/module/main.c b/kernel/module/main.c index 7a0484900320..6c3b4a846645 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1190,6 +1190,9 @@ static void free_module(struct module *mod) module_bug_cleanup(mod); /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ synchronize_rcu(); + if (try_add_tainted_module(mod)) + pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", + mod->name); mutex_unlock(&module_mutex); /* Clean up CFI for the module. 
*/ @@ -3125,6 +3128,8 @@ void print_modules(void) continue; pr_cont(" %s%s", mod->name, module_flags(mod, buf)); } + + print_unloaded_tainted_modules(); preempt_enable(); if (last_unloaded_module[0]) pr_cont(" [last unloaded: %s]", last_unloaded_module); diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c new file mode 100644 index 000000000000..7f8133044d09 --- /dev/null +++ b/kernel/module/tracking.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Module taint unload tracking support + * + * Copyright (C) 2022 Aaron Tomlin + */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static LIST_HEAD(unloaded_tainted_modules); + +int try_add_tainted_module(struct module *mod) +{ + struct mod_unload_taint *mod_taint; + + module_assert_mutex_or_preempt(); + + list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules, list, + lockdep_is_held(&module_mutex)) { + if (!strcmp(mod_taint->name, mod->name) && + mod_taint->taints & mod->taints) { + mod_taint->count++; + goto out; + } + } + + mod_taint = kmalloc(sizeof(*mod_taint), GFP_KERNEL); + if (unlikely(!mod_taint)) + return -ENOMEM; + strscpy(mod_taint->name, mod->name, MODULE_NAME_LEN); + mod_taint->taints = mod->taints; + list_add_rcu(&mod_taint->list, &unloaded_tainted_modules); + mod_taint->count = 1; +out: + return 0; +} + +void print_unloaded_tainted_modules(void) +{ + struct mod_unload_taint *mod_taint; + char buf[MODULE_FLAGS_BUF_SIZE]; + + if (!list_empty(&unloaded_tainted_modules)) { + printk(KERN_DEFAULT "Unloaded tainted modules:"); + list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules, + list) { + size_t l; + + l = module_flags_taint(mod_taint->taints, buf); + buf[l++] = '\0'; + pr_cont(" %s(%s):%llu", mod_taint->name, buf, + mod_taint->count); + } + } +} -- cgit v1.2.3-59-g8ed1b From 391e982bfa632b8315235d8be9c0a81374c6a19c Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 4 May 2022 12:54:20 +0300 Subject: module: fix [e_shstrndx].sh_size=0 OOB access It is trivial to craft a module to trigger OOB access in this line: if (info->secstrings[strhdr->sh_size - 1] != '\0') { BUG: unable to handle page fault for address: ffffc90000aa0fff PGD 100000067 P4D 100000067 PUD 100066067 PMD 10436f067 PTE 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 7 PID: 1215 Comm: insmod Not tainted 5.18.0-rc5-00007-g9bf578647087-dirty #10 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-4.fc34 04/01/2014 RIP: 0010:load_module+0x19b/0x2391 Fixes: ec2a29593c83 ("module: harden ELF info handling") Signed-off-by: Alexey Dobriyan [rebased patch onto modules-next] Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index 6c3b4a846645..23432fabfde8 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1726,6 +1726,10 @@ static int elf_validity_check(struct load_info *info) * strings in the section safe. 
*/ info->secstrings = (void *)info->hdr + strhdr->sh_offset; + if (strhdr->sh_size == 0) { + pr_err("empty section name table\n"); + goto no_exec; + } if (info->secstrings[strhdr->sh_size - 1] != '\0') { pr_err("ELF Spec violation: section name table isn't null terminated\n"); goto no_exec; -- cgit v1.2.3-59-g8ed1b From 8eac910a49347821cbafc770a319e00ccd69d58b Mon Sep 17 00:00:00 2001 From: Lecopzer Chen Date: Wed, 27 Apr 2022 15:36:06 +0800 Subject: module: show disallowed symbol name for inherit_taint() The error log for inherit_taint() doesn't really help to find the symbol which violates GPL rules. For example, if a module has 300 symbols and includes 50 disallowed symbols, the log only shows the line below and we have no idea which symbols they are. AAA: module using GPL-only symbols uses symbols from proprietary module BBB. That is hard to act on for a user who does not know how symbols are resolved. This patch adds the symbol name to the message so the offending symbols are reported explicitly. AAA: module using GPL-only symbols uses symbols SSS from proprietary module BBB. Signed-off-by: Lecopzer Chen Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index 23432fabfde8..ac0a7882899b 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1038,20 +1038,20 @@ static int verify_namespace_is_imported(const struct load_info *info, return 0; } -static bool inherit_taint(struct module *mod, struct module *owner) +static bool inherit_taint(struct module *mod, struct module *owner, const char *name) { if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) return true; if (mod->using_gplonly_symbols) { - pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n", - mod->name, owner->name); + pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", + mod->name, name, owner->name); return false; } if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { - pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n", - mod->name, owner->name); + pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", + mod->name, name, owner->name); set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); } return true; @@ -1083,7 +1083,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, if (fsa.license == GPL_ONLY) mod->using_gplonly_symbols = true; - if (!inherit_taint(mod, fsa.owner)) { + if (!inherit_taint(mod, fsa.owner, name)) { fsa.sym = NULL; goto getname; } -- cgit v1.2.3-59-g8ed1b From c6eee9df57a6d9252bae93a9386d0d872798f5d5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 May 2022 12:52:10 +0900 Subject: module: do not pass opaque pointer for symbol search There is no need to use an opaque pointer for check_exported_symbol() or find_exported_symbol_in_section(). Pass (struct find_symbol_arg *) explicitly.
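The change is purely one of typing; condensed from the hunk below, the helper's prototype goes from an opaque pointer that had to be cast back, to the concrete type:

	/* before */
	static bool check_exported_symbol(const struct symsearch *syms,
					  struct module *owner,
					  unsigned int symnum, void *data);

	/* after */
	static bool check_exported_symbol(const struct symsearch *syms,
					  struct module *owner, unsigned int symnum,
					  struct find_symbol_arg *fsa);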
Signed-off-by: Masahiro Yamada Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index ac0a7882899b..d00a8de6d8f5 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -244,11 +244,9 @@ static __maybe_unused void *any_section_objs(const struct load_info *info, #endif static bool check_exported_symbol(const struct symsearch *syms, - struct module *owner, - unsigned int symnum, void *data) + struct module *owner, unsigned int symnum, + struct find_symbol_arg *fsa) { - struct find_symbol_arg *fsa = data; - if (!fsa->gplok && syms->license == GPL_ONLY) return false; fsa->owner = owner; @@ -285,16 +283,15 @@ int cmp_name(const void *name, const void *sym) static bool find_exported_symbol_in_section(const struct symsearch *syms, struct module *owner, - void *data) + struct find_symbol_arg *fsa) { - struct find_symbol_arg *fsa = data; struct kernel_symbol *sym; sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); if (sym != NULL && check_exported_symbol(syms, owner, - sym - syms->start, data)) + sym - syms->start, fsa)) return true; return false; -- cgit v1.2.3-59-g8ed1b From cdd66eb52fdaa9bdab7f1be8dc9162bf4acc64ae Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 May 2022 12:52:11 +0900 Subject: module: do not binary-search in __ksymtab_gpl if fsa->gplok is false Currently, (!fsa->gplok && syms->license == GPL_ONLY) is checked after bsearch() succeeds. It is meaningless to do the binary search in the GPL symbol table when fsa->gplok is false because we know find_exported_symbol_in_section() will fail anyway. This check should be done before bsearch(). Signed-off-by: Masahiro Yamada Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index d00a8de6d8f5..8a068fff437c 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -247,8 +247,6 @@ static bool check_exported_symbol(const struct symsearch *syms, struct module *owner, unsigned int symnum, struct find_symbol_arg *fsa) { - if (!fsa->gplok && syms->license == GPL_ONLY) - return false; fsa->owner = owner; fsa->crc = symversion(syms->crcs, symnum); fsa->sym = &syms->start[symnum]; @@ -287,6 +285,9 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, { struct kernel_symbol *sym; + if (!fsa->gplok && syms->license == GPL_ONLY) + return false; + sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); -- cgit v1.2.3-59-g8ed1b From 7390b94a3c2d93272d6da4945b81a9cf78055b7b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 May 2022 12:52:12 +0900 Subject: module: merge check_exported_symbol() into find_exported_symbol_in_section() Now check_exported_symbol() always succeeds. Merge it into find_exported_symbol_in_section() to make the code concise.
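After the merge, the whole helper reads as follows (reconstructed from the hunk below, shown whole for readability):

	static bool find_exported_symbol_in_section(const struct symsearch *syms,
						    struct module *owner,
						    struct find_symbol_arg *fsa)
	{
		struct kernel_symbol *sym;

		/* Skip GPL-only tables entirely for non-GPL requesters. */
		if (!fsa->gplok && syms->license == GPL_ONLY)
			return false;

		sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			      sizeof(struct kernel_symbol), cmp_name);
		if (!sym)
			return false;

		/* Fill in the result in place; no separate check step remains. */
		fsa->owner = owner;
		fsa->crc = symversion(syms->crcs, sym - syms->start);
		fsa->sym = sym;
		fsa->license = syms->license;

		return true;
	}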
Signed-off-by: Masahiro Yamada Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'kernel/module') diff --git a/kernel/module/main.c b/kernel/module/main.c index 8a068fff437c..fed58d30725d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -243,17 +243,6 @@ static __maybe_unused void *any_section_objs(const struct load_info *info, #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) #endif -static bool check_exported_symbol(const struct symsearch *syms, - struct module *owner, unsigned int symnum, - struct find_symbol_arg *fsa) -{ - fsa->owner = owner; - fsa->crc = symversion(syms->crcs, symnum); - fsa->sym = &syms->start[symnum]; - fsa->license = syms->license; - return true; -} - static const char *kernel_symbol_name(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -290,12 +279,15 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); + if (!sym) + return false; - if (sym != NULL && check_exported_symbol(syms, owner, - sym - syms->start, fsa)) - return true; + fsa->owner = owner; + fsa->crc = symversion(syms->crcs, sym - syms->start); + fsa->sym = sym; + fsa->license = syms->license; - return false; + return true; } /* -- cgit v1.2.3-59-g8ed1b