-rw-r--r--  init/main.c      7
-rw-r--r--  kernel/module.c  5
2 files changed, 12 insertions(+), 0 deletions(-)
diff --git a/init/main.c b/init/main.c
index a404936d85d8..fd37315835b4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1034,6 +1034,13 @@ __setup("rodata=", set_debug_rodata);
 static void mark_readonly(void)
 {
 	if (rodata_enabled) {
+		/*
+		 * load_module() results in W+X mappings, which are cleaned up
+		 * with call_rcu_sched(). Let's make sure that queued work is
+		 * flushed so that we don't hit false positives looking for
+		 * insecure pages which are W+X.
+		 */
+		rcu_barrier_sched();
 		mark_rodata_ro();
 		rodata_test();
 	} else
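
The hunk above relies on a simple guarantee: rcu_barrier_sched() does not return until every callback already queued with call_rcu_sched() has run, so by the time mark_rodata_ro() and rodata_test() scan the page tables, any W+X mapping whose teardown was queued by load_module() is already gone. As a minimal sketch of that consumer side of the pattern, assuming a hypothetical demo_scan_for_wx_pages() standing in for the architecture's W+X check (later kernels spell the barrier rcu_barrier(), after the RCU flavors were consolidated):

#include <linux/rcupdate.h>

/* Hypothetical stand-in for the arch's W+X page-table scan. */
static void demo_scan_for_wx_pages(void);

static void demo_check_after_init(void)
{
	/* Flush every callback already queued with call_rcu_sched() ... */
	rcu_barrier_sched();
	/* ... so the scan cannot see mappings that are about to be freed. */
	demo_scan_for_wx_pages();
}
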
diff --git a/kernel/module.c b/kernel/module.c
index ce8066b88178..c9bea7f2b43e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3517,6 +3517,11 @@ static noinline int do_init_module(struct module *mod)
 	 * walking this with preempt disabled. In all the failure paths, we
 	 * call synchronize_sched(), but we don't want to slow down the success
 	 * path, so use actual RCU here.
+	 * Note that module_alloc() on most architectures creates W+X page
+	 * mappings which won't be cleaned up until do_free_init() runs. Any
+	 * code such as mark_rodata_ro() which depends on those mappings to
+	 * be cleaned up needs to sync with the queued work - ie
+	 * rcu_barrier_sched()
 	 */
 	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
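
For context, the producer side of the same pattern is what queues that work in the first place: do_init_module() hands the init sections to call_rcu_sched() (the call_rcu_sched(&freeinit->rcu, do_free_init) context line above), so the W+X init mapping survives until the callback runs after a grace period. A rough, self-contained sketch under that assumption, where struct demo_initfree and demo_free_init() are hypothetical analogues of the kernel's struct mod_initfree and do_free_init():

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical analogue of struct mod_initfree. */
struct demo_initfree {
	struct rcu_head rcu;
	void *init_text;	/* stand-in for the module's init mapping */
};

/* RCU callback, analogous to do_free_init(): runs after a grace period. */
static void demo_free_init(struct rcu_head *head)
{
	struct demo_initfree *f = container_of(head, struct demo_initfree, rcu);

	/* Freeing this is what finally removes the transient W+X pages. */
	kfree(f->init_text);
	kfree(f);
}

/* Queue the cleanup instead of doing it synchronously on the success path. */
static void demo_finish_load(struct demo_initfree *f)
{
	call_rcu_sched(&f->rcu, demo_free_init);
}

Because the free is deferred, any code that needs those mappings gone before it runs, such as the mark_readonly() path patched in init/main.c above, has to wait with rcu_barrier_sched() rather than assume the pages were unmapped by the time load_module() returned.
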