From ffb6e0c9a0572f8e5f8e9337a1b40ac2ec1493a1 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 26 May 2015 14:45:29 -0700
Subject: tty: remove platform_sysrq_reset_seq

The platform_sysrq_reset_seq code was intended as a way for an embedded
platform to provide its own sysrq sequence at compile time. After over two
years, nobody has started using it in an upstream kernel, and the platforms
that were interested in it have moved on to devicetree, which can be used to
configure the sequence without requiring kernel changes. The method is also
incompatible with the way that most architectures build support for multiple
platforms into a single kernel.

Now the code is producing warnings when built with gcc-5.1:

drivers/tty/sysrq.c: In function 'sysrq_init':
drivers/tty/sysrq.c:959:33: warning: array subscript is above array bounds [-Warray-bounds]
   key = platform_sysrq_reset_seq[i];

We could fix this, but it seems unlikely that it will ever be used, so let's
just remove the code instead. We still have the option to pass the sequence
either in DT, on the kernel command line, or via the
/sys/module/sysrq/parameters/reset_seq file.

Fixes: 154b7a489a ("Input: sysrq - allow specifying alternate reset sequence")
Signed-off-by: Arnd Bergmann
Signed-off-by: Dmitry Torokhov
---
 drivers/tty/sysrq.c | 19 +------------------
 1 file changed, 1 insertion(+), 18 deletions(-)

(limited to 'drivers/tty/sysrq.c')

diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 259a4d5a4e8f..b21881e39d28 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -55,9 +55,6 @@ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 
 static bool __read_mostly sysrq_always_enabled;
 
-unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
-int sysrq_reset_downtime_ms __weak;
-
 static bool sysrq_on(void)
 {
         return sysrq_enabled || sysrq_always_enabled;
@@ -568,6 +565,7 @@ void handle_sysrq(int key)
 EXPORT_SYMBOL(handle_sysrq);
 
 #ifdef CONFIG_INPUT
+static int sysrq_reset_downtime_ms;
 
 /* Simple translation table for the SysRq keys */
 static const unsigned char sysrq_xlate[KEY_CNT] =
@@ -948,23 +946,8 @@ static bool sysrq_handler_registered;
 
 static inline void sysrq_register_handler(void)
 {
-        unsigned short key;
         int error;
-        int i;
-
-        /* First check if a __weak interface was instantiated. */
-        for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
-                key = platform_sysrq_reset_seq[i];
-                if (key == KEY_RESERVED || key > KEY_MAX)
-                        break;
-
-                sysrq_reset_seq[sysrq_reset_seq_len++] = key;
-        }
-        /*
-         * DT configuration takes precedence over anything that would
-         * have been defined via the __weak interface.
-         */
         sysrq_of_get_keyreset_config();
 
         error = input_register_handler(&sysrq_handler);
-- cgit v1.2.3-59-g8ed1b
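(A note on the configuration paths kept by the patch above: the reset sequence is given as a list of input keycodes. The lines below are only an illustration, with made-up keycodes and on the assumption that the module parameter is writable at runtime; they are not taken from the patch itself:

    sysrq.reset_seq=0x1d,0x38,0x6f                                  # on the kernel command line
    echo 0x1d,0x38,0x6f > /sys/module/sysrq/parameters/reset_seq    # at runtime via sysfs)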
From d1e9a4f547354ca258b6200ab6b1f0e4909b92e8 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Tue, 19 May 2015 09:50:29 +0100
Subject: MIPS: Add SysRq operation to dump TLBs on all CPUs

Add a MIPS specific SysRq operation to dump the TLB entries on all CPUs,
using the 'x' trigger key.

Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10072/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/Makefile |  1 +
 arch/mips/kernel/sysrq.c  | 77 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/tty/sysrq.c       |  1 +
 3 files changed, 79 insertions(+)
 create mode 100644 arch/mips/kernel/sysrq.c

(limited to 'drivers/tty/sysrq.c')

diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d3d2ff2d76dc..a2debcbedb6d 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
 obj-$(CONFIG_64BIT) += cpu-bugs64.o
 
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
new file mode 100644
index 000000000000..5b539f5fc9d9
--- /dev/null
+++ b/arch/mips/kernel/sysrq.c
@@ -0,0 +1,77 @@
+/*
+ * MIPS specific sysrq operations.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+        const int field = 2 * sizeof(unsigned long);
+        unsigned long flags;
+
+        spin_lock_irqsave(&show_lock, flags);
+
+        pr_info("CPU%d:\n", smp_processor_id());
+        pr_info("Index  : %0x\n", read_c0_index());
+        pr_info("Pagemask: %0x\n", read_c0_pagemask());
+        pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
+        pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+        pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+        pr_info("Wired   : %0x\n", read_c0_wired());
+        pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
+        if (cpu_has_htw) {
+                pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
+                pr_info("PWSize  : %0*lx\n", field, read_c0_pwsize());
+                pr_info("PWCtl   : %0x\n", read_c0_pwctl());
+        }
+        pr_info("\n");
+        dump_tlb_all();
+        pr_info("\n");
+
+        spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+        smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+        sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+        schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static struct sysrq_key_op sysrq_tlbdump_op = {
+        .handler        = sysrq_handle_tlbdump,
+        .help_msg       = "show-tlbs(x)",
+        .action_msg     = "Show TLB entries",
+        .enable_mask    = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init mips_sysrq_init(void)
+{
+        return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(mips_sysrq_init);
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 843f2cdc280b..8ba52e56bb8b 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -463,6 +463,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
         /* v: May be registered for frame buffer console restore */
         NULL,                           /* v */
         &sysrq_showstate_blocked_op,    /* w */
+        /* x: May be registered on mips for TLB dump */
         /* x: May be registered on ppc/powerpc for xmon */
         /* x: May be registered on sparc64 for global PMU dump */
         NULL,                           /* x */
-- cgit v1.2.3-59-g8ed1b
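(A note on the patch above: once the handler is registered for 'x', the TLB dump is triggered like any other SysRq action, for example via the Alt+SysRq+x keyboard chord or, assuming CONFIG_MAGIC_SYSRQ and procfs are enabled, from a shell:

    echo x > /proc/sysrq-trigger)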
From dc56401fc9f25e8f93899991ec858c98a331d88c Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 24 Jun 2015 16:57:19 -0700
Subject: mm: oom_kill: simplify OOM killer locking

The zonelist locking and the oom_sem are two overlapping locks that are
used to serialize global OOM killing against different things.

The historical zonelist locking serializes OOM kills from allocations with
overlapping zonelists against each other to prevent killing more tasks than
necessary in the same memory domain. Only when neither tasklists nor
zonelists from two concurrent OOM kills overlap (tasks in separate memcgs
bound to separate nodes) are OOM kills allowed to execute in parallel.

The younger oom_sem is a read-write lock to serialize OOM killing against
the PM code trying to disable the OOM killer altogether.

However, the OOM killer is a fairly cold error path; there is really no
reason to optimize for highly performant and concurrent OOM kills. And the
oom_sem is just flat-out redundant.

Replace both locking schemes with a single global mutex serializing OOM
kills regardless of context.

Signed-off-by: Johannes Weiner
Acked-by: Michal Hocko
Acked-by: David Rientjes
Cc: Tetsuo Handa
Cc: Andrea Arcangeli
Cc: Dave Chinner
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/tty/sysrq.c |   2 +
 include/linux/oom.h |   5 +--
 mm/memcontrol.c     |  18 +++++---
 mm/oom_kill.c       | 127 +++++++++++-----------------------------------------
 mm/page_alloc.c     |   8 ++--
 5 files changed, 46 insertions(+), 114 deletions(-)

(limited to 'drivers/tty/sysrq.c')

diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 843f2cdc280b..b20d2c0ec451 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -356,9 +356,11 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
+        mutex_lock(&oom_lock);
         if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
                            GFP_KERNEL, 0, NULL, true))
                 pr_info("OOM request ignored because killer is disabled\n");
+        mutex_unlock(&oom_lock);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index a8e6a498cbcb..7deecb7bca5e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -32,6 +32,8 @@ enum oom_scan_t {
 /* Thread is the potential origin of an oom condition; kill first on oom */
 #define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
 
+extern struct mutex oom_lock;
+
 static inline void set_current_oom_origin(void)
 {
         current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -60,9 +62,6 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask,
                              int order, struct mem_cgroup *memcg, nodemask_t *nodemask,
                              const char *message);
 
-extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
-
 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                                int order, const nodemask_t *nodemask,
                                struct mem_cgroup *memcg);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 20a7e874f719..8da44a083397 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1530,6 +1530,8 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
         unsigned int points = 0;
         struct task_struct *chosen = NULL;
 
+        mutex_lock(&oom_lock);
+
         /*
          * If current has a pending SIGKILL or is exiting, then automatically
          * select it.  The goal is to allow it to allocate so that it may
@@ -1537,7 +1539,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
          */
         if (fatal_signal_pending(current) || task_will_free_mem(current)) {
                 mark_oom_victim(current);
-                return;
+                goto unlock;
         }
 
         check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
@@ -1564,7 +1566,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                         mem_cgroup_iter_break(memcg, iter);
                         if (chosen)
                                 put_task_struct(chosen);
-                        return;
+                        goto unlock;
                 case OOM_SCAN_OK:
                         break;
                 };
@@ -1585,11 +1587,13 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                 css_task_iter_end(&it);
         }
 
-        if (!chosen)
-                return;
-        points = chosen_points * 1000 / totalpages;
-        oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
-                         NULL, "Memory cgroup out of memory");
+        if (chosen) {
+                points = chosen_points * 1000 / totalpages;
+                oom_kill_process(chosen, gfp_mask, order, points, totalpages,
+                                 memcg, NULL, "Memory cgroup out of memory");
+        }
+unlock:
+        mutex_unlock(&oom_lock);
 }
 
 #if MAX_NUMNODES > 1
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d3490b019d46..5cfda39b3268 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -42,7 +42,8 @@
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
-static DEFINE_SPINLOCK(zone_scan_lock);
+
+DEFINE_MUTEX(oom_lock);
 
 #ifdef CONFIG_NUMA
 /**
@@ -405,13 +406,12 @@ static atomic_t oom_victims = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
 
 bool oom_killer_disabled __read_mostly;
-static DECLARE_RWSEM(oom_sem);
 
 /**
  * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
  *
- * Has to be called with oom_sem taken for read and never after
+ * Has to be called with oom_lock held and never after
  * oom has been disabled already.
  */
 void mark_oom_victim(struct task_struct *tsk)
@@ -460,14 +460,14 @@ bool oom_killer_disable(void)
          * Make sure to not race with an ongoing OOM killer
          * and that the current is not the victim.
          */
-        down_write(&oom_sem);
+        mutex_lock(&oom_lock);
         if (test_thread_flag(TIF_MEMDIE)) {
-                up_write(&oom_sem);
+                mutex_unlock(&oom_lock);
                 return false;
         }
 
         oom_killer_disabled = true;
-        up_write(&oom_sem);
+        mutex_unlock(&oom_lock);
 
         wait_event(oom_victims_wait, !atomic_read(&oom_victims));
 
@@ -634,52 +634,6 @@ int unregister_oom_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 
-/*
- * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
- * if a parallel OOM killing is already taking place that includes a zone in
- * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
- */
-bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
-{
-        struct zoneref *z;
-        struct zone *zone;
-        bool ret = true;
-
-        spin_lock(&zone_scan_lock);
-        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-                if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
-                        ret = false;
-                        goto out;
-                }
-
-        /*
-         * Lock each zone in the zonelist under zone_scan_lock so a parallel
-         * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
-         */
-        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-                set_bit(ZONE_OOM_LOCKED, &zone->flags);
-
-out:
-        spin_unlock(&zone_scan_lock);
-        return ret;
-}
-
-/*
- * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
- * allocation attempts with zonelists containing them may now recall the OOM
- * killer, if necessary.
- */
-void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
-{
-        struct zoneref *z;
-        struct zone *zone;
-
-        spin_lock(&zone_scan_lock);
-        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-                clear_bit(ZONE_OOM_LOCKED, &zone->flags);
-        spin_unlock(&zone_scan_lock);
-}
-
 /**
  * __out_of_memory - kill the "best" process when we run out of memory
  * @zonelist: zonelist pointer
@@ -693,8 +647,8 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-                int order, nodemask_t *nodemask, bool force_kill)
+bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+                int order, nodemask_t *nodemask, bool force_kill)
 {
         const nodemask_t *mpol_mask;
         struct task_struct *p;
@@ -704,10 +658,13 @@ static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         enum oom_constraint constraint = CONSTRAINT_NONE;
         int killed = 0;
 
+        if (oom_killer_disabled)
+                return false;
+
         blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
         if (freed > 0)
                 /* Got some memory back in the last second. */
-                return;
+                goto out;
 
         /*
          * If current has a pending SIGKILL or is exiting, then automatically
@@ -720,7 +677,7 @@ static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         if (current->mm &&
             (fatal_signal_pending(current) || task_will_free_mem(current))) {
                 mark_oom_victim(current);
-                return;
+                goto out;
         }
 
         /*
@@ -760,32 +717,8 @@ out:
          */
         if (killed)
                 schedule_timeout_killable(1);
-}
-
-/**
- * out_of_memory - tries to invoke OOM killer.
- * @zonelist: zonelist pointer
- * @gfp_mask: memory allocation flags
- * @order: amount of memory being requested as a power of 2
- * @nodemask: nodemask passed to page allocator
- * @force_kill: true if a task must be killed, even if others are exiting
- *
- * invokes __out_of_memory if the OOM is not disabled by oom_killer_disable()
- * when it returns false. Otherwise returns true.
- */
-bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-                int order, nodemask_t *nodemask, bool force_kill)
-{
-        bool ret = false;
-
-        down_read(&oom_sem);
-        if (!oom_killer_disabled) {
-                __out_of_memory(zonelist, gfp_mask, order, nodemask, force_kill);
-                ret = true;
-        }
-        up_read(&oom_sem);
-        return ret;
+        return true;
 }
 
 /*
@@ -795,27 +728,21 @@ bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
  */
 void pagefault_out_of_memory(void)
 {
-        struct zonelist *zonelist;
-
-        down_read(&oom_sem);
         if (mem_cgroup_oom_synchronize(true))
-                goto unlock;
+                return;
 
-        zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
-        if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
-                if (!oom_killer_disabled)
-                        __out_of_memory(NULL, 0, 0, NULL, false);
-                else
-                        /*
-                         * There shouldn't be any user tasks runable while the
-                         * OOM killer is disabled so the current task has to
-                         * be a racing OOM victim for which oom_killer_disable()
-                         * is waiting for.
-                         */
-                        WARN_ON(test_thread_flag(TIF_MEMDIE));
+        if (!mutex_trylock(&oom_lock))
+                return;
 
-                oom_zonelist_unlock(zonelist, GFP_KERNEL);
+        if (!out_of_memory(NULL, 0, 0, NULL, false)) {
+                /*
+                 * There shouldn't be any user tasks runnable while the
+                 * OOM killer is disabled, so the current task has to
+                 * be a racing OOM victim for which oom_killer_disable()
+                 * is waiting for.
+                 */
+                WARN_ON(test_thread_flag(TIF_MEMDIE));
         }
-unlock:
-        up_read(&oom_sem);
+
+        mutex_unlock(&oom_lock);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b02be4def90..cae21dc9d54e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2360,10 +2360,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         *did_some_progress = 0;
 
         /*
-         * Acquire the per-zone oom lock for each zone.  If that
-         * fails, somebody else is making progress for us.
+         * Acquire the oom lock.  If that fails, somebody else is
+         * making progress for us.
          */
-        if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
+        if (!mutex_trylock(&oom_lock)) {
                 *did_some_progress = 1;
                 schedule_timeout_uninterruptible(1);
                 return NULL;
@@ -2408,7 +2408,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                 || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
                 *did_some_progress = 1;
 out:
-        oom_zonelist_unlock(ac->zonelist, gfp_mask);
+        mutex_unlock(&oom_lock);
         return page;
 }
 
-- cgit v1.2.3-59-g8ed1b