From 27d4ee03078aba88c5e07dcc4917e8d01d046f38 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sun, 11 Feb 2018 10:38:28 +0100
Subject: workqueue: Allow retrieval of current task's work struct

Introduce a helper to retrieve the current task's work struct if it is
a workqueue worker.

This allows us to fix a long-standing deadlock in several DRM drivers
wherein the ->runtime_suspend callback waits for a specific worker to
finish and that worker in turn calls a function which waits for runtime
suspend to finish. That function is invoked from multiple call sites
and waiting for runtime suspend to finish is the correct thing to do
except if it's executing in the context of the worker.

Cc: Lai Jiangshan
Cc: Dave Airlie
Cc: Ben Skeggs
Cc: Alex Deucher
Acked-by: Tejun Heo
Reviewed-by: Lyude Paul
Signed-off-by: Lukas Wunner
Link: https://patchwork.freedesktop.org/patch/msgid/2d8f603074131eb87e588d2b803a71765bd3a2fd.1518338788.git.lukas@wunner.de
---
 include/linux/workqueue.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef96aff5..bc0cda180c8b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
+extern struct work_struct *current_work(void);
 extern bool current_is_workqueue_rescuer(void);
 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
--
cgit v1.2.3-59-g8ed1b

From c0248c96631f38f02d58762fc018e316843acac8 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Feb 2018 16:41:56 +0000
Subject: arm_pmu: kill arm_pmu_platdata

Now that we have no platforms passing platform data to the arm_pmu
code, we can get rid of the platdata and associated hooks, paving the
way for rework of our IRQ handling.

Signed-off-by: Mark Rutland
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 drivers/perf/arm_pmu.c       | 27 ++++-----------------------
 include/linux/perf/arm_pmu.h | 17 -----------------
 2 files changed, 4 insertions(+), 40 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7bc5eee96b31..82b09d1cb42c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -320,17 +319,9 @@ validate_group(struct perf_event *event)
	return 0;
 }

-static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
-{
-	struct platform_device *pdev = armpmu->plat_device;
-
-	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
-}
-
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
	struct arm_pmu *armpmu;
-	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

@@ -342,13 +333,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
	 */
	armpmu = *(void **)dev;
-	plat = armpmu_get_platdata(armpmu);
-
	start_clock = sched_clock();
-	if (plat && plat->handle_irq)
-		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-	else
-		ret = armpmu->handle_irq(irq, armpmu);
+	ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
@@ -578,7 +564,6 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
			goto err_out;
		}
	} else {
-		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,13 +574,9 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
			goto err_out;
		}

-		if (platdata && platdata->irq_flags) {
-			irq_flags = platdata->irq_flags;
-		} else {
-			irq_flags = IRQF_PERCPU |
-				    IRQF_NOBALANCING |
-				    IRQF_NO_THREAD;
-		}
+		irq_flags = IRQF_PERCPU |
+			    IRQF_NOBALANCING |
+			    IRQF_NO_THREAD;

		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index af0f44effd44..712764b35c6a 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,23 +17,6 @@
 #include
 #include

-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- *	interrupt and passed the address of the low level handler,
- *	and can be used to implement any platform specific handling
- *	before or after calling it.
- *
- * @irq_flags: if non-zero, these flags will be passed to request_irq
- *	when requesting interrupts for this PMU device.
- */
-struct arm_pmu_platdata {
-	irqreturn_t (*handle_irq)(int irq, void *dev,
-				  irq_handler_t pmu_handler);
-	unsigned long irq_flags;
-};
-
 #ifdef CONFIG_ARM_PMU

 /*
--
cgit v1.2.3-59-g8ed1b

From d3d5aac206b4e9e569a22fe1811c909dde17587c Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Feb 2018 16:41:57 +0000
Subject: arm_pmu: fold platform helpers into platform code

The armpmu_{request,free}_irqs() helpers are only used by
arm_pmu_platform.c, so let's fold them in and make them static.

Signed-off-by: Mark Rutland
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 drivers/perf/arm_pmu.c          | 21 ---------------------
 drivers/perf/arm_pmu_platform.c | 21 +++++++++++++++++++++
 include/linux/perf/arm_pmu.h    |  2 --
 3 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 82b09d1cb42c..373dfd7d8a1d 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -534,14 +534,6 @@ void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 }

-void armpmu_free_irqs(struct arm_pmu *armpmu)
-{
-	int cpu;
-
-	for_each_cpu(cpu, &armpmu->supported_cpus)
-		armpmu_free_irq(armpmu, cpu);
-}
-
 int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 {
	int err = 0;
@@ -593,19 +585,6 @@ err_out:
	return err;
 }

-int armpmu_request_irqs(struct arm_pmu *armpmu)
-{
-	int cpu, err;
-
-	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		err = armpmu_request_irq(armpmu, cpu);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 46501cc79fd7..244558cfdbce 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -164,6 +164,27 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
	return 0;
 }

+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+	int cpu, err;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		err = armpmu_request_irq(armpmu, cpu);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+	int cpu;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus)
+		armpmu_free_irq(armpmu, cpu);
+}
+
 int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 712764b35c6a..899bc7ef0881 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -159,8 +159,6 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 struct arm_pmu *armpmu_alloc(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irqs(struct arm_pmu *armpmu);
-void armpmu_free_irqs(struct arm_pmu *armpmu);
 int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
 void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
--
cgit v1.2.3-59-g8ed1b

From 0dc1a1851af1d593eee248b94c1277c7c7ccbbce Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Feb 2018 16:41:58 +0000
Subject: arm_pmu: add armpmu_alloc_atomic()

In ACPI systems, we don't know the makeup of CPUs until we hotplug them
on, and thus have to allocate the PMU datastructures at hotplug time.
Thus, we must use GFP_ATOMIC allocations.

Let's add an armpmu_alloc_atomic() that we can use in this case.
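
As a minimal sketch of the kind of caller this enables (the callback
name below is hypothetical; the real user is the hotplug path in
arm_pmu_acpi.c), a CPU "starting" hotplug callback runs on the incoming
CPU with interrupts disabled, which is why the allocation must be
GFP_ATOMIC:

	/* Hypothetical hotplug callback: runs with irqs off, so it must not sleep. */
	static int example_pmu_cpu_starting(unsigned int cpu)
	{
		struct arm_pmu *pmu = armpmu_alloc_atomic();	/* GFP_ATOMIC internally */

		if (!pmu)
			return -ENOMEM;	/* atomic pools may be exhausted; cannot block */

		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		return 0;
	}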

Signed-off-by: Mark Rutland
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 drivers/perf/arm_pmu.c       | 17 ++++++++++++++---
 drivers/perf/arm_pmu_acpi.c  |  2 +-
 include/linux/perf/arm_pmu.h |  1 +
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 373dfd7d8a1d..4f73c5e8d623 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -760,18 +760,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
					    &cpu_pmu->node);
 }

-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
	struct arm_pmu *pmu;
	int cpu;

-	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+	pmu = kzalloc(sizeof(*pmu), flags);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

-	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
@@ -817,6 +817,17 @@ out:
	return NULL;
 }

+struct arm_pmu *armpmu_alloc(void)
+{
+	return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+	return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
 void armpmu_free(struct arm_pmu *pmu)
 {
	free_percpu(pmu->hw_events);

diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 705f1a390e31..30c5f2bbce59 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -127,7 +127,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
		return pmu;
	}

-	pmu = armpmu_alloc();
+	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 899bc7ef0881..1f8bb83ef42f 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -157,6 +157,7 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }

 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
+struct arm_pmu *armpmu_alloc_atomic(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
--
cgit v1.2.3-59-g8ed1b

From 84b4be57ae17f8c0b3c1d8629e10f23910838fd7 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 12 Dec 2017 16:56:06 +0000
Subject: arm_pmu: note IRQs and PMUs per-cpu

To support ACPI systems, we need to request IRQs before we know the
associated PMU, and thus we need some percpu variable that the IRQ
handler can find the PMU from.

As we're going to request IRQs without the PMU, we can't rely on the
arm_pmu::active_irqs mask, and similarly need to track requested IRQs
with a percpu variable.

Signed-off-by: Mark Rutland
[will: made armpmu_count_irq_users static]
Signed-off-by: Will Deacon
---
 drivers/perf/arm_pmu.c       | 69 +++++++++++++++++++++++++++++++++-----------
 include/linux/perf/arm_pmu.h |  1 -
 2 files changed, 52 insertions(+), 18 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 72118e6f9122..2b2af35db1b6 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -25,6 +25,9 @@

 #include

+static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+static DEFINE_PER_CPU(int, cpu_irq);
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
@@ -332,6 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
	 * dereference.
	 */
	armpmu = *(void **)dev;
+	if (WARN_ON_ONCE(!armpmu))
+		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(irq, armpmu);
@@ -517,29 +522,45 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);

-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
+static int armpmu_count_irq_users(const int irq)
 {
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
+	int cpu, count = 0;

-	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
-		return;
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(cpu_irq, cpu) == irq)
+			count++;
+	}
+
+	return count;
+}

-	if (irq_is_percpu_devid(irq)) {
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-		cpumask_clear(&armpmu->active_irqs);
+void armpmu_free_cpu_irq(int irq, int cpu)
+{
+	if (per_cpu(cpu_irq, cpu) == 0)
		return;
-	}
+	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
+		return;
+
+	if (!irq_is_percpu_devid(irq))
+		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+	else if (armpmu_count_irq_users(irq) == 1)
+		free_percpu_irq(irq, &cpu_armpmu);

-	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+	per_cpu(cpu_irq, cpu) = 0;
 }

-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
 {
-	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);
+
+	armpmu_free_cpu_irq(irq, cpu);
+}
+
+int armpmu_request_cpu_irq(int irq, int cpu)
+{
+	int err = 0;
+	const irq_handler_t handler = armpmu_dispatch_irq;

	if (!irq)
		return 0;
@@ -560,16 +581,16 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
		err = request_irq(irq, handler, irq_flags, "arm-pmu",
-				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-	} else if (cpumask_empty(&armpmu->active_irqs)) {
+				  per_cpu_ptr(&cpu_armpmu, cpu));
+	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &hw_events->percpu_pmu);
+					 &cpu_armpmu);
	}

	if (err)
		goto err_out;

-	cpumask_set_cpu(cpu, &armpmu->active_irqs);
+	per_cpu(cpu_irq, cpu) = irq;
	return 0;

 err_out:
@@ -577,6 +598,16 @@ err_out:
	return err;
 }

+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+{
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+	int irq = per_cpu(hw_events->irq, cpu);
+	if (!irq)
+		return 0;
+
+	return armpmu_request_cpu_irq(irq, cpu);
+}
+
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -599,6 +630,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
	if (pmu->reset)
		pmu->reset(pmu);

+	per_cpu(cpu_armpmu, cpu) = pmu;
+
	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq))
@@ -626,6 +659,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
		disable_irq(irq);
	}

+	per_cpu(cpu_armpmu, cpu) = NULL;
+
	return 0;
 }

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 1f8bb83ef42f..feec9e7e85db 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -75,7 +75,6 @@ enum armpmu_attr_groups {

 struct arm_pmu {
	struct pmu	pmu;
-	cpumask_t	active_irqs;
	cpumask_t	supported_cpus;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
--
cgit v1.2.3-59-g8ed1b

From 167e61438da0664cab87c825a6c0cb83510d578e Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 9 Oct 2017 17:09:05 +0100
Subject: arm_pmu: acpi: request IRQs up-front

We can't request IRQs in atomic context, so for ACPI systems we'll have
to request them up-front, and later associate them with CPUs.

This patch reorganises the arm_pmu code to do so. As we no longer have
the arm_pmu structure at probe time, a number of prototypes need to be
adjusted, requiring changes to the common arm_pmu code and arm_pmu
platform code.

Signed-off-by: Mark Rutland
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 drivers/perf/arm_pmu.c          | 22 ++--------------------
 drivers/perf/arm_pmu_acpi.c     | 19 ++++++-------------
 drivers/perf/arm_pmu_platform.c | 15 ++++++++++++---
 include/linux/perf/arm_pmu.h    |  5 +++--
 4 files changed, 23 insertions(+), 38 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2b2af35db1b6..0c2ed11c0603 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -534,7 +534,7 @@ static int armpmu_count_irq_users(const int irq)
	return count;
 }

-void armpmu_free_cpu_irq(int irq, int cpu)
+void armpmu_free_irq(int irq, int cpu)
 {
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
@@ -549,15 +549,7 @@ void armpmu_free_cpu_irq(int irq, int cpu)
	per_cpu(cpu_irq, cpu) = 0;
 }

-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
-{
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
-
-	armpmu_free_cpu_irq(irq, cpu);
-}
-
-int armpmu_request_cpu_irq(int irq, int cpu)
+int armpmu_request_irq(int irq, int cpu)
 {
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
@@ -598,16 +590,6 @@ err_out:
	return err;
 }

-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
-{
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
-	if (!irq)
-		return 0;
-
-	return armpmu_request_cpu_irq(irq, cpu);
-}
-
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 09a1a36cff57..0f197516d708 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -89,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

+		/*
+		 * Log and request the IRQ so the core arm_pmu code can manage
+		 * it. We'll have to sanity-check IRQs later when we associate
+		 * them with their PMUs.
+		 */
		per_cpu(pmu_irqs, cpu) = irq;
+		armpmu_request_irq(irq, cpu);
	}

	return 0;
@@ -204,14 +210,6 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

-	/*
-	 * Log and request the IRQ so the core arm_pmu code can manage it. In
-	 * some situations (e.g. mismatched PPIs), we may fail to request the
-	 * IRQ. However, it may be too late for us to do anything about it.
-	 * The common ARM PMU code will log a warning in this case.
-	 */
-	armpmu_request_irq(pmu, cpu);
-
	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
@@ -281,11 +279,6 @@ static int arm_pmu_acpi_init(void)
	if (acpi_disabled)
		return 0;

-	/*
-	 * We can't request IRQs yet, since we don't know the cookie value
-	 * until we know which CPUs share the same logical PMU. We'll handle
-	 * that in arm_pmu_acpi_cpu_starting().
-	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 1dc3c1f574e0..7729eda5909d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -159,10 +159,15 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)

 static int armpmu_request_irqs(struct arm_pmu *armpmu)
 {
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int cpu, err;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		err = armpmu_request_irq(armpmu, cpu);
+		int irq = per_cpu(hw_events->irq, cpu);
+		if (!irq)
+			continue;
+
+		err = armpmu_request_irq(irq, cpu);
		if (err)
			break;
	}
@@ -173,9 +178,13 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
 static void armpmu_free_irqs(struct arm_pmu *armpmu)
 {
	int cpu;
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;

-	for_each_cpu(cpu, &armpmu->supported_cpus)
-		armpmu_free_irq(armpmu, cpu);
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		int irq = per_cpu(hw_events->irq, cpu);
+
+		armpmu_free_irq(irq, cpu);
+	}
 }

 int arm_pmu_device_probe(struct platform_device *pdev,

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index feec9e7e85db..40036a57d072 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,6 +14,7 @@

 #include
 #include
+#include
 #include
 #include
@@ -159,8 +160,8 @@ struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);

 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"
--
cgit v1.2.3-59-g8ed1b

From d34bc48f8275b6ce0da44f639d68344891268ee9 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Wed, 21 Feb 2018 14:45:17 -0800
Subject: include/linux/sched/mm.h: re-inline mmdrop()

As Peter points out, doing a CALL+RET for just the decrement is a bit
silly.

Fixes: d70f2a14b72a4bc ("include/linux/sched/mm.h: uninline mmdrop_async(), etc")
Acked-by: Peter Zijlstra (Intel)
Cc: Ingo Molnar
Cc: Michal Hocko
Cc: Oleg Nesterov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/sched/mm.h | 13 ++++++++++++-
 kernel/fork.c            | 15 ++-------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 1149533aa2fa..9806184bb3d5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
	atomic_inc(&mm->mm_count);
 }

-extern void mmdrop(struct mm_struct *mm);
+extern void __mmdrop(struct mm_struct *mm);
+
+static inline void mmdrop(struct mm_struct *mm)
+{
+	/*
+	 * The implicit full barrier implied by atomic_dec_and_test() is
+	 * required by the membarrier system call before returning to
+	 * user-space, after storing to rq->curr.
+	 */
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+		__mmdrop(mm);
+}

 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.

diff --git a/kernel/fork.c b/kernel/fork.c
index be8aa5b98666..e5d9d405ae4e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
	put_user_ns(mm->user_ns);
	free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-	/*
-	 * The implicit full barrier implied by atomic_dec_and_test() is
-	 * required by the membarrier system call before returning to
-	 * user-space, after storing to rq->curr.
-	 */
-	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-		__mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);

 static void mmdrop_async_fn(struct work_struct *work)
 {
--
cgit v1.2.3-59-g8ed1b

From 101110f6271ce956a049250c907bc960030577f8 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 21 Feb 2018 14:45:20 -0800
Subject: Kbuild: always define endianness in kconfig.h

Build testing with LTO found a couple of files that get compiled
differently depending on whether asm/byteorder.h gets included early
enough or not. In particular, include/asm-generic/qrwlock_types.h is
affected by this, but there are probably others as well.

The symptom is a series of LTO link time warnings, including these:

  net/netlabel/netlabel_unlabeled.h:223: error: type of 'netlbl_unlhsh_add' does not match original declaration [-Werror=lto-type-mismatch]
    int netlbl_unlhsh_add(struct net *net,
  net/netlabel/netlabel_unlabeled.c:377: note: 'netlbl_unlhsh_add' was previously declared here

  include/net/ipv6.h:360: error: type of 'ipv6_renew_options_kern' does not match original declaration [-Werror=lto-type-mismatch]
    ipv6_renew_options_kern(struct sock *sk,
  net/ipv6/exthdrs.c:1162: note: 'ipv6_renew_options_kern' was previously declared here

  net/core/dev.c:761: note: 'dev_get_by_name_rcu' was previously declared here
    struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
  net/core/dev.c:761: note: code may be misoptimized unless -fno-strict-aliasing is used

  drivers/gpu/drm/i915/i915_drv.h:3377: error: type of 'i915_gem_object_set_to_wc_domain' does not match original declaration [-Werror=lto-type-mismatch]
    i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
  drivers/gpu/drm/i915/i915_gem.c:3639: note: 'i915_gem_object_set_to_wc_domain' was previously declared here

  include/linux/debugfs.h:92:9: error: type of 'debugfs_attr_read' does not match original declaration [-Werror=lto-type-mismatch]
    ssize_t debugfs_attr_read(struct file *file, char __user *buf,
  fs/debugfs/file.c:318: note: 'debugfs_attr_read' was previously declared here

  include/linux/rwlock_api_smp.h:30: error: type of '_raw_read_unlock' does not match original declaration [-Werror=lto-type-mismatch]
    void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
  kernel/locking/spinlock.c:246:26: note: '_raw_read_unlock' was previously declared here

  include/linux/fs.h:3308:5: error: type of 'simple_attr_open' does not match original declaration [-Werror=lto-type-mismatch]
    int simple_attr_open(struct inode *inode, struct file *file,
  fs/libfs.c:795: note: 'simple_attr_open' was previously declared here

All of the above are caused by include/asm-generic/qrwlock_types.h
failing to include asm/byteorder.h after commit e0d02285f16e
("locking/qrwlock: Use 'struct qrwlock' instead of 'struct __qrwlock'")
in linux-4.15.
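
To make the failure mode concrete, here is a simplified sketch (not the
real qrwlock definition) of a layout that silently changes with the
visibility of the endian macros; two translation units that disagree on
whether __LITTLE_ENDIAN was defined end up seeing different structs:

	/* Simplified sketch: if asm/byteorder.h was not included first,
	 * neither macro is defined and the #else branch is chosen, so the
	 * member layout differs between translation units. */
	struct qrwlock_like {
	#ifdef __LITTLE_ENDIAN
		u8 wlocked;	/* locked byte sits first on little-endian */
		u8 __pad[3];
	#else
		u8 __pad[3];
		u8 wlocked;	/* ...and last on big-endian */
	#endif
	};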

Similar bugs may or may not exist in older kernels as well, but there
is no easy way to test those with link-time optimizations, and kernels
before 4.14 are harder to fix because they don't have Babu's patch
series. We had similar issues with CONFIG_ symbols in the past and
ended up always including the configuration headers through
linux/kconfig.h. This works around the issue through that same file,
defining either __BIG_ENDIAN or __LITTLE_ENDIAN depending on
CONFIG_CPU_BIG_ENDIAN, which is now always set on all architectures
since commit 4c97a0c8fee3 ("arch: define CPU_BIG_ENDIAN for all fixed
big endian archs").

Link: http://lkml.kernel.org/r/20180202154104.1522809-2-arnd@arndb.de
Signed-off-by: Arnd Bergmann
Cc: Babu Moger
Cc: Andi Kleen
Cc: Greg Kroah-Hartman
Cc: Masahiro Yamada
Cc: Nicolas Pitre
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Will Deacon
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/kconfig.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index fec5076eda91..cc8fa109cfa3 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,6 +4,12 @@

 #include

+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
 #define __ARG_PLACEHOLDER_1 0,
 #define __take_second_arg(__ignored, val, ...) val
--
cgit v1.2.3-59-g8ed1b

From c3cc39118c3610eb6ab4711bc624af7fc48a35fe Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 21 Feb 2018 14:45:24 -0800
Subject: mm: memcontrol: fix NR_WRITEBACK leak in memcg and system stats

After commit a983b5ebee57 ("mm: memcontrol: fix excessive complexity in
memory.stat reporting"), we observed slowly upward creeping
NR_WRITEBACK counts over the course of several days, both the per-memcg
stats as well as the system counter in e.g. /proc/meminfo.

The conversion from full per-cpu stat counts to per-cpu cached atomic
stat counts introduced an irq-unsafe RMW operation into the updates.

Most stat updates come from process context, but one notable exception
is the NR_WRITEBACK counter. While writebacks are issued from process
context, they are retired from (soft)irq context.

When writeback completions interrupt the RMW counter updates of new
writebacks being issued, the decrements from the completions are lost.

Since the global updates are routed through the joint lruvec API, both
the memcg counters as well as the system counters are affected.

This patch makes the joint stat and event API irq safe.
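
The race is easiest to see in reduced form. A sketch (simplified, not
the memcg code itself) of why disabling preemption cannot protect a
read-modify-write that an interrupt handler also performs, and what the
irq-safe variant looks like:

	/* Simplified sketch: preempt_disable() keeps other tasks off this
	 * CPU but not interrupts, so an irq-context update can still land
	 * between the load and the store of the RMW below. Disabling local
	 * irqs closes that window. */
	static inline void mod_stat_irqsafe(long *stat, int val)
	{
		unsigned long flags;

		local_irq_save(flags);	/* no irq handler can interleave now */
		*stat += val;		/* the read-modify-write being protected */
		local_irq_restore(flags);
	}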

Link: http://lkml.kernel.org/r/20180203082353.17284-1-hannes@cmpxchg.org
Fixes: a983b5ebee57 ("mm: memcontrol: fix excessive complexity in memory.stat reporting")
Signed-off-by: Johannes Weiner
Debugged-by: Tejun Heo
Reviewed-by: Rik van Riel
Reviewed-by: Andrew Morton
Cc: Vladimir Davydov
Cc: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 882046863581..c46016bb25eb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }

 /**
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }

 static inline void __mod_lruvec_page_state(struct page *page,
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page,
 static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }

 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 static inline void count_memcg_events(struct mem_cgroup *memcg,
				      int idx, unsigned long count)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
-	preempt_enable();
+	local_irq_restore(flags);
 }

 /* idx can be of type enum memcg_event_item or vm_event_item */
--
cgit v1.2.3-59-g8ed1b

From 9c4e6b1a7027f102990c0395296015a812525f4d Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Wed, 21 Feb 2018 14:45:28 -0800
Subject: mm, mlock, vmscan: no more skipping pagevecs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When a thread mlocks an address space backed either by file pages which
are currently not present in memory or swapped out anon pages (not in
swapcache), a new page is allocated and added to the local pagevec
(lru_add_pvec); I/O is triggered and the thread then sleeps on the
page. On I/O completion, the thread can wake on a different CPU. The
mlock syscall will then set the PageMlocked() bit of the page but will
not be able to put that page on the unevictable LRU as the page is on
the pagevec of a different CPU. Even on drain, that page will go to the
evictable LRU because the PageMlocked() bit is not checked on pagevec
drain.

The page will eventually go to the right LRU on reclaim but the LRU
stats will remain skewed for a long time.

This patch puts all the pages, even unevictable ones, on the pagevecs,
and on the drain the pages will be added to their LRUs correctly by
checking their evictability.

This resolves the issue of mlocked pages sitting on another CPU's
pagevec: when those pagevecs are drained, the mlocked file pages will
go to the unevictable LRU. Also this makes the race with munlock easier
to resolve because the pagevec drains happen under the LRU lock.

However there is still one place which makes a page evictable and does
the PageLRU check on that page without the LRU lock, and it needs
special attention: TestClearPageMlocked() and isolate_lru_page() in
clear_page_mlock().

  #0: __pagevec_lru_add_fn		#1: clear_page_mlock

  SetPageLRU()				if (!TestClearPageMlocked())
					  return
  smp_mb() // <--required
  // inside does PageLRU
  if (!PageMlocked())			if (isolate_lru_page())
    move to evictable LRU		  putback_lru_page()
  else
    move to unevictable LRU

In '#1', TestClearPageMlocked() provides full memory barrier semantics
and thus the PageLRU check (inside isolate_lru_page) can not be
reordered before it.

In '#0', without an explicit memory barrier, the PageMlocked() check
can be reordered before SetPageLRU(). If that happens, '#0' can put a
page on the unevictable LRU while '#1' might have just cleared the
Mlocked bit of that page but fails to isolate it, as the PageLRU check
fails because '#0' still hasn't set the PageLRU bit of that page. That
page will be stranded on the unevictable LRU.

There is one (good) side effect though. Without this patch, the pages
allocated for a System V shared memory segment are added to evictable
LRUs even after shmctl(SHM_LOCK) on that segment. This patch will
correctly put such pages on the unevictable LRU.

Link: http://lkml.kernel.org/r/20171121211241.18877-1-shakeelb@google.com
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Jérôme Glisse
Cc: Huang Ying
Cc: Tim Chen
Cc: Michal Hocko
Cc: Greg Thelen
Cc: Johannes Weiner
Cc: Balbir Singh
Cc: Minchan Kim
Cc: Shaohua Li
Cc: Jan Kara
Cc: Nicholas Piggin
Cc: Dan Williams
Cc: Mel Gorman
Cc: Hugh Dickins
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/swap.h |  2 --
 mm/mlock.c           |  6 ++++
 mm/swap.c            | 82 ++++++++++++++++++++++++++++----------------------
 mm/vmscan.c          | 59 +-------------------------------------
 4 files changed, 54 insertions(+), 95 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b6a59f722a3..a1a3f4ed94ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);

-extern void add_page_to_unevictable_list(struct page *page);
-
 extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

diff --git a/mm/mlock.c b/mm/mlock.c
index 79398200e423..74e5a6547c3d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page)
	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
+	/*
+	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
+	 * in __pagevec_lru_add_fn().
+	 *
+	 * See __pagevec_lru_add_fn for more explanation.
+	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {

diff --git a/mm/swap.c b/mm/swap.c
index 567a7b96e41d..2d337710218f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -445,30 +445,6 @@ void lru_cache_add(struct page *page)
	__lru_cache_add(page);
 }

-/**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page:  the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list.  To avoid races with
- * tasks that might be making the page evictable, through eg. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks. This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
-	struct pglist_data *pgdat = page_pgdat(page);
-	struct lruvec *lruvec;
-
-	spin_lock_irq(&pgdat->lru_lock);
-	lruvec = mem_cgroup_page_lruvec(page, pgdat);
-	ClearPageActive(page);
-	SetPageUnevictable(page);
-	SetPageLRU(page);
-	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&pgdat->lru_lock);
-}
-
 /**
  * lru_cache_add_active_or_unevictable
  * @page:  the page to be added to LRU
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 {
	VM_BUG_ON_PAGE(PageLRU(page), page);

-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
-		lru_cache_add(page);
-		return;
-	}
-
-	if (!TestSetPageMlocked(page)) {
+	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
-	add_page_to_unevictable_list(page);
+	lru_cache_add(page);
 }

 /*
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
 {
-	int file = page_is_file_cache(page);
-	int active = PageActive(page);
-	enum lru_list lru = page_lru(page);
+	enum lru_list lru;
+	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
+	/*
+	 * Page becomes evictable in two ways:
+	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
+	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
+	 *   b) do PageLRU check before lock [clear_page_mlock]
+	 *
+	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
+	 * following strict ordering:
+	 *
+	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
+	 *
+	 * SetPageLRU()				TestClearPageMlocked()
+	 * smp_mb() // explicit ordering	// above provides strict
+	 *					// ordering
+	 * PageMlocked()			PageLRU()
+	 *
+	 *
+	 * if '#1' does not observe setting of PG_lru by '#0' and fails
+	 * isolation, the explicit barrier will make sure that page_evictable
+	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
+	 * can be reordered after PageMlocked check and can make '#1' to fail
+	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
+	 * looking at the same page) and the evictable page will be stranded
+	 * in an unevictable LRU.
+	 */
+	smp_mb();
+
+	if (page_evictable(page)) {
+		lru = page_lru(page);
+		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+					 PageActive(page));
+		if (was_unevictable)
+			count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		lru = LRU_UNEVICTABLE;
+		ClearPageActive(page);
+		SetPageUnevictable(page);
+		if (!was_unevictable)
+			count_vm_event(UNEVICTABLE_PGCULLED);
+	}
+
	add_page_to_lru_list(page, lruvec, lru);
-	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
 }

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 444749669187..bee53495a829 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  */
 void putback_lru_page(struct page *page)
 {
-	bool is_unevictable;
-	int was_unevictable = PageUnevictable(page);
-
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-redo:
-	ClearPageUnevictable(page);
-
-	if (page_evictable(page)) {
-		/*
-		 * For evictable pages, we can use the cache.
-		 * In event of a race, worst case is we end up with an
-		 * unevictable page on [in]active list.
-		 * We know how to handle that.
-		 */
-		is_unevictable = false;
-		lru_cache_add(page);
-	} else {
-		/*
-		 * Put unevictable pages directly on zone's unevictable
-		 * list.
-		 */
-		is_unevictable = true;
-		add_page_to_unevictable_list(page);
-		/*
-		 * When racing with an mlock or AS_UNEVICTABLE clearing
-		 * (page is unlocked) make sure that if the other thread
-		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_pages,
-		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
-		 * the page back to the evictable list.
-		 *
-		 * The other side is TestClearPageMlocked() or shmem_lock().
-		 */
-		smp_mb();
-	}
-
-	/*
-	 * page's status can change while we move it among lru. If an evictable
-	 * page is on unevictable list, it never be freed. To avoid that,
-	 * check after we added it to the list, again.
-	 */
-	if (is_unevictable && page_evictable(page)) {
-		if (!isolate_lru_page(page)) {
-			put_page(page);
-			goto redo;
-		}
-		/* This means someone else dropped this page from LRU
-		 * So, it will be freed or putback to LRU again. There is
-		 * nothing to do here.
-		 */
-	}
-
-	if (was_unevictable && !is_unevictable)
-		count_vm_event(UNEVICTABLE_PGRESCUED);
-	else if (!was_unevictable && is_unevictable)
-		count_vm_event(UNEVICTABLE_PGCULLED);
-
+	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
 }
--
cgit v1.2.3-59-g8ed1b

From 173a3efd3edb2ef6ef07471397c5f542a360e9c1 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 21 Feb 2018 14:45:54 -0800
Subject: bug.h: work around GCC PR82365 in BUG()

Looking at functions with large stack frames across all architectures
led me to discover that BUG() suffers from the same problem as
fortify_panic(), which I've added a workaround for already.

In short, variables that go out of scope by calling a noreturn function
or __builtin_unreachable() keep using stack space in functions
afterwards.

A workaround that was identified is to insert an empty assembler
statement just before calling the function that doesn't return. I'm
adding a macro "barrier_before_unreachable()" to document this, and
inserting calls to it in all instances of BUG() that currently suffer
from this problem.
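
Reduced to a standalone illustration (a sketch, not the kernel macros
themselves), the workaround amounts to:

	/* Sketch of the PR82365 workaround: the empty asm acts as an
	 * optimization barrier, so gcc stops keeping stack slots of
	 * out-of-scope locals alive across the noreturn call. */
	#define barrier_before_unreachable() asm volatile("")

	static void __attribute__((noreturn)) bug_example(void)
	{
		barrier_before_unreachable();
		__builtin_trap();	/* never returns */
	}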

The files that saw the largest change from this had these frame sizes
before, and much less with my patch:

  fs/ext4/inode.c:82:1: warning: the frame size of 1672 bytes is larger than 800 bytes [-Wframe-larger-than=]
  fs/ext4/namei.c:434:1: warning: the frame size of 904 bytes is larger than 800 bytes [-Wframe-larger-than=]
  fs/ext4/super.c:2279:1: warning: the frame size of 1160 bytes is larger than 800 bytes [-Wframe-larger-than=]
  fs/ext4/xattr.c:146:1: warning: the frame size of 1168 bytes is larger than 800 bytes [-Wframe-larger-than=]
  fs/f2fs/inode.c:152:1: warning: the frame size of 1424 bytes is larger than 800 bytes [-Wframe-larger-than=]
  net/netfilter/ipvs/ip_vs_core.c:1195:1: warning: the frame size of 1068 bytes is larger than 800 bytes [-Wframe-larger-than=]
  net/netfilter/ipvs/ip_vs_core.c:395:1: warning: the frame size of 1084 bytes is larger than 800 bytes [-Wframe-larger-than=]
  net/netfilter/ipvs/ip_vs_ftp.c:298:1: warning: the frame size of 928 bytes is larger than 800 bytes [-Wframe-larger-than=]
  net/netfilter/ipvs/ip_vs_ftp.c:418:1: warning: the frame size of 908 bytes is larger than 800 bytes [-Wframe-larger-than=]
  net/netfilter/ipvs/ip_vs_lblcr.c:718:1: warning: the frame size of 960 bytes is larger than 800 bytes [-Wframe-larger-than=]
  drivers/net/xen-netback/netback.c:1500:1: warning: the frame size of 1088 bytes is larger than 800 bytes [-Wframe-larger-than=]

In case of ARC and CRIS, it turns out that the BUG() implementation
actually does return (or at least the compiler thinks it does),
resulting in lots of warnings about uninitialized variable use and
leaving noreturn functions, such as:

  block/cfq-iosched.c: In function 'cfq_async_queue_prio':
  block/cfq-iosched.c:3804:1: error: control reaches end of non-void function [-Werror=return-type]
  include/linux/dmaengine.h: In function 'dma_maxpq':
  include/linux/dmaengine.h:1123:1: error: control reaches end of non-void function [-Werror=return-type]

This makes them call __builtin_trap() instead, which should normally
dump the stack and kill the current process, like some of the other
architectures already do.

I tried adding barrier_before_unreachable() to panic() and
fortify_panic() as well, but that had very little effect, so I'm not
submitting that patch.

Vineet said:

: For ARC, it is double win.
:
: 1. Fixes 3 -Wreturn-type warnings
:
: | ../net/core/ethtool.c:311:1: warning: control reaches end of non-void function [-Wreturn-type]
: | ../kernel/sched/core.c:3246:1: warning: control reaches end of non-void function [-Wreturn-type]
: | ../include/linux/sunrpc/svc_xprt.h:180:1: warning: control reaches end of non-void function [-Wreturn-type]
:
: 2. bloat-o-meter reports code size improvements as gcc elides the
:    generated code for stack return.

Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
Link: http://lkml.kernel.org/r/20171219114112.939391-1-arnd@arndb.de
Signed-off-by: Arnd Bergmann
Acked-by: Vineet Gupta [arch/arc]
Tested-by: Vineet Gupta [arch/arc]
Cc: Mikael Starvik
Cc: Jesper Nilsson
Cc: Tony Luck
Cc: Fenghua Yu
Cc: Geert Uytterhoeven
Cc: "David S. Miller"
Cc: Christopher Li
Cc: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Kees Cook
Cc: Ingo Molnar
Cc: Josh Poimboeuf
Cc: Will Deacon
Cc: "Steven Rostedt (VMware)"
Cc: Mark Rutland
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arc/include/asm/bug.h            |  3 ++-
 arch/cris/include/arch-v10/arch/bug.h | 11 +++++++++--
 arch/ia64/include/asm/bug.h           |  6 +++++-
 arch/m68k/include/asm/bug.h           |  3 +++
 arch/sparc/include/asm/bug.h          |  6 +++++-
 include/asm-generic/bug.h             |  1 +
 include/linux/compiler-gcc.h          | 15 ++++++++++++++-
 include/linux/compiler.h              |  5 +++++
 8 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index ea022d47896c..21ec82466d62 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);

 #define BUG() do { \
	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-	dump_stack(); \
+	barrier_before_unreachable(); \
+	__builtin_trap(); \
 } while (0)

 #define HAVE_ARCH_BUG

diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h
index 905afeacfedf..06da9d49152a 100644
--- a/arch/cris/include/arch-v10/arch/bug.h
+++ b/arch/cris/include/arch-v10/arch/bug.h
@@ -44,18 +44,25 @@ struct bug_frame {
  * not be used like this with newer versions of gcc.
  */
 #define BUG()								\
+do {									\
	__asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
			      "movu.w " __stringify(__LINE__) ",$r0\n\t"\
			      "jump 0f\n\t"				\
			      ".section .rodata\n"			\
			      "0:\t.string \"" __FILE__ "\"\n\t"	\
-			      ".previous")
+			      ".previous");				\
+	unreachable();							\
+} while (0)
 #endif

 #else

 /* This just causes an oops. */
-#define BUG() (*(int *)0 = 0)
+#define BUG()								\
+do {									\
+	barrier_before_unreachable();					\
+	__builtin_trap();						\
+} while (0)

 #endif

diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
index bd3eeb8d1cfa..66b37a532765 100644
--- a/arch/ia64/include/asm/bug.h
+++ b/arch/ia64/include/asm/bug.h
@@ -4,7 +4,11 @@

 #ifdef CONFIG_BUG
 #define ia64_abort()	__builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do {						\
+	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__);	\
+	barrier_before_unreachable();				\
+	ia64_abort();						\
+} while (0)

 /* should this BUG be made generic? */
 #define HAVE_ARCH_BUG

diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h
index b7e2bf1ba4a6..275dca1435bf 100644
--- a/arch/m68k/include/asm/bug.h
+++ b/arch/m68k/include/asm/bug.h
@@ -8,16 +8,19 @@
 #ifndef CONFIG_SUN3
 #define BUG() do { \
	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
	__builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
	panic("BUG!"); \
 } while (0)
 #endif
 #else
 #define BUG() do { \
+	barrier_before_unreachable(); \
	__builtin_trap(); \
 } while (0)
 #endif

diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index 6f17528356b2..ea53e418f6c0 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -9,10 +9,14 @@
 void do_BUG(const char *file, int line);
 #define BUG() do {					\
	do_BUG(__FILE__, __LINE__);			\
+	barrier_before_unreachable();			\
	__builtin_trap();				\
 } while (0)
 #else
-#define BUG() __builtin_trap()
+#define BUG() do {					\
+	barrier_before_unreachable();			\
+	__builtin_trap();				\
+} while (0)
 #endif

 #define HAVE_ARCH_BUG

diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 963b755d19b0..a7613e1b0c87 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,6 +52,7 @@ struct bug_entry {
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+	barrier_before_unreachable(); \
	panic("BUG!"); \
 } while (0)
 #endif

diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 73bc63e0a1c4..901c1ccb3374 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -207,6 +207,15 @@
 #endif
 #endif

+/*
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly before it works around the problem
+ */
+#define barrier_before_unreachable() asm volatile("")
+
 /*
  * Mark a position in code as unreachable. This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -217,7 +226,11 @@
  * unreleased. Really, we need to have autoconf for the kernel.
  */
 #define unreachable() \
-	do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+	do {					\
+		annotate_unreachable();		\
+		barrier_before_unreachable();	\
+		__builtin_unreachable();	\
+	} while (0)

 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e835fc0423ec..ab4711c63601 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define barrier_data(ptr) barrier()
 #endif

+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
 /* Unreachable code */
 #ifdef CONFIG_STACK_VALIDATION
 /*
--
cgit v1.2.3-59-g8ed1b

From 28128c61e08eaeced9cc8ec0e6b5d677b5b94690 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Thu, 22 Feb 2018 09:41:40 -0800
Subject: kconfig.h: Include compiler types to avoid missed struct attributes

The header files for some structures could get included in such a way
that struct attributes (specifically __randomize_layout from path.h)
would be parsed as variable names instead of attributes. This could
lead to some instances of a structure being unrandomized, causing nasty
GPFs, etc.

This patch makes sure the compiler_types.h header is included in
kconfig.h so that we've always got types and struct attributes defined,
since kconfig.h is included from the compiler command line.

Reported-by: Patrick McLean
Root-caused-by: Maciej S. Szmigiero
Suggested-by: Linus Torvalds
Tested-by: Maciej S. Szmigiero
Fixes: 3859a271a003 ("randstruct: Mark various structs for randomization")
Signed-off-by: Kees Cook
Signed-off-by: Linus Torvalds
---
 include/linux/kconfig.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index fec5076eda91..c5fd4ee776ba 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -64,4 +64,7 @@
  */
 #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))

+/* Make sure we always have all types and struct attributes defined. */
+#include
+
 #endif /* __LINUX_KCONFIG_H */
--
cgit v1.2.3-59-g8ed1b

From bef3efbeb897b56867e271cdbc5f8adaacaeb9cd Mon Sep 17 00:00:00 2001
From: "Luck, Tony"
Date: Thu, 22 Feb 2018 09:15:06 -0800
Subject: efivarfs: Limit the rate for non-root to read files

Each read from a file in efivarfs results in two calls to EFI (one to
get the file size, another to get the actual data). On X86 these EFI
calls result in broadcast system management interrupts (SMI) which
affect performance of the whole system. A malicious user can loop
performing reads from efivarfs, bringing the system to its knees.

Linus suggested a per-user rate limit to solve this.

So we add a ratelimit structure to "user_struct" and initialize it for
the root user for no limit. When allocating user_struct for other users
we set the limit to 100 per second. This could be used for other places
that want to limit the rate of some detrimental user action.

In efivarfs, if the limit is exceeded when reading, we take an
interruptible nap for 50ms and check the rate limit again.
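
For reference, a minimal sketch of the rate-limit pattern (the names
here are illustrative, not the efivarfs code): __ratelimit() returns
nonzero while the per-interval budget lasts and 0 once it is exhausted,
and msleep_interruptible() returns nonzero if a signal cut the sleep
short.

	#include <linux/ratelimit.h>
	#include <linux/delay.h>

	/* Illustrative only: allow 100 events per HZ jiffies (one second). */
	static DEFINE_RATELIMIT_STATE(example_rs, HZ, 100);

	static int example_throttled_op(void)
	{
		while (!__ratelimit(&example_rs)) {
			/* over budget: nap briefly, bail out on a signal */
			if (msleep_interruptible(50))
				return -EINTR;
		}
		return 0;	/* within budget: proceed with the operation */
	}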

Signed-off-by: Tony Luck
Acked-by: Ard Biesheuvel
Signed-off-by: Linus Torvalds
---
 fs/efivarfs/file.c         | 6 ++++++
 include/linux/sched/user.h | 4 ++++
 kernel/user.c              | 3 +++
 3 files changed, 13 insertions(+)

diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 5f22e74bbade..8e568428c88b 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -8,6 +8,7 @@
  */

 #include
+#include
 #include
 #include
 #include
@@ -74,6 +75,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
	ssize_t size = 0;
	int err;

+	while (!__ratelimit(&file->f_cred->user->ratelimit)) {
+		if (!msleep_interruptible(50))
+			return -EINTR;
+	}
+
	err = efivar_entry_size(var, &datasize);

	/*

diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 0dcf4e480ef7..96fe289c4c6e 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@

 #include
 #include
+#include

 struct key;

@@ -41,6 +42,9 @@ struct user_struct {
     defined(CONFIG_NET)
	atomic_long_t locked_vm;
 #endif
+
+	/* Miscellaneous per-user rate limit */
+	struct ratelimit_state ratelimit;
 };

 extern int uids_sysfs_init(void);

diff --git a/kernel/user.c b/kernel/user.c
index 9a20acce460d..36288d840675 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,6 +101,7 @@ struct user_struct root_user = {
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
+	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
 };

 /*
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid)

		new->uid = uid;
		atomic_set(&new->__count, 1);
+		ratelimit_state_init(&new->ratelimit, HZ, 100);
+		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
--
cgit v1.2.3-59-g8ed1b