From 8c0abfd6d2f6b0221194241ac2908751a2a0385f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 17 Nov 2021 18:57:01 -0800 Subject: rcutorture: Add CONFIG_PREEMPT_DYNAMIC=n to tiny scenarios With CONFIG_PREEMPT_DYNAMIC=y, the kernel builds with CONFIG_PREEMPTION=y because preemption can be enabled at runtime. This prevents any tests of Tiny RCU or Tiny SRCU from running correctly. This commit therefore explicitly sets CONFIG_PREEMPT_DYNAMIC=n for those scenarios. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/configs/rcu/SRCU-T | 1 + tools/testing/selftests/rcutorture/configs/rcu/SRCU-U | 1 + tools/testing/selftests/rcutorture/configs/rcu/TINY01 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TINY02 | 1 + tools/testing/selftests/rcutorture/configs/rcuscale/TINY | 1 + 5 files changed, 5 insertions(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T index d6557c38dfe4..c70cf0405f24 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T @@ -2,6 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TINY_SRCU=y CONFIG_RCU_TRACE=n CONFIG_DEBUG_LOCK_ALLOC=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U index 6bc24e99862f..bc9eeabaa1b1 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U @@ -2,6 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TINY_SRCU=y CONFIG_RCU_TRACE=n CONFIG_DEBUG_LOCK_ALLOC=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY01 b/tools/testing/selftests/rcutorture/configs/rcu/TINY01 index 6db705e55487..0953c52fcfd7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TINY01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY01 @@ -2,6 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TINY_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 index d8674264318d..30439f6fc20e 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 @@ -2,6 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TINY_RCU=y CONFIG_HZ_PERIODIC=y CONFIG_NO_HZ_IDLE=n diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TINY b/tools/testing/selftests/rcutorture/configs/rcuscale/TINY index fb05ef5279b4..64e9490c1f79 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TINY +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TINY @@ -2,6 +2,7 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_PREEMPT_DYNAMIC=n #CHECK#CONFIG_TINY_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y -- cgit v1.2.3-59-g8ed1b From db4cb76861282604cf14fb362f507bb587d676ba Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 27 Sep 2021 10:54:22 -0700 Subject: doc: Remove obsolete kernel-per-CPU-kthreads RCU_FAST_NO_HZ advice This document advises building with both CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y. 
However, CONFIG_NO_HZ=y offloads callbacks from all nohz_full CPUs, and CPUs with offloaded callbacks do not benefit from CONFIG_RCU_FAST_NO_HZ=y. Quite the opposite: CONFIG_RCU_FAST_NO_HZ=y simply adds a bit of idle entry/exit overhead. This commit therefore changes that advice to only CONFIG_NO_HZ=y. Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-per-CPU-kthreads.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst index 5e51ee5b0358..e4a5fc26f1a9 100644 --- a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst +++ b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst @@ -208,7 +208,7 @@ Do at least one of the following: 2. Enable RCU to do its processing remotely via dyntick-idle by doing all of the following: - a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y. + a. Build with CONFIG_NO_HZ=y. b. Ensure that the CPU goes idle frequently, allowing other CPUs to detect that it has passed through an RCU quiescent state. If the kernel is built with CONFIG_NO_HZ_FULL=y, -- cgit v1.2.3-59-g8ed1b From 7c0be9f8901fed7bfe44cb3f8f7412b010209526 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Oct 2021 15:53:38 +1100 Subject: doc: Add refcount analogy to What is RCU The reader-writer-lock analogy is a useful way to think about RCU, but it is not always applicable. It is useful to have other analogies to work with, and particularly to emphasise that no single analogy is perfect. This patch add a "RCU as reference count" to the "what is RCU" document. See https://lwn.net/Articles/872559/ [ paulmck: Apply Akira Yokosawa feedback. ] Reviewed-by: Akira Yokosawa Signed-off-by: NeilBrown Signed-off-by: Paul E. McKenney --- Documentation/RCU/whatisRCU.rst | 90 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 82 insertions(+), 8 deletions(-) diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index 17e95ab2a201..c34d2212eaca 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -39,9 +39,11 @@ different paths, as follows: :ref:`6. ANALOGY WITH READER-WRITER LOCKING <6_whatisRCU>` -:ref:`7. FULL LIST OF RCU APIs <7_whatisRCU>` +:ref:`7. ANALOGY WITH REFERENCE COUNTING <7_whatisRCU>` -:ref:`8. ANSWERS TO QUICK QUIZZES <8_whatisRCU>` +:ref:`8. FULL LIST OF RCU APIs <8_whatisRCU>` + +:ref:`9. ANSWERS TO QUICK QUIZZES <9_whatisRCU>` People who prefer starting with a conceptual overview should focus on Section 1, though most readers will profit by reading this section at @@ -677,7 +679,7 @@ Quick Quiz #1: occur when using this algorithm in a real-world Linux kernel? How could this deadlock be avoided? -:ref:`Answers to Quick Quiz <8_whatisRCU>` +:ref:`Answers to Quick Quiz <9_whatisRCU>` 5B. "TOY" EXAMPLE #2: CLASSIC RCU ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -732,7 +734,7 @@ Quick Quiz #2: Give an example where Classic RCU's read-side overhead is **negative**. -:ref:`Answers to Quick Quiz <8_whatisRCU>` +:ref:`Answers to Quick Quiz <9_whatisRCU>` .. _quiz_3: @@ -741,7 +743,7 @@ Quick Quiz #3: critical section, what the heck do you do in CONFIG_PREEMPT_RT, where normal spinlocks can block??? -:ref:`Answers to Quick Quiz <8_whatisRCU>` +:ref:`Answers to Quick Quiz <9_whatisRCU>` .. _6_whatisRCU: @@ -872,7 +874,79 @@ be used in place of synchronize_rcu(). .. _7_whatisRCU: -7. FULL LIST OF RCU APIs +7. 
ANALOGY WITH REFERENCE COUNTING +----------------------------------- + +The reader-writer analogy (illustrated by the previous section) is not +always the best way to think about using RCU. Another helpful analogy +considers RCU an effective reference count on everything which is +protected by RCU. + +A reference count typically does not prevent the referenced object's +values from changing, but does prevent changes to type -- particularly the +gross change of type that happens when that object's memory is freed and +re-allocated for some other purpose. Once a type-safe reference to the +object is obtained, some other mechanism is needed to ensure consistent +access to the data in the object. This could involve taking a spinlock, +but with RCU the typical approach is to perform reads with SMP-aware +operations such as smp_load_acquire(), to perform updates with atomic +read-modify-write operations, and to provide the necessary ordering. +RCU provides a number of support functions that embed the required +operations and ordering, such as the list_for_each_entry_rcu() macro +used in the previous section. + +A more focused view of the reference counting behavior is that, +between rcu_read_lock() and rcu_read_unlock(), any reference taken with +rcu_dereference() on a pointer marked as ``__rcu`` can be treated as +though a reference-count on that object has been temporarily increased. +This prevents the object from changing type. Exactly what this means +will depend on normal expectations of objects of that type, but it +typically includes that spinlocks can still be safely locked, normal +reference counters can be safely manipulated, and ``__rcu`` pointers +can be safely dereferenced. + +Some operations that one might expect to see on an object for +which an RCU reference is held include: + + - Copying out data that is guaranteed to be stable by the object's type. + - Using kref_get_unless_zero() or similar to get a longer-term + reference. This may fail of course. + - Acquiring a spinlock in the object, and checking if the object still + is the expected object and if so, manipulating it freely. + +The understanding that RCU provides a reference that only prevents a +change of type is particularly visible with objects allocated from a +slab cache marked ``SLAB_TYPESAFE_BY_RCU``. RCU operations may yield a +reference to an object from such a cache that has been concurrently +freed and the memory reallocated to a completely different object, +though of the same type. In this case RCU doesn't even protect the +identity of the object from changing, only its type. So the object +found may not be the one expected, but it will be one where it is safe +to take a reference or spinlock and then confirm that the identity +matches the expectations. + +With traditional reference counting -- such as that implemented by the +kref library in Linux -- there is typically code that runs when the last +reference to an object is dropped. With kref, this is the function +passed to kref_put(). When RCU is being used, such finalization code +must not be run until all ``__rcu`` pointers referencing the object have +been updated, and then a grace period has passed. Every remaining +globally visible pointer to the object must be considered to be a +potential counted reference, and the finalization code is typically run +using call_rcu() only after all those pointers have been changed. 
+ +To see how to choose between these two analogies -- of RCU as a +reader-writer lock and RCU as a reference counting system -- it is useful +to reflect on the scale of the thing being protected. The reader-writer +lock analogy looks at larger multi-part objects such as a linked list +and shows how RCU can facilitate concurrency while elements are added +to, and removed from, the list. The reference-count analogy looks at +the individual objects and looks at how they can be accessed safely +within whatever whole they are a part of. + +.. _8_whatisRCU: + +8. FULL LIST OF RCU APIs ------------------------- The RCU APIs are documented in docbook-format header comments in the @@ -1035,9 +1109,9 @@ g. Otherwise, use RCU. Of course, this all assumes that you have determined that RCU is in fact the right tool for your job. -.. _8_whatisRCU: +.. _9_whatisRCU: -8. ANSWERS TO QUICK QUIZZES +9. ANSWERS TO QUICK QUIZZES ---------------------------- Quick Quiz #1: -- cgit v1.2.3-59-g8ed1b From 5861dad198fee7d99d7940464d72f17c061a427c Mon Sep 17 00:00:00 2001 From: Akira Yokosawa Date: Wed, 3 Nov 2021 08:48:15 +0900 Subject: doc: RCU: Avoid 'Symbol' font-family in SVG figures On Ubuntu Focal, strings in some of SVG files under Documentation/RCU/Design can not be rendered properly when converted to PDF. Ubuntu releases since Focal and Debian bullseye have trouble with "Symbol" font-family in SVG files. As those strings are mostly API names such as "READ_ONCE()", "WRITE_ONCE()", "rcu_read_lock()", and so on, using a generic monospace font-family should be a good alternative. Substitute the font-family name by a simple sed pattern: 's/Symbol/monospace/g' Signed-off-by: Akira Yokosawa Signed-off-by: Paul E. McKenney --- .../RCU/Design/Expedited-Grace-Periods/Funnel0.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel1.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel2.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel3.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel4.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel5.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel6.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel7.svg | 4 +- .../RCU/Design/Expedited-Grace-Periods/Funnel8.svg | 4 +- .../Design/Requirements/GPpartitionReaders1.svg | 36 ++++++------- .../Design/Requirements/ReadersPartitionGP1.svg | 62 +++++++++++----------- 11 files changed, 67 insertions(+), 67 deletions(-) diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg index 98af66557908..16b1ff0ad38c 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Funnel0.svg [The SVG diff hunks were lost when this page was converted to text, leaving only stray figure labels; per the commit message above, each hunk replaces font-family "Symbol" with "monospace" in the listed files.] Date: Mon, 27 Sep 2021 14:13:24 -0700 Subject: torture: Remove RCU_FAST_NO_HZ from rcuscale and refscale scenarios All of the rcuscale and refscale scenarios that mention the Kconfig option CONFIG_RCU_FAST_NO_HZ disable it.
But this Kconfig option is disabled by default, so this commit removes the pointless "CONFIG_RCU_FAST_NO_HZ=n" lines from these scenarios. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/configs/rcuscale/TINY | 1 - tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 | 1 - tools/testing/selftests/rcutorture/configs/rcuscale/TREE | 1 - tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 | 1 - tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT | 1 - tools/testing/selftests/rcutorture/configs/refscale/PREEMPT | 1 - 6 files changed, 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TINY b/tools/testing/selftests/rcutorture/configs/rcuscale/TINY index 64e9490c1f79..0fa2dc086e10 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TINY +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TINY @@ -7,7 +7,6 @@ CONFIG_PREEMPT_DYNAMIC=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_PROVE_LOCKING=n diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 b/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 index e6baa2fbaeb3..227aba7783af 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 @@ -5,7 +5,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_PROVE_LOCKING=n diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE index 4cc1cc581321..f110d9ffbe4c 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE @@ -6,7 +6,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_HOTPLUG_CPU=y CONFIG_SUSPEND=n CONFIG_HIBERNATION=n diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 index f5952061fde7..9f83e5372796 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 +++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_HOTPLUG_CPU=y CONFIG_SUSPEND=n CONFIG_HIBERNATION=n diff --git a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT index ad505a887bec..7f06838a91e6 100644 --- a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT +++ b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT @@ -6,7 +6,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_HOTPLUG_CPU=y CONFIG_SUSPEND=n CONFIG_HIBERNATION=n diff --git a/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT index 4f08e641bb6b..52e3ef674056 100644 --- a/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT +++ b/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT @@ -6,7 +6,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_HOTPLUG_CPU=y CONFIG_SUSPEND=n CONFIG_HIBERNATION=n -- cgit v1.2.3-59-g8ed1b From 
24eab6e1ff58d33048c36b5452f5d284c3710d02 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 27 Sep 2021 14:17:35 -0700 Subject: torture: Remove RCU_FAST_NO_HZ from rcu scenarios All of the rcu scenarios that mention CONFIG_RCU_FAST_NO_HZ disable it. But this Kconfig option is disabled by default, so this commit removes the pointless "CONFIG_RCU_FAST_NO_HZ=n" lines from these scenarios. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/configs/rcu/TREE02 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE05 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE06 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE07 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE08 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE10 | 1 - 6 files changed, 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 index 65daee4fbf5a..2871ee599891 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_TRACE=n CONFIG_RCU_FANOUT=3 CONFIG_RCU_FANOUT_LEAF=3 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 index 4f95f8544f3f..9f48c73709ec 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_TRACE=n CONFIG_HOTPLUG_CPU=y CONFIG_RCU_FANOUT=6 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 index bf4980d606b5..db27651de04b 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_TRACE=n CONFIG_RCU_FANOUT=6 CONFIG_RCU_FANOUT_LEAF=6 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 index d7afb271a586..2789b47e4ecd 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y CONFIG_RCU_FANOUT=2 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 index c810c5276a89..8b561355b9ef 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n CONFIG_RCU_TRACE=n CONFIG_RCU_FANOUT=3 CONFIG_RCU_FANOUT_LEAF=2 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE10 b/tools/testing/selftests/rcutorture/configs/rcu/TREE10 index 7311f84a5876..4a00539bfdd7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE10 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE10 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n -- cgit v1.2.3-59-g8ed1b From e2c73a6860bdf54f2c6bf8cddc34ddc91a1343e1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 27 Sep 2021 14:18:51 -0700 Subject: rcu: Remove the RCU_FAST_NO_HZ Kconfig option All of the uses of CONFIG_RCU_FAST_NO_HZ=y that I have seen involve systems with RCU callbacks offloaded. In this situation, all that this Kconfig option does is slow down idle entry/exit with an additional always-taken early exit. If this is the only use case, then this Kconfig option is nothing but an attractive nuisance that needs to go away. This commit therefore removes the RCU_FAST_NO_HZ Kconfig option. Signed-off-by: Paul E. McKenney --- Documentation/RCU/stallwarn.rst | 11 -- Documentation/admin-guide/kernel-parameters.txt | 4 - Documentation/timers/no_hz.rst | 10 +- kernel/rcu/Kconfig | 18 -- kernel/rcu/tree.c | 11 -- kernel/rcu/tree.h | 7 - kernel/rcu/tree_plugin.h | 185 +-------------------- kernel/rcu/tree_stall.h | 27 +-- .../selftests/rcutorture/configs/rcu/TREE01 | 1 - .../selftests/rcutorture/configs/rcu/TREE04 | 1 - .../selftests/rcutorture/doc/TREE_RCU-kconfig.txt | 1 - 11 files changed, 7 insertions(+), 269 deletions(-) diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst index 28f8ad16db25..78404625bad2 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -254,17 +254,6 @@ period (in this case 2603), the grace-period sequence number (7075), and an estimate of the total number of RCU callbacks queued across all CPUs (625 in this case). -In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed -for each CPU:: - - 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 dyntick_enabled: 1 - -The "last_accelerate:" prints the low-order 16 bits (in hex) of the -jiffies counter when this CPU last invoked rcu_try_advance_all_cbs() -from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from -rcu_prepare_for_idle(). "dyntick_enabled: 1" indicates that dyntick-idle -processing is enabled. - If the grace period ends just as the stall warning starts printing, there will be a spurious stall-warning message, which will include the following:: diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9725c546a0d4..36b83a47e54a 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4489,10 +4489,6 @@ on rcutree.qhimark at boot time and to zero to disable more aggressive help enlistment. - rcutree.rcu_idle_gp_delay= [KNL] - Set wakeup interval for idle CPUs that have - RCU callbacks (RCU_FAST_NO_HZ=y). - rcutree.rcu_kick_kthreads= [KNL] Cause the grace-period kthread to get an extra wake_up() if it sleeps three times longer than diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst index 20ad23a6c618..f8786be15183 100644 --- a/Documentation/timers/no_hz.rst +++ b/Documentation/timers/no_hz.rst @@ -184,16 +184,12 @@ There are situations in which idle CPUs cannot be permitted to enter either dyntick-idle mode or adaptive-tick mode, the most common being when that CPU has RCU callbacks pending. -The CONFIG_RCU_FAST_NO_HZ=y Kconfig option may be used to cause such CPUs -to enter dyntick-idle mode or adaptive-tick mode anyway. 
In this case, -a timer will awaken these CPUs every four jiffies in order to ensure -that the RCU callbacks are processed in a timely fashion. - -Another approach is to offload RCU callback processing to "rcuo" kthreads +Avoid this by offloading RCU callback processing to "rcuo" kthreads using the CONFIG_RCU_NOCB_CPU=y Kconfig option. The specific CPUs to offload may be selected using The "rcu_nocbs=" kernel boot parameter, which takes a comma-separated list of CPUs and CPU ranges, for example, -"1,3-5" selects CPUs 1, 3, 4, and 5. +"1,3-5" selects CPUs 1, 3, 4, and 5. Note that CPUs specified by +the "nohz_full" kernel boot parameter are also offloaded. The offloaded CPUs will never queue RCU callbacks, and therefore RCU never prevents offloaded CPUs from entering either dyntick-idle mode diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 3128b7cf8e1f..4005f2728f1a 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -169,24 +169,6 @@ config RCU_FANOUT_LEAF Take the default if unsure. -config RCU_FAST_NO_HZ - bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ_COMMON && SMP && RCU_EXPERT - default n - help - This option permits CPUs to enter dynticks-idle state even if - they have RCU callbacks queued, and prevents RCU from waking - these CPUs up more than roughly once every four jiffies (by - default, you can adjust this using the rcutree.rcu_idle_gp_delay - parameter), thus improving energy efficiency. On the other - hand, this option increases the duration of RCU grace periods, - for example, slowing down synchronize_rcu(). - - Say Y if energy efficiency is critically important, and you - don't care about increased grace-period durations. - - Say N if you are unsure. - config RCU_BOOST bool "Enable RCU priority boosting" depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ef8d36f580fc..5e98257c2910 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -624,7 +624,6 @@ static noinstr void rcu_eqs_enter(bool user) instrumentation_begin(); trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); - rcu_prepare_for_idle(); rcu_preempt_deferred_qs(current); // instrumentation for the noinstr rcu_dynticks_eqs_enter() @@ -768,9 +767,6 @@ noinstr void rcu_nmi_exit(void) trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ - if (!in_nmi()) - rcu_prepare_for_idle(); - // instrumentation for the noinstr rcu_dynticks_eqs_enter() instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); instrumentation_end(); @@ -872,7 +868,6 @@ static void noinstr rcu_eqs_exit(bool user) // instrumentation for the noinstr rcu_dynticks_eqs_exit() instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); - rcu_cleanup_after_idle(); trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); WRITE_ONCE(rdp->dynticks_nesting, 1); @@ -1014,12 +1009,6 @@ noinstr void rcu_nmi_enter(void) rcu_dynticks_eqs_exit(); // ... but is watching here. 
- if (!in_nmi()) { - instrumentation_begin(); - rcu_cleanup_after_idle(); - instrumentation_end(); - } - instrumentation_begin(); // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 305cf6aeb408..7ca1aa46083c 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -189,11 +189,6 @@ struct rcu_data { bool rcu_urgent_qs; /* GP old need light quiescent state. */ bool rcu_forced_tick; /* Forced tick to provide QS. */ bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */ -#ifdef CONFIG_RCU_FAST_NO_HZ - unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */ - unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ - int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ -#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ /* 4) rcu_barrier(), OOM callbacks, and expediting. */ struct rcu_head barrier_head; @@ -419,8 +414,6 @@ static bool rcu_is_callbacks_kthread(void); static void rcu_cpu_kthread_setup(unsigned int cpu); static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp); static void __init rcu_spawn_boost_kthreads(void); -static void rcu_cleanup_after_idle(void); -static void rcu_prepare_for_idle(void); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); static bool rcu_preempt_need_deferred_qs(struct task_struct *t); static void rcu_preempt_deferred_qs(struct task_struct *t); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5199559fbbf0..19f7d578cedb 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -51,8 +51,6 @@ static void __init rcu_bootup_announce_oddness(void) RCU_FANOUT); if (rcu_fanout_exact) pr_info("\tHierarchical RCU autobalancing is disabled.\n"); - if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) - pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); if (IS_ENABLED(CONFIG_PROVE_RCU)) pr_info("\tRCU lockdep checking is enabled.\n"); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) @@ -1253,16 +1251,14 @@ static void __init rcu_spawn_boost_kthreads(void) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -#if !defined(CONFIG_RCU_FAST_NO_HZ) - /* * Check to see if any future non-offloaded RCU-related work will need * to be done by the current CPU, even if none need be done immediately, * returning 1 if so. This function is part of the RCU implementation; * it is -not- an exported member of the RCU API. * - * Because we not have RCU_FAST_NO_HZ, just check whether or not this - * CPU has RCU callbacks queued. + * Just check whether or not this CPU has non-offloaded RCU callbacks + * queued. */ int rcu_needs_cpu(u64 basemono, u64 *nextevt) { @@ -1271,183 +1267,6 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); } -/* - * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up - * after it. - */ -static void rcu_cleanup_after_idle(void) -{ -} - -/* - * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n, - * is nothing. - */ -static void rcu_prepare_for_idle(void) -{ -} - -#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ - -/* - * This code is invoked when a CPU goes idle, at which point we want - * to have the CPU do everything required for RCU so that it can enter - * the energy-efficient dyntick-idle mode. 
- * - * The following preprocessor symbol controls this: - * - * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted - * to sleep in dyntick-idle mode with RCU callbacks pending. This - * is sized to be roughly one RCU grace period. Those energy-efficiency - * benchmarkers who might otherwise be tempted to set this to a large - * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your - * system. And if you are -that- concerned about energy efficiency, - * just power the system down and be done with it! - * - * The value below works well in practice. If future workloads require - * adjustment, they can be converted into kernel config parameters, though - * making the state machine smarter might be a better option. - */ -#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */ - -static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY; -module_param(rcu_idle_gp_delay, int, 0644); - -/* - * Try to advance callbacks on the current CPU, but only if it has been - * awhile since the last time we did so. Afterwards, if there are any - * callbacks ready for immediate invocation, return true. - */ -static bool __maybe_unused rcu_try_advance_all_cbs(void) -{ - bool cbs_ready = false; - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_node *rnp; - - /* Exit early if we advanced recently. */ - if (jiffies == rdp->last_advance_all) - return false; - rdp->last_advance_all = jiffies; - - rnp = rdp->mynode; - - /* - * Don't bother checking unless a grace period has - * completed since we last checked and there are - * callbacks not yet ready to invoke. - */ - if ((rcu_seq_completed_gp(rdp->gp_seq, - rcu_seq_current(&rnp->gp_seq)) || - unlikely(READ_ONCE(rdp->gpwrap))) && - rcu_segcblist_pend_cbs(&rdp->cblist)) - note_gp_changes(rdp); - - if (rcu_segcblist_ready_cbs(&rdp->cblist)) - cbs_ready = true; - return cbs_ready; -} - -/* - * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready - * to invoke. If the CPU has callbacks, try to advance them. Tell the - * caller about what to set the timeout. - * - * The caller must have disabled interrupts. - */ -int rcu_needs_cpu(u64 basemono, u64 *nextevt) -{ - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - unsigned long dj; - - lockdep_assert_irqs_disabled(); - - /* If no non-offloaded callbacks, RCU doesn't need the CPU. */ - if (rcu_segcblist_empty(&rdp->cblist) || - rcu_rdp_is_offloaded(rdp)) { - *nextevt = KTIME_MAX; - return 0; - } - - /* Attempt to advance callbacks. */ - if (rcu_try_advance_all_cbs()) { - /* Some ready to invoke, so initiate later invocation. */ - invoke_rcu_core(); - return 1; - } - rdp->last_accelerate = jiffies; - - /* Request timer and round. */ - dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; - - *nextevt = basemono + dj * TICK_NSEC; - return 0; -} - -/* - * Prepare a CPU for idle from an RCU perspective. The first major task is to - * sense whether nohz mode has been enabled or disabled via sysfs. The second - * major task is to accelerate (that is, assign grace-period numbers to) any - * recently arrived callbacks. - * - * The caller must have disabled interrupts. - */ -static void rcu_prepare_for_idle(void) -{ - bool needwake; - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_node *rnp; - int tne; - - lockdep_assert_irqs_disabled(); - if (rcu_rdp_is_offloaded(rdp)) - return; - - /* Handle nohz enablement switches conservatively. 
*/ - tne = READ_ONCE(tick_nohz_active); - if (tne != rdp->tick_nohz_enabled_snap) { - if (!rcu_segcblist_empty(&rdp->cblist)) - invoke_rcu_core(); /* force nohz to see update. */ - rdp->tick_nohz_enabled_snap = tne; - return; - } - if (!tne) - return; - - /* - * If we have not yet accelerated this jiffy, accelerate all - * callbacks on this CPU. - */ - if (rdp->last_accelerate == jiffies) - return; - rdp->last_accelerate = jiffies; - if (rcu_segcblist_pend_cbs(&rdp->cblist)) { - rnp = rdp->mynode; - raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ - needwake = rcu_accelerate_cbs(rnp, rdp); - raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ - if (needwake) - rcu_gp_kthread_wake(); - } -} - -/* - * Clean up for exit from idle. Attempt to advance callbacks based on - * any grace periods that elapsed while the CPU was idle, and if any - * callbacks are now ready to invoke, initiate invocation. - */ -static void rcu_cleanup_after_idle(void) -{ - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - - lockdep_assert_irqs_disabled(); - if (rcu_rdp_is_offloaded(rdp)) - return; - if (rcu_try_advance_all_cbs()) - invoke_rcu_core(); -} - -#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ - /* * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the * grace-period kthread will do force_quiescent_state() processing? diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 5e2fa6fd97f1..21bebf7c9030 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -347,26 +347,6 @@ static void rcu_dump_cpu_stacks(void) } } -#ifdef CONFIG_RCU_FAST_NO_HZ - -static void print_cpu_stall_fast_no_hz(char *cp, int cpu) -{ - struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - - sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d", - rdp->last_accelerate & 0xffff, jiffies & 0xffff, - !!rdp->tick_nohz_enabled_snap); -} - -#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ - -static void print_cpu_stall_fast_no_hz(char *cp, int cpu) -{ - *cp = '\0'; -} - -#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ - static const char * const gp_state_names[] = { [RCU_GP_IDLE] = "RCU_GP_IDLE", [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS", @@ -408,13 +388,12 @@ static bool rcu_is_gp_kthread_starving(unsigned long *jp) * of RCU grace periods that this CPU is ignorant of, for example, "1" * if the CPU was aware of the previous grace period. * - * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. + * Also print out idle info. */ static void print_cpu_stall_info(int cpu) { unsigned long delta; bool falsepositive; - char fast_no_hz[72]; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); char *ticks_title; unsigned long ticks_value; @@ -432,11 +411,10 @@ static void print_cpu_stall_info(int cpu) ticks_title = "ticks this GP"; ticks_value = rdp->ticks_this_gp; } - print_cpu_stall_fast_no_hz(fast_no_hz, cpu); delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq); falsepositive = rcu_is_gp_kthread_starving(NULL) && rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)); - pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n", + pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n", cpu, "O."[!!cpu_online(cpu)], "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], @@ -449,7 +427,6 @@ static void print_cpu_stall_info(int cpu) rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, - fast_no_hz, falsepositive ? 
" (false positive?)" : ""); } diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 index b5b53973c01e..8ae41d5f81a3 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 @@ -6,7 +6,6 @@ CONFIG_PREEMPT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n -CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y CONFIG_MAXSMP=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index f6d6a40c0576..22ad0261728d 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -7,7 +7,6 @@ CONFIG_PREEMPT=n CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y -CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_TRACE=y CONFIG_RCU_FANOUT=4 CONFIG_RCU_FANOUT_LEAF=3 diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt index 1b96d68473b8..42acb1a64ce1 100644 --- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt +++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt @@ -15,7 +15,6 @@ CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING. CONFIG_RCU_BOOST -- one of PREEMPT_RCU. CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others. CONFIG_RCU_FANOUT_LEAF -- Do one non-default. -CONFIG_RCU_FAST_NO_HZ -- Do one, but not with all nohz_full CPUs. CONFIG_RCU_NOCB_CPU -- Do three, one with no rcu_nocbs CPUs, one with rcu_nocbs=0, and one with all rcu_nocbs CPUs. CONFIG_RCU_TRACE -- Do half. -- cgit v1.2.3-59-g8ed1b From bc849e9192c75833a85f2e9376a265ab31f8eec7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 27 Sep 2021 14:30:20 -0700 Subject: rcu: Move rcu_needs_cpu() to tree.c Now that RCU_FAST_NO_HZ is no more, there is but one implementation of the rcu_needs_cpu() function. This commit therefore moves this function from kernel/rcu/tree_plugin.c to kernel/rcu/tree.c. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 18 ++++++++++++++++++ kernel/rcu/tree_plugin.h | 16 ---------------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5e98257c2910..4ac019e9b25f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1075,6 +1075,24 @@ void rcu_irq_enter_irqson(void) local_irq_restore(flags); } +/* + * Check to see if any future non-offloaded RCU-related work will need + * to be done by the current CPU, even if none need be done immediately, + * returning 1 if so. This function is part of the RCU implementation; + * it is -not- an exported member of the RCU API. This is used by + * the idle-entry code to figure out whether it is safe to disable the + * scheduler-clock interrupt. + * + * Just check whether or not this CPU has non-offloaded RCU callbacks + * queued. 
+ */ +int rcu_needs_cpu(u64 basemono, u64 *nextevt) +{ + *nextevt = KTIME_MAX; + return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && + !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); +} + /* * If any sort of urgency was applied to the current CPU (for example, * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 19f7d578cedb..0575757a0f8f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1251,22 +1251,6 @@ static void __init rcu_spawn_boost_kthreads(void) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -/* - * Check to see if any future non-offloaded RCU-related work will need - * to be done by the current CPU, even if none need be done immediately, - * returning 1 if so. This function is part of the RCU implementation; - * it is -not- an exported member of the RCU API. - * - * Just check whether or not this CPU has non-offloaded RCU callbacks - * queued. - */ -int rcu_needs_cpu(u64 basemono, u64 *nextevt) -{ - *nextevt = KTIME_MAX; - return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && - !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); -} - /* * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the * grace-period kthread will do force_quiescent_state() processing? -- cgit v1.2.3-59-g8ed1b From 24ba53017e188e031f9cb8b290286fad52d2af00 Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Wed, 15 Sep 2021 17:02:18 +0800 Subject: rcu: Replace ________p1 and _________p1 with __UNIQUE_ID(rcu) This commit replaces both ________p1 and _________p1 with __UNIQUE_ID(rcu), and also adjusts the callers of the affected macros. __UNIQUE_ID(rcu) will generate unique variable names during compilation, which eliminates the need of ________p1 and _________p1 (both having 4 occurrences prior to the code change). This also avoids the variable name shadowing issue, or at least makes those wishing to cause shadowing problems work much harder to do so. The same idea is used for the min/max macros (commit 589a978 and commit e9092d0). Signed-off-by: Jim Huang Signed-off-by: Chun-Hung Tseng Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 50 +++++++++++++++++++++++++++--------------------- include/linux/srcu.h | 3 ++- 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5e0beb5c5659..88b42eb46406 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -364,6 +364,12 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ +#define __unrcu_pointer(p, local) \ +({ \ + typeof(*p) *local = (typeof(*p) *__force)(p); \ + rcu_check_sparse(p, __rcu); \ + ((typeof(*p) __force __kernel *)(local)); \ +}) /** * unrcu_pointer - mark a pointer as not being RCU protected * @p: pointer needing to lose its __rcu property @@ -371,39 +377,35 @@ static inline void rcu_preempt_sleep_check(void) { } * Converts @p from an __rcu pointer to a __kernel pointer. * This allows an __rcu pointer to be used with xchg() and friends. 
*/ -#define unrcu_pointer(p) \ -({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \ - rcu_check_sparse(p, __rcu); \ - ((typeof(*p) __force __kernel *)(_________p1)); \ -}) +#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu)) -#define __rcu_access_pointer(p, space) \ +#define __rcu_access_pointer(p, local, space) \ ({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ + typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ - ((typeof(*p) __force __kernel *)(_________p1)); \ + ((typeof(*p) __force __kernel *)(local)); \ }) -#define __rcu_dereference_check(p, c, space) \ +#define __rcu_dereference_check(p, local, c, space) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ + typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ - ((typeof(*p) __force __kernel *)(________p1)); \ + ((typeof(*p) __force __kernel *)(local)); \ }) -#define __rcu_dereference_protected(p, c, space) \ +#define __rcu_dereference_protected(p, local, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) -#define rcu_dereference_raw(p) \ +#define __rcu_dereference_raw(p, local) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(p) ________p1 = READ_ONCE(p); \ - ((typeof(*p) __force __kernel *)(________p1)); \ + typeof(p) local = READ_ONCE(p); \ + ((typeof(*p) __force __kernel *)(local)); \ }) +#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu)) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable @@ -490,7 +492,7 @@ do { \ * when tearing down multi-linked structures after a grace period * has elapsed. */ -#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) +#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking @@ -526,7 +528,8 @@ do { \ * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ - __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu) + __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ + (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking @@ -541,7 +544,8 @@ do { \ * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ - __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) + __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ + (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking @@ -556,7 +560,8 @@ do { \ * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ - __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ + __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ + (c) || rcu_read_lock_sched_held(), \ __rcu) /* @@ -566,7 +571,8 @@ do { \ * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ -#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu) +#define rcu_dereference_raw_check(p) \ + __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented @@ -585,7 +591,7 @@ do { \ * but very ugly failures. 
*/ #define rcu_dereference_protected(p, c) \ - __rcu_dereference_protected((p), (c), __rcu) + __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu) /** diff --git a/include/linux/srcu.h b/include/linux/srcu.h index e6011a9975af..01226e4d960a 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -117,7 +117,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) * lockdep_is_held() calls. */ #define srcu_dereference_check(p, ssp, c) \ - __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) + __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ + (c) || srcu_read_lock_held(ssp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing -- cgit v1.2.3-59-g8ed1b From 2407a64f8045552203ee5cb9904ce75ce2fceef4 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 28 Sep 2021 08:21:28 +0800 Subject: rcu: in_irq() cleanup This commit replaces the obsolete and ambiguous macro in_irq() with its shiny new in_hardirq() equivalent. Signed-off-by: Changbin Du Signed-off-by: Paul E. McKenney --- include/linux/rcutiny.h | 2 +- kernel/rcu/tree.c | 2 +- kernel/rcu/tree_plugin.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 9be015305f9f..858f4d429946 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -85,7 +85,7 @@ static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } static inline void rcu_irq_exit_check_preempt(void) { } #define rcu_is_idle_cpu(cpu) \ - (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq()) + (is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq()) static inline void exit_rcu(void) { } static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) { diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ef8d36f580fc..f0f19dc7f19e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1467,7 +1467,7 @@ static void rcu_gp_kthread_wake(void) { struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); - if ((current == t && !in_irq() && !in_serving_softirq()) || + if ((current == t && !in_hardirq() && !in_serving_softirq()) || !READ_ONCE(rcu_state.gp_flags) || !t) return; WRITE_ONCE(rcu_state.gp_wake_time, jiffies); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5199559fbbf0..599084c4c21f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -642,7 +642,7 @@ static void rcu_read_unlock_special(struct task_struct *t) (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node); // Need to defer quiescent state until everything is enabled. - if (use_softirq && (in_irq() || (expboost && !irqs_were_disabled))) { + if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) { // Using softirq, safe to awaken, and either the // wakeup is free or there is either an expedited // GP in flight or a potential need to deboost. -- cgit v1.2.3-59-g8ed1b From 17ea3718824912e773b0fd78579694b2e75ee597 Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Sun, 24 Oct 2021 08:36:34 +0800 Subject: rcu: Improve tree_plugin.h comments and add code cleanups This commit cleans up some comments and code in kernel/rcu/tree_plugin.h. Signed-off-by: Zhouyi Zhou Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 599084c4c21f..f44580276fbd 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -16,7 +16,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) { /* - * In order to read the offloaded state of an rdp is a safe + * In order to read the offloaded state of an rdp in a safe * and stable way and prevent from its value to be changed * under us, we must either hold the barrier mutex, the cpu * hotplug lock (read or write) or the nocb lock. Local @@ -56,7 +56,7 @@ static void __init rcu_bootup_announce_oddness(void) if (IS_ENABLED(CONFIG_PROVE_RCU)) pr_info("\tRCU lockdep checking is enabled.\n"); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) - pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n"); + pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n"); if (RCU_NUM_LVLS >= 4) pr_info("\tFour(or more)-level hierarchy is enabled.\n"); if (RCU_FANOUT_LEAF != 16) @@ -88,13 +88,13 @@ static void __init rcu_bootup_announce_oddness(void) if (rcu_kick_kthreads) pr_info("\tKick kthreads if too-long grace period.\n"); if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) - pr_info("\tRCU callback double-/use-after-free debug enabled.\n"); + pr_info("\tRCU callback double-/use-after-free debug is enabled.\n"); if (gp_preinit_delay) pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); if (gp_init_delay) pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay); if (gp_cleanup_delay) - pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_cleanup_delay); + pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay); if (!use_softirq) pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n"); if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG)) @@ -1153,7 +1153,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) /* * Create an RCU-boost kthread for the specified node if one does not * already exist. We only create this kthread for preemptible RCU. - * Returns zero if all is well, a negated errno otherwise. */ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) { @@ -1455,7 +1454,7 @@ static void rcu_cleanup_after_idle(void) * CPU unless the grace period has extended for too long. * * This code relies on the fact that all NO_HZ_FULL CPUs are also - * CONFIG_RCU_NOCB_CPU CPUs. + * RCU_NOCB_CPU CPUs. */ static bool rcu_nohz_full_cpu(void) { -- cgit v1.2.3-59-g8ed1b From c2cf0767e98eb4487444e5c7ebba491a866811ce Mon Sep 17 00:00:00 2001 From: Zqiang Date: Mon, 15 Nov 2021 13:15:46 +0800 Subject: rcu: Avoid running boost kthreads on isolated CPUs When the boost kthreads are created on systems with nohz_full CPUs, the cpus_allowed_ptr is set to housekeeping_cpumask(HK_FLAG_KTHREAD). However, when the rcu_boost_kthread_setaffinity() is called, the original affinity will be changed and these kthreads can subsequently run on nohz_full CPUs. This commit makes rcu_boost_kthread_setaffinity() restrict these boost kthreads to housekeeping CPUs. Signed-off-by: Zqiang Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index f44580276fbd..87fc4609b756 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1203,8 +1203,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) if ((mask & leaf_node_cpu_bit(rnp, cpu)) && cpu != outgoingcpu) cpumask_set_cpu(cpu, cm); + cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU)); if (cpumask_weight(cm) == 0) - cpumask_setall(cm); + cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU)); set_cpus_allowed_ptr(t, cm); free_cpumask_var(cm); } -- cgit v1.2.3-59-g8ed1b From 300c0c5e721834f484b03fa3062602dd8ff48413 Mon Sep 17 00:00:00 2001 From: Jun Miao Date: Tue, 16 Nov 2021 07:23:02 +0800 Subject: rcu: Avoid alloc_pages() when recording stack The default kasan_record_aux_stack() calls stack_depot_save() with GFP_NOWAIT, which in turn can then call alloc_pages(GFP_NOWAIT, ...). In general, however, it is not even possible to use either GFP_ATOMIC nor GFP_NOWAIT in certain non-preemptive contexts/RT kernel including raw_spin_locks (see gfp.h and ab00db216c9c7). Fix it by instructing stackdepot to not expand stack storage via alloc_pages() in case it runs out by using kasan_record_aux_stack_noalloc(). Jianwei Hu reported: BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:969 in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 15319, name: python3 INFO: lockdep is turned off. irq event stamp: 0 hardirqs last enabled at (0): [<0000000000000000>] 0x0 hardirqs last disabled at (0): [] copy_process+0xaf3/0x2590 softirqs last enabled at (0): [] copy_process+0xaf3/0x2590 softirqs last disabled at (0): [<0000000000000000>] 0x0 CPU: 6 PID: 15319 Comm: python3 Tainted: G W O 5.15-rc7-preempt-rt #1 Hardware name: Supermicro SYS-E300-9A-8C/A2SDi-8C-HLN4F, BIOS 1.1b 12/17/2018 Call Trace: show_stack+0x52/0x58 dump_stack+0xa1/0xd6 ___might_sleep.cold+0x11c/0x12d rt_spin_lock+0x3f/0xc0 rmqueue+0x100/0x1460 rmqueue+0x100/0x1460 mark_usage+0x1a0/0x1a0 ftrace_graph_ret_addr+0x2a/0xb0 rmqueue_pcplist.constprop.0+0x6a0/0x6a0 __kasan_check_read+0x11/0x20 __zone_watermark_ok+0x114/0x270 get_page_from_freelist+0x148/0x630 is_module_text_address+0x32/0xa0 __alloc_pages_nodemask+0x2f6/0x790 __alloc_pages_slowpath.constprop.0+0x12d0/0x12d0 create_prof_cpu_mask+0x30/0x30 alloc_pages_current+0xb1/0x150 stack_depot_save+0x39f/0x490 kasan_save_stack+0x42/0x50 kasan_save_stack+0x23/0x50 kasan_record_aux_stack+0xa9/0xc0 __call_rcu+0xff/0x9c0 call_rcu+0xe/0x10 put_object+0x53/0x70 __delete_object+0x7b/0x90 kmemleak_free+0x46/0x70 slab_free_freelist_hook+0xb4/0x160 kfree+0xe5/0x420 kfree_const+0x17/0x30 kobject_cleanup+0xaa/0x230 kobject_put+0x76/0x90 netdev_queue_update_kobjects+0x17d/0x1f0 ... ... ksys_write+0xd9/0x180 __x64_sys_write+0x42/0x50 do_syscall_64+0x38/0x50 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Links: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/include/linux/kasan.h?id=7cb3007ce2da27ec02a1a3211941e7fe6875b642 Fixes: 84109ab58590 ("rcu: Record kvfree_call_rcu() call stack for KASAN") Fixes: 26e760c9a7c8 ("rcu: kasan: record and print call_rcu() call stack") Reported-by: Jianwei Hu Reviewed-by: Uladzislau Rezki (Sony) Acked-by: Marco Elver Tested-by: Juri Lelli Signed-off-by: Jun Miao Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f0f19dc7f19e..c8beea8dd3a0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2982,7 +2982,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) head->func = func; head->next = NULL; local_irq_save(flags); - kasan_record_aux_stack(head); + kasan_record_aux_stack_noalloc(head); rdp = this_cpu_ptr(&rcu_data); /* Add the callback to our list. */ @@ -3547,7 +3547,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) return; } - kasan_record_aux_stack(ptr); + kasan_record_aux_stack_noalloc(ptr); success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); if (!success) { run_page_cache_worker(krcp); -- cgit v1.2.3-59-g8ed1b From 937ed91c712273131de6d2a02caafd3ee84e0c72 Mon Sep 17 00:00:00 2001 From: Ammar Faizi Date: Sun, 24 Oct 2021 19:28:14 +0200 Subject: tools/nolibc: x86-64: Fix startup code bug Before this patch, the `_start` function looks like this: ``` 0000000000001170 <_start>: 1170: pop %rdi 1171: mov %rsp,%rsi 1174: lea 0x8(%rsi,%rdi,8),%rdx 1179: and $0xfffffffffffffff0,%rsp 117d: sub $0x8,%rsp 1181: call 1000
1186: movzbq %al,%rdi 118a: mov $0x3c,%rax 1191: syscall 1193: hlt 1194: data16 cs nopw 0x0(%rax,%rax,1) 119f: nop ``` Note the "and" to %rsp with $-16, it makes the %rsp be 16-byte aligned, but then there is a "sub" with $0x8 which makes the %rsp no longer 16-byte aligned, then it calls main. That's the bug! What actually the x86-64 System V ABI mandates is that right before the "call", the %rsp must be 16-byte aligned, not after the "call". So the "sub" with $0x8 here breaks the alignment. Remove it. An example where this rule matters is when the callee needs to align its stack at 16-byte for aligned move instruction, like `movdqa` and `movaps`. If the callee can't align its stack properly, it will result in segmentation fault. x86-64 System V ABI also mandates the deepest stack frame should be zero. Just to be safe, let's zero the %rbp on startup as the content of %rbp may be unspecified when the program starts. Now it looks like this: ``` 0000000000001170 <_start>: 1170: pop %rdi 1171: mov %rsp,%rsi 1174: lea 0x8(%rsi,%rdi,8),%rdx 1179: xor %ebp,%ebp # zero the %rbp 117b: and $0xfffffffffffffff0,%rsp # align the %rsp 117f: call 1000
1184: movzbq %al,%rdi 1188: mov $0x3c,%rax 118f: syscall 1191: hlt 1192: data16 cs nopw 0x0(%rax,%rax,1) 119d: nopl (%rax) ``` Cc: Bedirhan KURT Cc: Louvian Lyndal Reported-by: Peter Cordes Signed-off-by: Ammar Faizi [wt: I did this on purpose due to a misunderstanding of the spec, other archs will thus have to be rechecked, particularly i386] Cc: stable@vger.kernel.org Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 3430667b0d24..96b6d56acb57 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -399,14 +399,20 @@ struct stat { }) /* startup code */ +/* + * x86-64 System V ABI mandates: + * 1) %rsp must be 16-byte aligned right before the function call. + * 2) The deepest stack frame should be zero (the %rbp). + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %rdi\n" // argc (first arg, %rdi) "mov %rsp, %rsi\n" // argv[] (second arg, %rsi) "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx) - "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when - "sub $8, %rsp\n" // entering the callee + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call "call main\n" // main() returns the status code, we'll exit with it. "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits "mov $60, %rax\n" // NR_exit == 60 -- cgit v1.2.3-59-g8ed1b From ebbe0d8a449d183fa43b42d84fcb248e25303985 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sun, 24 Oct 2021 19:28:15 +0200 Subject: tools/nolibc: i386: fix initial stack alignment After re-checking in the spec and comparing stack offsets with glibc, The last pushed argument must be 16-byte aligned (i.e. aligned before the call) so that in the callee esp+4 is multiple of 16, so the principle is the 32-bit equivalent to what Ammar fixed for x86_64. It's possible that 32-bit code using SSE2 or MMX could have been affected. In addition the frame pointer ought to be zero at the deepest level. Link: https://gitlab.com/x86-psABIs/i386-ABI/-/wikis/Intel386-psABI Cc: Ammar Faizi Cc: stable@vger.kernel.org Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 96b6d56acb57..7f300dc379e7 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -583,13 +583,21 @@ struct sys_stat_struct { }) /* startup code */ +/* + * i386 System V ABI mandates: + * 1) last pushed argument must be 16-byte aligned. 
+ * 2) The deepest stack frame should be set to zero + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %eax\n" // argc (first arg, %eax) "mov %esp, %ebx\n" // argv[] (second arg, %ebx) "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx) - "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned before + "sub $4, %esp\n" // the call instruction (args are aligned) "push %ecx\n" // push all registers on the stack so that we "push %ebx\n" // support both regparm and plain stack modes "push %eax\n" -- cgit v1.2.3-59-g8ed1b From de0244ae40ae91145faaf164a4252347607c3711 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sun, 24 Oct 2021 19:28:16 +0200 Subject: tools/nolibc: fix incorrect truncation of exit code Ammar Faizi reported that our exit code handling is wrong. We truncate it to the lowest 8 bits but the syscall itself is expected to take a regular 32-bit signed integer, not an unsigned char. It's the kernel that later truncates it to the lowest 8 bits. The difference is visible in strace, where the program below used to show exit(255) instead of exit(-1): int main(void) { return -1; } This patch applies the fix to all archs. x86_64, i386, arm64, armv7 and mips were all tested and confirmed to work fine now. Risc-v was not tested but the change is trivial and exactly the same as for other archs. Reported-by: Ammar Faizi Cc: stable@vger.kernel.org Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 7f300dc379e7..3e2c6f2ed587 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -414,7 +414,7 @@ asm(".section .text\n" "xor %ebp, %ebp\n" // zero the stack frame "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call "call main\n" // main() returns the status code, we'll exit with it. - "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits + "mov %eax, %edi\n" // retrieve exit code (32 bit) "mov $60, %rax\n" // NR_exit == 60 "syscall\n" // really exit "hlt\n" // ensure it does not return @@ -602,9 +602,9 @@ asm(".section .text\n" "push %ebx\n" // support both regparm and plain stack modes "push %eax\n" "call main\n" // main() returns the status code in %eax - "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits - "movl $1, %eax\n" // NR_exit == 1 - "int $0x80\n" // exit now + "mov %eax, %ebx\n" // retrieve exit code (32-bit int) + "movl $1, %eax\n" // NR_exit == 1 + "int $0x80\n" // exit now "hlt\n" // ensure it does not ""); @@ -788,7 +788,6 @@ asm(".section .text\n" "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc) "bl main\n" // main() returns the status code, we'll exit with it. - "and %r0, %r0, $0xff\n" // limit exit code to 8 bits "movs r7, $1\n" // NR_exit == 1 "svc $0x00\n" ""); @@ -985,7 +984,6 @@ asm(".section .text\n" "add x2, x2, x1\n" // + argv "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee "bl main\n" // main() returns the status code, we'll exit with it. - "and x0, x0, 0xff\n" // limit exit code to 8 bits "mov x8, 93\n" // NR_exit == 93 "svc #0\n" ""); @@ -1190,7 +1188,7 @@ asm(".section .text\n" "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there! 
"jal main\n" // main() returns the status code, we'll exit with it. "nop\n" // delayed slot - "and $a0, $v0, 0xff\n" // limit exit code to 8 bits + "move $a0, $v0\n" // retrieve 32-bit exit code from v0 "li $v0, 4001\n" // NR_exit == 4001 "syscall\n" ".end __start\n" @@ -1388,7 +1386,6 @@ asm(".section .text\n" "add a2,a2,a1\n" // + argv "andi sp,a1,-16\n" // sp must be 16-byte aligned "call main\n" // main() returns the status code, we'll exit with it. - "andi a0, a0, 0xff\n" // limit exit code to 8 bits "li a7, 93\n" // NR_exit == 93 "ecall\n" ""); -- cgit v1.2.3-59-g8ed1b From bf91666959eeac44fb686e9359e37830944beef2 Mon Sep 17 00:00:00 2001 From: Ammar Faizi Date: Sun, 24 Oct 2021 19:43:21 +0200 Subject: tools/nolibc: x86: Remove `r8`, `r9` and `r10` from the clobber list Linux x86-64 syscall only clobbers rax, rcx and r11 (and "memory"). - rax for the return value. - rcx to save the return address. - r11 to save the rflags. Other registers are preserved. Having r8, r9 and r10 in the syscall clobber list is harmless, but this results in a missed-optimization. As the syscall doesn't clobber r8-r10, GCC should be allowed to reuse their value after the syscall returns to userspace. But since they are in the clobber list, GCC will always miss this opportunity. Remove them from the x86-64 syscall clobber list to help GCC generate better code and fix the comment. See also the x86-64 ABI, section A.2 AMD64 Linux Kernel Conventions, A.2.1 Calling Conventions [1]. Extra note: Some people may think it does not really give a benefit to remove r8, r9 and r10 from the syscall clobber list because the impression of syscall is a C function call, and function call always clobbers those 3. However, that is not the case for nolibc.h, because we have a potential to inline the "syscall" instruction (which its opcode is "0f 05") to the user functions. All syscalls in the nolibc.h are written as a static function with inline ASM and are likely always inline if we use optimization flag, so this is a profit not to have r8, r9 and r10 in the clobber list. Here is the example where this matters. Consider the following C code: ``` #include "tools/include/nolibc/nolibc.h" #define read_abc(a, b, c) __asm__ volatile("nop"::"r"(a),"r"(b),"r"(c)) int main(void) { int a = 0xaa; int b = 0xbb; int c = 0xcc; read_abc(a, b, c); write(1, "test\n", 5); read_abc(a, b, c); return 0; } ``` Compile with: gcc -Os test.c -o test -nostdlib With r8, r9, r10 in the clobber list, GCC generates this: 0000000000001000
: 1000: f3 0f 1e fa endbr64 1004: 41 54 push %r12 1006: 41 bc cc 00 00 00 mov $0xcc,%r12d 100c: 55 push %rbp 100d: bd bb 00 00 00 mov $0xbb,%ebp 1012: 53 push %rbx 1013: bb aa 00 00 00 mov $0xaa,%ebx 1018: 90 nop 1019: b8 01 00 00 00 mov $0x1,%eax 101e: bf 01 00 00 00 mov $0x1,%edi 1023: ba 05 00 00 00 mov $0x5,%edx 1028: 48 8d 35 d1 0f 00 00 lea 0xfd1(%rip),%rsi 102f: 0f 05 syscall 1031: 90 nop 1032: 31 c0 xor %eax,%eax 1034: 5b pop %rbx 1035: 5d pop %rbp 1036: 41 5c pop %r12 1038: c3 ret GCC thinks that syscall will clobber r8, r9, r10. So it spills 0xaa, 0xbb and 0xcc to callee saved registers (r12, rbp and rbx). This is clearly extra memory access and extra stack size for preserving them. But syscall does not actually clobber them, so this is a missed optimization. Now without r8, r9, r10 in the clobber list, GCC generates better code: 0000000000001000
: 1000: f3 0f 1e fa endbr64 1004: 41 b8 aa 00 00 00 mov $0xaa,%r8d 100a: 41 b9 bb 00 00 00 mov $0xbb,%r9d 1010: 41 ba cc 00 00 00 mov $0xcc,%r10d 1016: 90 nop 1017: b8 01 00 00 00 mov $0x1,%eax 101c: bf 01 00 00 00 mov $0x1,%edi 1021: ba 05 00 00 00 mov $0x5,%edx 1026: 48 8d 35 d3 0f 00 00 lea 0xfd3(%rip),%rsi 102d: 0f 05 syscall 102f: 90 nop 1030: 31 c0 xor %eax,%eax 1032: c3 ret Cc: Andy Lutomirski Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: x86@kernel.org Cc: "H. Peter Anvin" Cc: David Laight Acked-by: Andy Lutomirski Signed-off-by: Ammar Faizi Link: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/x86-64-psABI [1] Link: https://lore.kernel.org/lkml/20211011040344.437264-1-ammar.faizi@students.amikom.ac.id/ Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 3e2c6f2ed587..f9afe89ec6f2 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -265,12 +265,17 @@ struct stat { * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively * - the system call is performed by calling the syscall instruction * - syscall return comes in rax - * - rcx and r8..r11 may be clobbered, others are preserved. + * - rcx and r11 are clobbered, others are preserved. * - the arguments are cast to long and assigned into the target registers * which are then simply passed as registers to the asm code, so that we * don't have to experience issues with register constraints. * - the syscall number is always specified last in order to allow to force * some registers before (gcc refuses a %-register at the last position). + * - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1 + * Calling Conventions. 
+ * + * Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/x86-64-psABI + * */ #define my_syscall0(num) \ @@ -280,9 +285,9 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret) \ + : "=a"(_ret) \ : "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -295,10 +300,10 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret) \ + : "=a"(_ret) \ : "r"(_arg1), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -312,10 +317,10 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret) \ + : "=a"(_ret) \ : "r"(_arg1), "r"(_arg2), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -330,10 +335,10 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret) \ + : "=a"(_ret) \ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -349,10 +354,10 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret), "=r"(_arg4) \ + : "=a"(_ret) \ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ "0"(_num) \ - : "rcx", "r8", "r9", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -369,10 +374,10 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \ + : "=a"(_ret) \ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ "0"(_num) \ - : "rcx", "r9", "r11", "memory", "cc" \ + : "rcx", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -390,7 +395,7 @@ struct stat { \ asm volatile ( \ "syscall\n" \ - : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \ + : "=a"(_ret) \ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ "r"(_arg6), "0"(_num) \ : "rcx", "r11", "memory", "cc" \ -- cgit v1.2.3-59-g8ed1b From 7bdc0e7a390511cd3df8194003b908f15a6170a5 Mon Sep 17 00:00:00 2001 From: Ammar Faizi Date: Sun, 24 Oct 2021 19:43:22 +0200 Subject: tools/nolibc: x86-64: Use `mov $60,%eax` instead of `mov $60,%rax` Note that mov to 32-bit register will zero extend to 64-bit register. Thus `mov $60,%eax` has the same effect with `mov $60,%rax`. Use the shorter opcode to achieve the same thing. ``` b8 3c 00 00 00 mov $60,%eax (5 bytes) [1] 48 c7 c0 3c 00 00 00 mov $60,%rax (7 bytes) [2] ``` Currently, we use [2]. Change it to [1] for shorter code. Signed-off-by: Ammar Faizi Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index f9afe89ec6f2..4988866af0b5 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -420,7 +420,7 @@ asm(".section .text\n" "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call "call main\n" // main() returns the status code, we'll exit with it. "mov %eax, %edi\n" // retrieve exit code (32 bit) - "mov $60, %rax\n" // NR_exit == 60 + "mov $60, %eax\n" // NR_exit == 60 "syscall\n" // really exit "hlt\n" // ensure it does not return ""); -- cgit v1.2.3-59-g8ed1b From b0fe9dec66370cef23e9c281d229f4c44f84cfa2 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sun, 24 Oct 2021 19:43:23 +0200 Subject: tools/nolibc: Implement gettid() Allow test programs to determine their thread ID. 
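For illustration only (this snippet is not part of the patch), a nolibc-based test could exercise the new wrapper roughly as follows; the file name and build command are assumptions, not something the patch prescribes:

```
/* Hypothetical nolibc test program using the new gettid() wrapper.
 * Assumed build: gcc -nostdlib -static -include tools/include/nolibc/nolibc.h \
 *                -o tid-test tid-test.c
 */
int main(void)
{
	pid_t tid = gettid();	/* wraps my_syscall0(__NR_gettid) via sys_gettid() */
	pid_t pid = getpid();

	/* In a single-threaded program the thread ID equals the process ID. */
	return (tid > 0 && tid == pid) ? 0 : 1;
}
```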
Signed-off-by: Mark Brown Cc: Willy Tarreau Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 4988866af0b5..c1c285fe494a 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1571,6 +1571,12 @@ pid_t sys_getpid(void) return my_syscall0(__NR_getpid); } +static __attribute__((unused)) +pid_t sys_gettid(void) +{ + return my_syscall0(__NR_gettid); +} + static __attribute__((unused)) int sys_gettimeofday(struct timeval *tv, struct timezone *tz) { @@ -2029,6 +2035,18 @@ pid_t getpid(void) return ret; } +static __attribute__((unused)) +pid_t gettid(void) +{ + pid_t ret = sys_gettid(); + + if (ret < 0) { + SET_ERRNO(-ret); + ret = -1; + } + return ret; +} + static __attribute__((unused)) int gettimeofday(struct timeval *tv, struct timezone *tz) { -- cgit v1.2.3-59-g8ed1b From 1f8da406a964dcc01121385a54fafd28e1c6a796 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 23 Sep 2021 10:07:14 -0700 Subject: srcu: Prevent redundant __srcu_read_unlock() wakeup Tiny SRCU readers can appear at task level, but also in interrupt and softirq handlers. Because Tiny SRCU is selected only in kernels built with CONFIG_SMP=n and CONFIG_PREEMPTION=n, it is not possible for a grace period to start while there is a non-task-level SRCU reader executing. This means that it does not make sense for __srcu_read_unlock() to awaken the Tiny SRCU grace period, because that can only happen when the grace period is waiting for one value of ->srcu_idx and __srcu_read_unlock() is ending the last reader for some other value of ->srcu_idx. After all, any such wakeup will be redundant. Worse yet, in some cases, such wakeups generate lockdep splats: ====================================================== WARNING: possible circular locking dependency detected 5.15.0-rc1+ #3758 Not tainted ------------------------------------------------------ rcu_torture_rea/53 is trying to acquire lock: ffffffff9514e6a8 (srcu_ctl.srcu_wq.lock){..-.}-{2:2}, at: xa/0x30 but task is already holding lock: ffff95c642479d80 (&p->pi_lock){-.-.}-{2:2}, at: _extend+0x370/0x400 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&p->pi_lock){-.-.}-{2:2}: _raw_spin_lock_irqsave+0x2f/0x50 try_to_wake_up+0x50/0x580 swake_up_locked.part.7+0xe/0x30 swake_up_one+0x22/0x30 rcutorture_one_extend+0x1b6/0x400 rcu_torture_one_read+0x290/0x5d0 rcu_torture_timer+0x1a/0x70 call_timer_fn+0xa6/0x230 run_timer_softirq+0x493/0x4c0 __do_softirq+0xc0/0x371 irq_exit+0x73/0x90 sysvec_apic_timer_interrupt+0x63/0x80 asm_sysvec_apic_timer_interrupt+0x12/0x20 default_idle+0xb/0x10 default_idle_call+0x5e/0x170 do_idle+0x18a/0x1f0 cpu_startup_entry+0xa/0x10 start_kernel+0x678/0x69f secondary_startup_64_no_verify+0xc2/0xcb -> #0 (srcu_ctl.srcu_wq.lock){..-.}-{2:2}: __lock_acquire+0x130c/0x2440 lock_acquire+0xc2/0x270 _raw_spin_lock_irqsave+0x2f/0x50 swake_up_one+0xa/0x30 rcutorture_one_extend+0x387/0x400 rcu_torture_one_read+0x290/0x5d0 rcu_torture_reader+0xac/0x200 kthread+0x12d/0x150 ret_from_fork+0x22/0x30 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&p->pi_lock); lock(srcu_ctl.srcu_wq.lock); lock(&p->pi_lock); lock(srcu_ctl.srcu_wq.lock); *** DEADLOCK *** 1 lock held by rcu_torture_rea/53: #0: ffff95c642479d80 (&p->pi_lock){-.-.}-{2:2}, at: _extend+0x370/0x400 stack backtrace: CPU: 0 PID: 53 Comm: rcu_torture_rea Not tainted 5.15.0-rc1+ Hardware name: Red Hat KVM/RHEL-AV, BIOS e_el8.5.0+746+bbd5d70c 04/01/2014 Call Trace: check_noncircular+0xfe/0x110 ? find_held_lock+0x2d/0x90 __lock_acquire+0x130c/0x2440 lock_acquire+0xc2/0x270 ? swake_up_one+0xa/0x30 ? find_held_lock+0x72/0x90 _raw_spin_lock_irqsave+0x2f/0x50 ? swake_up_one+0xa/0x30 swake_up_one+0xa/0x30 rcutorture_one_extend+0x387/0x400 rcu_torture_one_read+0x290/0x5d0 rcu_torture_reader+0xac/0x200 ? rcutorture_oom_notify+0xf0/0xf0 ? __kthread_parkme+0x61/0x90 ? rcu_torture_one_read+0x5d0/0x5d0 kthread+0x12d/0x150 ? set_kthread_struct+0x40/0x40 ret_from_fork+0x22/0x30 This is a false positive because there is only one CPU, and both locks are raw (non-preemptible) spinlocks. However, it is worthwhile getting rid of the redundant wakeup, which has the side effect of breaking the theoretical deadlock cycle. This commit therefore eliminates the redundant wakeups. Signed-off-by: Paul E. McKenney --- kernel/rcu/srcutiny.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index a0ba2ed49bc6..92c002d65482 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -99,7 +99,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx) int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); - if (!newval && READ_ONCE(ssp->srcu_gp_waiting)) + if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task()) swake_up_one(&ssp->srcu_wq); } EXPORT_SYMBOL_GPL(__srcu_read_unlock); -- cgit v1.2.3-59-g8ed1b From f5dbc594b5bac1fa694174032b8d3d0249945fd3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 18 Sep 2021 20:40:48 -0700 Subject: rcu-tasks: Don't remove tasks with pending IPIs from holdout list Currently, the check_all_holdout_tasks_trace() function removes all tasks marked with ->trc_reader_checked from the holdout list, including those with IPIs pending. This means that the IPI handler might arrive at a task that has already been removed from the list, which is at best an accident waiting to happen. This commit therefore avoids removing tasks with IPIs pending from the holdout list. 
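Conceptually, the tightened check looks like the sketch below (illustrative comments added here; the actual change is the small diff in check_all_holdout_tasks_trace() further down):

```
/* Sketch of the tightened holdout-list scan; see the real diff below. */
if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&	/* no IPI still in flight for this task */
    READ_ONCE(t->trc_reader_checked))			/* and its reader state has been verified */
	trc_del_holdout(t);				/* only then remove it from the holdout list */
```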
This in turn means that the "if" condition in the for_each_online_cpu() loop in rcu_tasks_trace_postgp() should always evaluate to false, so a WARN_ON_ONCE() is added to check that. Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 7da3c81c3f59..bd44cd4794d3 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1121,7 +1121,8 @@ static void check_all_holdout_tasks_trace(struct list_head *hop, trc_wait_for_one_reader(t, hop); // If check succeeded, remove this task from the list. - if (READ_ONCE(t->trc_reader_checked)) + if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && + READ_ONCE(t->trc_reader_checked)) trc_del_holdout(t); else if (needreport) show_stalled_task_trace(t, firstreport); @@ -1156,7 +1157,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) // Yes, this assumes that CPUs process IPIs in order. If that ever // changes, there will need to be a recheck and/or timed wait. for_each_online_cpu(cpu) - if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))) + if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); // Remove the safety count. -- cgit v1.2.3-59-g8ed1b From 902d82e6299631ec6b6c759ac4b45e44b45cc832 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 22 Sep 2021 20:31:44 -0700 Subject: rcutorture: Sanitize RCUTORTURE_RDR_MASK RCUTORTURE_RDR_MASK is currently not the bit indicated by RCUTORTURE_RDR_SHIFT, but is instead all the bits less significant than that one. This is an accident waiting to happen, so this commit makes RCUTORTURE_RDR_MASK be that one bit and adjusts uses accordingly. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 8b410d982990..1e03fe681f02 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -54,7 +54,7 @@ MODULE_AUTHOR("Paul E. McKenney and Josh Triplett extendables field, extendables param, and related definitions. */ #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ -#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) +#define RCUTORTURE_RDR_MASK (1 << RCUTORTURE_RDR_SHIFT) #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ @@ -1466,6 +1466,7 @@ static void rcutorture_one_extend(int *readstate, int newstate, if (lockit) raw_spin_lock_irqsave(¤t->pi_lock, flags); cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); + idxold = 0; if (lockit) raw_spin_unlock_irqrestore(¤t->pi_lock, flags); } @@ -1476,7 +1477,7 @@ static void rcutorture_one_extend(int *readstate, int newstate, /* Update the reader state. 
*/ if (idxnew == -1) - idxnew = idxold & ~RCUTORTURE_RDR_MASK; + idxnew = idxold & RCUTORTURE_RDR_MASK; WARN_ON_ONCE(idxnew < 0); WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); *readstate = idxnew | newstate; @@ -1626,7 +1627,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) rcu_torture_writer_state, cookie, cur_ops->get_gp_state()); rcutorture_one_extend(&readstate, 0, trsp, rtrsp); - WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); + WARN_ON_ONCE(readstate); // This next splat is expected behavior if leakpointer, especially // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); -- cgit v1.2.3-59-g8ed1b From 1c3d53986f744c469f0e0b136b8922c83363bfde Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 22 Sep 2021 20:49:12 -0700 Subject: rcutorture: More thoroughly test nested readers Currently, nested readers occur only when a timer handler interrupts a reader. This is rare, and is thus insufficient testing of the transition between nesting levels. This commit therefore causes rcutorture nested readers to be the rule rather than the exception. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 73 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 23 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 1e03fe681f02..d295da380fb4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -53,15 +53,18 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney and Josh Triplett "); /* Bits for ->extendables field, extendables param, and related definitions. */ -#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ -#define RCUTORTURE_RDR_MASK (1 << RCUTORTURE_RDR_SHIFT) +#define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ +#define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1) +#define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */ +#define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2) #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ -#define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ -#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ +#define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ +#define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ +#define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ #define RCUTORTURE_MAX_EXTEND \ (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) @@ -1420,13 +1423,15 @@ static void rcutorture_one_extend(int *readstate, int newstate, struct rt_read_seg *rtrsp) { unsigned long flags; - int idxnew = -1; - int idxold = *readstate; + int idxnew1 = -1; + int idxnew2 = -1; + int idxold1 = *readstate; + int idxold2 = idxold1; int statesnew = ~*readstate & newstate; int statesold = *readstate & ~newstate; - WARN_ON_ONCE(idxold < 0); - WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); + WARN_ON_ONCE(idxold2 < 0); + WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); rtrsp->rt_readstate = newstate; /* First, put new protection in place to avoid critical-section gap. 
*/ @@ -1440,8 +1445,10 @@ static void rcutorture_one_extend(int *readstate, int newstate, preempt_disable(); if (statesnew & RCUTORTURE_RDR_SCHED) rcu_read_lock_sched(); - if (statesnew & RCUTORTURE_RDR_RCU) - idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; + if (statesnew & RCUTORTURE_RDR_RCU_1) + idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; + if (statesnew & RCUTORTURE_RDR_RCU_2) + idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; /* * Next, remove old protection, in decreasing order of strength @@ -1460,13 +1467,19 @@ static void rcutorture_one_extend(int *readstate, int newstate, local_bh_enable(); if (statesold & RCUTORTURE_RDR_RBH) rcu_read_unlock_bh(); - if (statesold & RCUTORTURE_RDR_RCU) { + if (statesold & RCUTORTURE_RDR_RCU_2) { + cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); + WARN_ON_ONCE(idxnew2 != -1); + idxold2 = 0; + } + if (statesold & RCUTORTURE_RDR_RCU_1) { bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); if (lockit) raw_spin_lock_irqsave(¤t->pi_lock, flags); - cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); - idxold = 0; + cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); + WARN_ON_ONCE(idxnew1 != -1); + idxold1 = 0; if (lockit) raw_spin_unlock_irqrestore(¤t->pi_lock, flags); } @@ -1476,13 +1489,19 @@ static void rcutorture_one_extend(int *readstate, int newstate, cur_ops->read_delay(trsp, rtrsp); /* Update the reader state. */ - if (idxnew == -1) - idxnew = idxold & RCUTORTURE_RDR_MASK; - WARN_ON_ONCE(idxnew < 0); - WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); - *readstate = idxnew | newstate; - WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); - WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); + if (idxnew1 == -1) + idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; + WARN_ON_ONCE(idxnew1 < 0); + if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) + pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); + if (idxnew2 == -1) + idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; + WARN_ON_ONCE(idxnew2 < 0); + WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); + *readstate = idxnew1 | idxnew2 | newstate; + WARN_ON_ONCE(*readstate < 0); + if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) + pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); } /* Return the biggest extendables mask given current RCU and boot parameters. */ @@ -1492,7 +1511,7 @@ static int rcutorture_extend_mask_max(void) WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; - mask = mask | RCUTORTURE_RDR_RCU; + mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; return mask; } @@ -1507,13 +1526,21 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; - WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); + WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); /* Mostly only one bit (need preemption!), sometimes lots of bits. */ if (!(randmask1 & 0x7)) mask = mask & randmask2; else mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); + // Can't have nested RCU reader without outer RCU reader. + if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { + if (oldmask & RCUTORTURE_RDR_RCU_1) + mask &= ~RCUTORTURE_RDR_RCU_2; + else + mask |= RCUTORTURE_RDR_RCU_1; + } + /* * Can't enable bh w/irq disabled. 
*/ @@ -1533,7 +1560,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) mask |= oldmask & bhs; } - return mask ?: RCUTORTURE_RDR_RCU; + return mask ?: RCUTORTURE_RDR_RCU_1; } /* -- cgit v1.2.3-59-g8ed1b From 340170fef01b18631128006de03522b671e6e2b5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 24 Sep 2021 21:30:26 -0700 Subject: rcutorture: Suppress pi-lock-across read-unlock testing for Tiny SRCU Because Tiny srcu_read_unlock() directly calls swake_up_one(), lockdep complains when a pi lock is held across that srcu_read_unlock(). Although this is a lockdep false positive (there is no other CPU to complete the deadlock cycle), lockdep is what it is at the moment. This commit therefore prevents rcutorture from holding pi lock across a Tiny srcu_read_unlock(). Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index d295da380fb4..503e14e62e8f 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -349,6 +349,7 @@ struct rcu_torture_ops { int can_boost; int extendables; int slow_gps; + int no_pi_lock; const char *name; }; @@ -670,6 +671,7 @@ static struct rcu_torture_ops srcu_ops = { .cb_barrier = srcu_torture_barrier, .stats = srcu_torture_stats, .irq_capable = 1, + .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), .name = "srcu" }; @@ -703,6 +705,7 @@ static struct rcu_torture_ops srcud_ops = { .cb_barrier = srcu_torture_barrier, .stats = srcu_torture_stats, .irq_capable = 1, + .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), .name = "srcud" }; @@ -723,6 +726,7 @@ static struct rcu_torture_ops busted_srcud_ops = { .cb_barrier = srcu_torture_barrier, .stats = srcu_torture_stats, .irq_capable = 1, + .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), .extendables = RCUTORTURE_MAX_EXTEND, .name = "busted_srcud" }; @@ -1473,8 +1477,9 @@ static void rcutorture_one_extend(int *readstate, int newstate, idxold2 = 0; } if (statesold & RCUTORTURE_RDR_RCU_1) { - bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); + bool lockit; + lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); if (lockit) raw_spin_lock_irqsave(¤t->pi_lock, flags); cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); -- cgit v1.2.3-59-g8ed1b From c30c876312f66b0422a1d632b8c45b6416deeec0 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Mon, 25 Oct 2021 11:26:56 +0800 Subject: refscale: Simplify the errexit checkpoint There is only the one OOM error case in main_func(), so this commit eliminates the errexit local variable in favor of a branch to cleanup code. Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 1631ef8a138d..b59457cef88d 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -637,7 +637,6 @@ static u64 process_durations(int n) // point all the timestamps are printed. 
static int main_func(void *arg) { - bool errexit = false; int exp, r; char buf1[64]; char *buf; @@ -651,7 +650,7 @@ static int main_func(void *arg) buf = kzalloc(64 + nruns * 32, GFP_KERNEL); if (!result_avg || !buf) { VERBOSE_SCALEOUT_ERRSTRING("out of memory"); - errexit = true; + goto oom_exit; } if (holdoff) schedule_timeout_interruptible(holdoff * HZ); @@ -663,8 +662,6 @@ static int main_func(void *arg) // Start exp readers up per experiment for (exp = 0; exp < nruns && !torture_must_stop(); exp++) { - if (errexit) - break; if (torture_must_stop()) goto end; @@ -698,26 +695,22 @@ static int main_func(void *arg) // Print the average of all experiments SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n"); - if (!errexit) { - buf[0] = 0; - strcat(buf, "\n"); - strcat(buf, "Runs\tTime(ns)\n"); - } + buf[0] = 0; + strcat(buf, "\n"); + strcat(buf, "Runs\tTime(ns)\n"); for (exp = 0; exp < nruns; exp++) { u64 avg; u32 rem; - if (errexit) - break; avg = div_u64_rem(result_avg[exp], 1000, &rem); sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem); strcat(buf, buf1); } - if (!errexit) - SCALEOUT("%s", buf); + SCALEOUT("%s", buf); +oom_exit: // This will shutdown everything including us. if (shutdown) { shutdown_start = 1; -- cgit v1.2.3-59-g8ed1b From 9880eb878c315b241002d43d7ff89df23713a51b Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Mon, 25 Oct 2021 11:26:57 +0800 Subject: refscale: Prevent buffer to pr_alert() being too long 0Day/LKP observed that the refscale results fail to complete when larger values of nrun (such as 300) are specified. The problem is that printk() can accept at most a 1024-byte buffer. This commit therefore prints the buffer whenever its length exceeds 800 bytes. CC: Philip Li Reported-by: kernel test robot Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index b59457cef88d..d8ed1ca3adc0 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -604,7 +604,7 @@ static u64 process_durations(int n) char *buf; u64 sum = 0; - buf = kmalloc(128 + nreaders * 32, GFP_KERNEL); + buf = kmalloc(800 + 64, GFP_KERNEL); if (!buf) return 0; buf[0] = 0; @@ -617,13 +617,15 @@ static u64 process_durations(int n) if (i % 5 == 0) strcat(buf, "\n"); + if (strlen(buf) >= 800) { + pr_alert("%s", buf); + buf[0] = 0; + } strcat(buf, buf1); sum += rt->last_duration_ns; } - strcat(buf, "\n"); - - SCALEOUT("%s\n", buf); + pr_alert("%s\n", buf); kfree(buf); return sum; @@ -647,7 +649,7 @@ static int main_func(void *arg) VERBOSE_SCALEOUT("main_func task started"); result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL); - buf = kzalloc(64 + nruns * 32, GFP_KERNEL); + buf = kzalloc(800 + 64, GFP_KERNEL); if (!result_avg || !buf) { VERBOSE_SCALEOUT_ERRSTRING("out of memory"); goto oom_exit; @@ -695,10 +697,7 @@ static int main_func(void *arg) // Print the average of all experiments SCALEOUT("END OF TEST. 
Calculating average duration per loop (nanoseconds)...\n"); - buf[0] = 0; - strcat(buf, "\n"); - strcat(buf, "Runs\tTime(ns)\n"); - + pr_alert("Runs\tTime(ns)\n"); for (exp = 0; exp < nruns; exp++) { u64 avg; u32 rem; @@ -706,9 +705,13 @@ static int main_func(void *arg) avg = div_u64_rem(result_avg[exp], 1000, &rem); sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem); strcat(buf, buf1); + if (strlen(buf) >= 800) { + pr_alert("%s", buf); + buf[0] = 0; + } } - SCALEOUT("%s", buf); + pr_alert("%s", buf); oom_exit: // This will shutdown everything including us. -- cgit v1.2.3-59-g8ed1b From b6a4fd35d2d345fbfbd4421b0f626c3d8f1f6cf7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Sep 2021 12:50:05 -0700 Subject: torture: Catch kvm.sh help text up with actual options This commit brings the kvm.sh script's help text up to date with recently (and some not-so-recently) added parameters. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 6cf9ec6a3d1c..6de0c183db5b 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -74,7 +74,9 @@ usage () { echo " --help" echo " --interactive" echo " --jitter N [ maxsleep (us) [ maxspin (us) ] ]" + echo " --kasan" echo " --kconfig Kconfig-options" + echo " --kcsan" echo " --kmake-arg kernel-make-arguments" echo " --mac nn:nn:nn:nn:nn:nn" echo " --memory megabytes|nnnG" @@ -83,6 +85,7 @@ usage () { echo " --qemu-cmd qemu-system-..." echo " --remote" echo " --results absolute-pathname" + echo " --shutdown-grace seconds" echo " --torture lock|rcu|rcuscale|refscale|scf" echo " --trust-make" exit 1 @@ -175,14 +178,14 @@ do jitter="$2" shift ;; + --kasan) + TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG + ;; --kconfig|--kconfigs) checkarg --kconfig "(Kconfig options)" $# "$2" '^CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\( CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\)*$' '^error$' TORTURE_KCONFIG_ARG="`echo "$TORTURE_KCONFIG_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`" shift ;; - --kasan) - TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG - ;; --kcsan) TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_STRICT=y CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG ;; -- cgit v1.2.3-59-g8ed1b From c06354a121777d3c6c5e60bed9d4f1aae04afd72 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 10 Nov 2021 07:47:41 -0800 Subject: torture: Make kvm-find-errors.sh report link-time undefined symbols This commit makes kvm-find-errors.sh check for and report undefined symbols that are detected at link time. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh | 4 ++-- tools/testing/selftests/rcutorture/bin/parse-build.sh | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh index daf64b507038..2e9e9e2eedb6 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh @@ -30,9 +30,9 @@ editor=${EDITOR-vi} files= for i in ${rundir}/*/Make.out do - if egrep -q "error:|warning:" < $i + if egrep -q "error:|warning:|^ld: .*undefined reference to" < $i then - egrep "error:|warning:" < $i > $i.diags + egrep "error:|warning:|^ld: .*undefined reference to" < $i > $i.diags files="$files $i.diags $i" fi done diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh index 9313e5065ae9..2dbfca3589b1 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-build.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh @@ -39,7 +39,8 @@ fi grep warning: < $F > $T/warnings grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings -cat $T/hwarnings $T/cwarnings > $T/rcuwarnings +grep "^ld: .*undefined reference to" $T/warnings | head -1 > $T/ldwarnings +cat $T/hwarnings $T/cwarnings $T/ldwarnings > $T/rcuwarnings if test -s $T/rcuwarnings then print_warning $title build errors: -- cgit v1.2.3-59-g8ed1b From f61537009e3a7c5be8d30ebf61cc9f8596800d4c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Nov 2021 12:47:50 -0800 Subject: torture: Retry download once before giving up Currently, a transient network error can kill a run if it happens while downloading the tarball to one of the target systems. This commit therefore does a 60-second wait and then a retry. If further experience indicates, a more elaborate mechanism might be used later. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-remote.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh index c7d42ef80c53..67ebc56111e3 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh @@ -157,8 +157,15 @@ do ret=$? if test "$ret" -ne 0 then - echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log" - exit 10 | tee -a "$oldrun/remote-log" + echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. | tee -a "$oldrun/remote-log" + sleep 60 + cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -" + ret=$? + if test "$ret" -ne 0 + then + echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log" + exit 10 + fi fi done -- cgit v1.2.3-59-g8ed1b From 4ead4e33194a4bb080f726fafef787b33ca98965 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Nov 2021 21:02:43 -0800 Subject: rcutorture: Cause TREE02 and TREE10 scenarios to do more callback flooding This commit enables two callback-flood kthreads for the TREE02 scenario and 28 for the TREE10 scenario. Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot | 1 + 2 files changed, 2 insertions(+) create mode 100644 tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot create mode 100644 tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot new file mode 100644 index 000000000000..dd914fa8f690 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot @@ -0,0 +1 @@ +rcutorture.fwd_progress=2 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot new file mode 100644 index 000000000000..dd914fa8f690 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot @@ -0,0 +1 @@ +rcutorture.fwd_progress=2 -- cgit v1.2.3-59-g8ed1b From a959ed627a426b5bf01cf29c9b011f63459e2345 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 24 Nov 2021 16:04:51 -0800 Subject: rcutorture: Test RCU Tasks lock-contention detection This commit adjusts the TRACE02 scenario to use a pair of callback-flood kthreads. This in turn forces lock contention on the single RCU Tasks Trace callback queue, which forces use of all CPUs' queues, thus testing this transition. (No, there is not yet any way to transition back. Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot index 9675ad632dcc..c70b5db6c2ae 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot @@ -1 +1,2 @@ rcutorture.torture_type=tasks-tracing +rcutorture.fwd_progress=2 -- cgit v1.2.3-59-g8ed1b From b6c9dbf04f24e6bba91246abf55d1e0a13f5062a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 30 Nov 2021 11:33:01 -0800 Subject: torture: Fix incorrectly redirected "exit" in kvm-remote.sh The "exit 4" in kvm-remote.sh is pointlessly redirected, so this commit removes the redirection. Fixes: 0092eae4cb4e ("torture: Add kvm-remote.sh script for distributed rcutorture test runs") Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-remote.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh index 67ebc56111e3..7ab79b0d5ad1 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh @@ -144,7 +144,7 @@ do if test "$ret" -ne 0 then echo System $i unreachable, giving up. | tee -a "$oldrun/remote-log" - exit 4 | tee -a "$oldrun/remote-log" + exit 4 fi done -- cgit v1.2.3-59-g8ed1b From 90b21bcfb2846625c4f2e4a0bf5969543cef8ba7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 30 Nov 2021 11:47:37 -0800 Subject: torture: Properly redirect kvm-remote.sh "echo" commands The echo commands following initialization of the "oldrun" variable need to be "tee"d to $oldrun/remote-log. This commit fixes several stragglers. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/kvm-remote.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh index 7ab79b0d5ad1..e09b1bc78708 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh @@ -184,16 +184,16 @@ checkremotefile () { ret=$? if test "$ret" -eq 255 then - echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date` + echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date` | tee -a "$oldrun/remote-log" elif test "$ret" -eq 0 then return 0 elif test "$ret" -eq 1 then - echo " ---" File \"$2\" not found: ssh $1 test -f \"$2\" + echo " ---" File \"$2\" not found: ssh $1 test -f \"$2\" | tee -a "$oldrun/remote-log" return 1 else - echo " ---" Exit code $ret: ssh $1 test -f \"$2\", retry after $sleeptime seconds. `date` + echo " ---" Exit code $ret: ssh $1 test -f \"$2\", retry after $sleeptime seconds. `date` | tee -a "$oldrun/remote-log" return $ret fi sleep $sleeptime @@ -252,7 +252,7 @@ do sleep 30 fi done -echo All batches started. `date` +echo All batches started. `date` | tee -a "$oldrun/remote-log" # Wait for all remaining scenarios to complete and collect results. for i in $systems @@ -261,7 +261,7 @@ do do sleep 30 done - echo " ---" Collecting results from $i `date` + echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log" ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - ) done -- cgit v1.2.3-59-g8ed1b From a4382659487f84c00b5fbb61df25a9ad59396789 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 16 Sep 2021 14:10:45 +0200 Subject: rcu: Ignore rdp.cpu_no_qs.b.exp on preemptible RCU's rcu_qs() Preemptible RCU does not use the rcu_data structure's ->cpu_no_qs.b.exp, instead using a separate ->exp_deferred_qs field to record the need for an expedited quiescent state. In fact ->cpu_no_qs.b.exp should never be set in preemptible RCU because preemptible RCU's expedited grace periods use other mechanisms to record quiescent states. This commit therefore removes the implicit rcu_qs() reference to ->cpu_no_qs.b.exp in favor of a direct reference to ->cpu_no_qs.b.norm. Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5199559fbbf0..113c19b086ac 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -277,12 +277,16 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * current task, there might be any number of other tasks blocked while * in an RCU read-side critical section. * + * Unlike non-preemptible-RCU, quiescent state reports for expedited + * grace periods are handled separately via deferred quiescent states + * and context switch events. + * * Callers to this function must disable preemption. 
*/ static void rcu_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n"); - if (__this_cpu_read(rcu_data.cpu_no_qs.s)) { + if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) { trace_rcu_grace_period(TPS("rcu_preempt"), __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); -- cgit v1.2.3-59-g8ed1b From 6e16b0f7bae3817ea67f4bef4f84298e880fbf66 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 16 Sep 2021 14:10:47 +0200 Subject: rcu: Move rcu_data.cpu_no_qs.b.exp reset to rcu_export_exp_rdp() On non-preemptible RCU, move clearing of the rcu_data structure's ->cpu_no_qs.b.exp filed to the actual expedited quiescent state report function, matching hw preemptible RCU handles the ->exp_deferred_qs field. This prepares for removing ->exp_deferred_qs in favor of ->cpu_no_qs.b.exp for both preemptible and non-preemptible RCU. Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 1 + kernel/rcu/tree_plugin.h | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index f3947c49eee7..6c6eb3220385 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -256,6 +256,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, static void rcu_report_exp_rdp(struct rcu_data *rdp) { WRITE_ONCE(rdp->exp_deferred_qs, false); + WRITE_ONCE(rdp->cpu_no_qs.b.exp, false); rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true); } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 113c19b086ac..6d58b75d2782 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -849,10 +849,8 @@ static void rcu_qs(void) trace_rcu_grace_period(TPS("rcu_sched"), __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); - if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) - return; - __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); - rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); + if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) + rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); } /* -- cgit v1.2.3-59-g8ed1b From 6120b72e25e195b6fa15b0a674479a38166c392a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 16 Sep 2021 14:10:48 +0200 Subject: rcu: Remove rcu_data.exp_deferred_qs and convert to rcu_data.cpu no_qs.b.exp Having two fields for the same purpose with subtle differences on different RCU flavours is confusing, especially when both fields always exist on both RCU flavours. Fortunately, it is now safe for preemptible RCU to rely on the rcu_data structure's ->cpu_no_qs.b.exp field, just like non-preemptible RCU. This commit therefore removes the ad-hoc ->exp_deferred_qs field. Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 1 - kernel/rcu/tree_exp.h | 5 ++--- kernel/rcu/tree_plugin.h | 12 ++++++------ 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 305cf6aeb408..ea46ed40f6bc 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -157,7 +157,6 @@ struct rcu_data { bool core_needs_qs; /* Core waits for quiescent state. */ bool beenonline; /* CPU online at least once. */ bool gpwrap; /* Possible ->gp_seq wrap. */ - bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */ bool cpu_started; /* RCU watching this onlining CPU. */ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ unsigned long grpmask; /* Mask to apply to leaf qsmask. 
*/ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 6c6eb3220385..fc2ee326a6f7 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -255,7 +255,6 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, */ static void rcu_report_exp_rdp(struct rcu_data *rdp) { - WRITE_ONCE(rdp->exp_deferred_qs, false); WRITE_ONCE(rdp->cpu_no_qs.b.exp, false); rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true); } @@ -656,7 +655,7 @@ static void rcu_exp_handler(void *unused) rcu_dynticks_curr_cpu_in_eqs()) { rcu_report_exp_rdp(rdp); } else { - rdp->exp_deferred_qs = true; + WRITE_ONCE(rdp->cpu_no_qs.b.exp, true); set_tsk_need_resched(t); set_preempt_need_resched(); } @@ -678,7 +677,7 @@ static void rcu_exp_handler(void *unused) if (depth > 0) { raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->expmask & rdp->grpmask) { - rdp->exp_deferred_qs = true; + WRITE_ONCE(rdp->cpu_no_qs.b.exp, true); t->rcu_read_unlock_special.b.exp_hint = true; } raw_spin_unlock_irqrestore_rcu_node(rnp, flags); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6d58b75d2782..e1a9fb96e0b9 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -260,10 +260,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * no need to check for a subsequent expedited GP. (Though we are * still in a quiescent state in any case.) */ - if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs) + if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp) rcu_report_exp_rdp(rdp); else - WARN_ON_ONCE(rdp->exp_deferred_qs); + WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); } /* @@ -354,7 +354,7 @@ void rcu_note_context_switch(bool preempt) * means that we continue to block the current grace period. */ rcu_qs(); - if (rdp->exp_deferred_qs) + if (rdp->cpu_no_qs.b.exp) rcu_report_exp_rdp(rdp); rcu_tasks_qs(current, preempt); trace_rcu_utilization(TPS("End context switch")); @@ -481,7 +481,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) */ special = t->rcu_read_unlock_special; rdp = this_cpu_ptr(&rcu_data); - if (!special.s && !rdp->exp_deferred_qs) { + if (!special.s && !rdp->cpu_no_qs.b.exp) { local_irq_restore(flags); return; } @@ -501,7 +501,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * tasks are handled when removing the task from the * blocked-tasks list below. */ - if (rdp->exp_deferred_qs) + if (rdp->cpu_no_qs.b.exp) rcu_report_exp_rdp(rdp); /* Clean up if blocked during RCU read-side critical section. */ @@ -584,7 +584,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) */ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) { - return (__this_cpu_read(rcu_data.exp_deferred_qs) || + return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) || READ_ONCE(t->rcu_read_unlock_special.s)) && rcu_preempt_depth() == 0; } -- cgit v1.2.3-59-g8ed1b From 5401cc5264fff75aea10cf44b380b265d18d17ba Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 28 Sep 2021 11:06:35 -0700 Subject: rcu: Mark sync_sched_exp_online_cleanup() ->cpu_no_qs.b.exp load The sync_sched_exp_online_cleanup() is called from rcutree_online_cpu(), which can be invoked with interrupts enabled. This means that the ->cpu_no_qs.b.exp field is subject to data races from the rcu_exp_handler() IPI handler, so this commit marks the load from that field. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_exp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index fc2ee326a6f7..12f1a18d7b6d 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -759,7 +759,7 @@ static void sync_sched_exp_online_cleanup(int cpu) my_cpu = get_cpu(); /* Quiescent state either not needed or already requested, leave. */ if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || - rdp->cpu_no_qs.b.exp) { + READ_ONCE(rdp->cpu_no_qs.b.exp)) { put_cpu(); return; } -- cgit v1.2.3-59-g8ed1b From 147f04b14adde831eb4a0a1e378667429732f9e8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 29 Sep 2021 09:21:34 -0700 Subject: rcu: Prevent expedited GP from enabling tick on offline CPU If an RCU expedited grace period starts just when a CPU is in the process of going offline, so that the outgoing CPU has completed its pass through stop-machine but has not yet completed its final dive into the idle loop, RCU will attempt to enable that CPU's scheduling-clock tick via a call to tick_dep_set_cpu(). For this to happen, that CPU has to have been online when the expedited grace period completed its CPU-selection phase. This is pointless: The outgoing CPU has interrupts disabled, so it cannot take a scheduling-clock tick anyway. In addition, the tick_dep_set_cpu() function's eventual call to irq_work_queue_on() will splat as follows: smpboot: CPU 1 is now offline WARNING: CPU: 6 PID: 124 at kernel/irq_work.c:95 +irq_work_queue_on+0x57/0x60 Modules linked in: CPU: 6 PID: 124 Comm: kworker/6:2 Not tainted 5.15.0-rc1+ #3 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS +rel-1.14.0-0-g155821a-rebuilt.opensuse.org 04/01/2014 Workqueue: rcu_gp wait_rcu_exp_gp RIP: 0010:irq_work_queue_on+0x57/0x60 Code: 8b 05 1d c7 ea 62 a9 00 00 f0 00 75 21 4c 89 ce 44 89 c7 e8 +9b 37 fa ff ba 01 00 00 00 89 d0 c3 4c 89 cf e8 3b ff ff ff eb ee <0f> 0b eb b7 +0f 0b eb db 90 48 c7 c0 98 2a 02 00 65 48 03 05 91 6f RSP: 0000:ffffb12cc038fe48 EFLAGS: 00010282 RAX: 0000000000000001 RBX: 0000000000005208 RCX: 0000000000000020 RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff9ad01f45a680 RBP: 000000000004c990 R08: 0000000000000001 R09: ffff9ad01f45a680 R10: ffffb12cc0317db0 R11: 0000000000000001 R12: 00000000fffecee8 R13: 0000000000000001 R14: 0000000000026980 R15: ffffffff9e53ae00 FS: 0000000000000000(0000) GS:ffff9ad01f580000(0000) +knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000000de0c000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: tick_nohz_dep_set_cpu+0x59/0x70 rcu_exp_wait_wake+0x54e/0x870 ? sync_rcu_exp_select_cpus+0x1fc/0x390 process_one_work+0x1ef/0x3c0 ? process_one_work+0x3c0/0x3c0 worker_thread+0x28/0x3c0 ? process_one_work+0x3c0/0x3c0 kthread+0x115/0x140 ? set_kthread_struct+0x40/0x40 ret_from_fork+0x22/0x30 ---[ end trace c5bf75eb6aa80bc6 ]--- This commit therefore avoids invoking tick_dep_set_cpu() on offlined CPUs to limit both futility and false-positive splats. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_exp.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 12f1a18d7b6d..a96d17206d87 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -506,7 +506,10 @@ static void synchronize_rcu_expedited_wait(void) if (rdp->rcu_forced_tick_exp) continue; rdp->rcu_forced_tick_exp = true; - tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP); + preempt_disable(); + if (cpu_online(cpu)) + tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP); + preempt_enable(); } } j = READ_ONCE(jiffies_till_first_fqs); -- cgit v1.2.3-59-g8ed1b From 790da248978a0722d92d1471630c881704f7eb0d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 29 Sep 2021 11:09:34 -0700 Subject: rcu: Make idle entry report expedited quiescent states In non-preemptible kernels, an unfortunately timed expedited grace period can result in the rcu_exp_handler() IPI handler setting the rcu_data structure's cpu_no_qs.b.exp field just as the target CPU enters idle. There are situations in which this field will not be checked until after that CPU exits idle. The resulting grace-period latency does not qualify as "expedited". This commit therefore checks this field upon non-preemptible idle entry in the rcu_preempt_deferred_qs() function. It also qualifies the rcu_core() preempt_count() check with IS_ENABLED(CONFIG_PREEMPT_COUNT) to prevent false-positive quiescent states from count-free kernels. Reported-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree_plugin.h | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ef8d36f580fc..a8d1fe35f482 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2714,7 +2714,7 @@ static __latent_entropy void rcu_core(void) WARN_ON_ONCE(!rdp->beenonline); /* Report any deferred quiescent states if preemption enabled. */ - if (!(preempt_count() & PREEMPT_MASK)) { + if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) { rcu_preempt_deferred_qs(current); } else if (rcu_preempt_need_deferred_qs(current)) { set_tsk_need_resched(current); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index e1a9fb96e0b9..8fb1612021a0 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -927,7 +927,18 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) { return false; } -static void rcu_preempt_deferred_qs(struct task_struct *t) { } + +// Except that we do need to respond to a request by an expedited grace +// period for a quiescent state from this CPU. Note that requests from +// tasks are handled when removing the task from the blocked-tasks list +// below. +static void rcu_preempt_deferred_qs(struct task_struct *t) +{ + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + + if (rdp->cpu_no_qs.b.exp) + rcu_report_exp_rdp(rdp); +} /* * Because there is no preemptible RCU, there can be no readers blocked, -- cgit v1.2.3-59-g8ed1b From 81f6d49cce2d2fe507e3fddcc4a6db021d9c2e7b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 30 Nov 2021 17:21:08 +0100 Subject: rcu/exp: Mark current CPU as exp-QS in IPI loop second pass Expedited RCU grace periods invoke sync_rcu_exp_select_node_cpus(), which takes two passes over the leaf rcu_node structure's CPUs. The first pass gathers up the current CPU and CPUs that are in dynticks idle mode. The workqueue will report a quiescent state on their behalf later. 
The second pass sends IPIs to the rest of the CPUs, but excludes the current CPU, incorrectly assuming it has been included in the first pass's list of CPUs. Unfortunately the current CPU may have changed between the first and second pass, due to the fact that the various rcu_node structures' ->lock fields have been dropped, thus momentarily enabling preemption. This means that if the second pass's CPU was not on the first pass's list, it will be ignored completely. There will be no IPI sent to it, and there will be no reporting of quiescent states on its behalf. Unfortunately, the expedited grace period will nevertheless be waiting for that CPU to report a quiescent state, but with that CPU having no reason to believe that such a report is needed. The result will be an expedited grace period stall. Fix this by no longer excluding the current CPU from consideration during the second pass. Fixes: b9ad4d6ed18e ("rcu: Avoid self-IPI in sync_rcu_exp_select_node_cpus()") Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Uladzislau Rezki Cc: Neeraj Upadhyay Cc: Boqun Feng Cc: Josh Triplett Cc: Joel Fernandes Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index a96d17206d87..237a79989aba 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -387,6 +387,7 @@ retry_ipi: continue; } if (get_cpu() == cpu) { + mask_ofl_test |= mask; put_cpu(); continue; } -- cgit v1.2.3-59-g8ed1b From 614ddad17f22a22e035e2ea37a04815f50362017 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 17 Sep 2021 15:04:48 -0700 Subject: rcu: Tighten rcu_advance_cbs_nowake() checks Currently, rcu_advance_cbs_nowake() checks that a grace period is in progress, however, that grace period could end just after the check. This commit rechecks that a grace period is still in progress while holding the rcu_node structure's lock. The grace period cannot end while the current CPU's rcu_node structure's ->lock is held, thus avoiding false positives from the WARN_ON_ONCE(). As Daniel Vacek noted, it is not necessary for the rcu_node structure to have a CPU that has not yet passed through its quiescent state. Tested-by: Guillaume Morin Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ef8d36f580fc..8706b30c2ac8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1590,10 +1590,11 @@ static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, struct rcu_data *rdp) { rcu_lockdep_assert_cblist_protected(rdp); - if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || - !raw_spin_trylock_rcu_node(rnp)) + if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) return; - WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); + // The grace period cannot end while we hold the rcu_node lock. + if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) + WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); raw_spin_unlock_rcu_node(rnp); } -- cgit v1.2.3-59-g8ed1b From 118e0d4a1bc85d4ecea0427e440a72d21ffbfa6a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 11 Oct 2021 16:51:30 +0200 Subject: rcu/nocb: Make local rcu_nocb_lock_irqsave() safe against concurrent deoffloading rcu_nocb_lock_irqsave() can be preempted between the call to rcu_segcblist_is_offloaded() and the actual locking. 
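For illustration only, here is a minimal annotated sketch of that pre-patch check-then-lock ordering, with the problematic preemption window marked; it mirrors the macro removed by the diff further below and is not the kernel source verbatim:

	/* Sketch of the old rcu_nocb_lock_irqsave() ordering (illustration only). */
	if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
		/*
		 * Preemption window: on PREEMPT_RT the (de-)offloading process
		 * can run here and change the offloaded state, so the decision
		 * made by the check above may already be stale when acted upon.
		 */
		local_irq_save(flags);				/* nocb_lock NOT taken */
	} else {
		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	}
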
This matters now that rcu_core() is preemptible on PREEMPT_RT and the (de-)offloading process can interrupt the softirq or the rcuc kthread. As a result we may locklessly call into code that requires nocb locking. In practice this is a problem while we accelerate callbacks on rcu_core(). Simply disabling interrupts before (instead of after) checking the NOCB offload state fixes the issue. Reported-and-tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 305cf6aeb408..4f6c67b3ccd5 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -447,12 +447,16 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp); #ifdef CONFIG_RCU_NOCB_CPU static void __init rcu_organize_nocb_kthreads(void); -#define rcu_nocb_lock_irqsave(rdp, flags) \ -do { \ - if (!rcu_segcblist_is_offloaded(&(rdp)->cblist)) \ - local_irq_save(flags); \ - else \ - raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags)); \ + +/* + * Disable IRQs before checking offloaded state so that local + * locking is safe against concurrent de-offloading. + */ +#define rcu_nocb_lock_irqsave(rdp, flags) \ +do { \ + local_irq_save(flags); \ + if (rcu_segcblist_is_offloaded(&(rdp)->cblist)) \ + raw_spin_lock(&(rdp)->nocb_lock); \ } while (0) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ #define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags) -- cgit v1.2.3-59-g8ed1b From 213d56bf33bdda835bac04046f09256a75c5ca8e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:07 +0200 Subject: rcu/nocb: Prepare state machine for a new step Currently SEGCBLIST_SOFTIRQ_ONLY is a bit of an exception among the segcblist flags because it is an exclusive state that doesn't mix up with the other flags. Remove it in favour of: _ A flag specifying that rcu_core() needs to perform callbacks execution and acceleration and _ A flag specifying we want the nocb lock to be held in any needed circumstances This clarifies the code and is more flexible: It allows to have a state where rcu_core() runs with locking while offloading hasn't started yet. This is a necessary step to prepare for triggering rcu_core() at the very beginning of the de-offloading process so that rcu_core() won't dismiss work while being preempted by the de-offloading process, at least not without a pending subsequent rcu_core() that will quickly catch up. Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. 
McKenney --- include/linux/rcu_segcblist.h | 37 +++++++++++++++++++++++-------------- kernel/rcu/rcu_segcblist.c | 6 +++--- kernel/rcu/rcu_segcblist.h | 12 +++++++----- kernel/rcu/tree.c | 2 +- kernel/rcu/tree_nocb.h | 24 ++++++++++++++++-------- 5 files changed, 50 insertions(+), 31 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 3db96c4f45fd..812961b1d064 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -69,7 +69,7 @@ struct rcu_cblist { * * * ---------------------------------------------------------------------------- - * | SEGCBLIST_SOFTIRQ_ONLY | + * | SEGCBLIST_RCU_CORE | * | | * | Callbacks processed by rcu_core() from softirqs or local | * | rcuc kthread, without holding nocb_lock. | @@ -77,7 +77,7 @@ struct rcu_cblist { * | * v * ---------------------------------------------------------------------------- - * | SEGCBLIST_OFFLOADED | + * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | * | | * | Callbacks processed by rcu_core() from softirqs or local | * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | @@ -89,7 +89,9 @@ struct rcu_cblist { * | | * v v * --------------------------------------- ----------------------------------| - * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | * | | | | * | | | | @@ -104,9 +106,10 @@ struct rcu_cblist { * | * v * |--------------------------------------------------------------------------| - * | SEGCBLIST_OFFLOADED | | - * | SEGCBLIST_KTHREAD_CB | | - * | SEGCBLIST_KTHREAD_GP | + * | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_GP | | + * | SEGCBLIST_KTHREAD_CB | * | | * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | * | handling callbacks. Enable bypass queueing. | @@ -120,7 +123,8 @@ struct rcu_cblist { * * * |--------------------------------------------------------------------------| - * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | * | | @@ -130,6 +134,8 @@ struct rcu_cblist { * | * v * |--------------------------------------------------------------------------| + * | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | * | | @@ -143,7 +149,9 @@ struct rcu_cblist { * | | * v v * ---------------------------------------------------------------------------| - * | | + * | | | + * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | | * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | * | | | * | GP kthread woke up and | CB kthread woke up and | @@ -159,7 +167,7 @@ struct rcu_cblist { * | * v * ---------------------------------------------------------------------------- - * | 0 | + * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | * | | * | Callbacks processed by rcu_core() from softirqs or local | * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | @@ -168,17 +176,18 @@ struct rcu_cblist { * | * v * ---------------------------------------------------------------------------- - * | SEGCBLIST_SOFTIRQ_ONLY | + * | SEGCBLIST_RCU_CORE | * | | * | Callbacks processed by rcu_core() from softirqs or local | * | rcuc kthread, without holding nocb_lock. 
| * ---------------------------------------------------------------------------- */ #define SEGCBLIST_ENABLED BIT(0) -#define SEGCBLIST_SOFTIRQ_ONLY BIT(1) -#define SEGCBLIST_KTHREAD_CB BIT(2) -#define SEGCBLIST_KTHREAD_GP BIT(3) -#define SEGCBLIST_OFFLOADED BIT(4) +#define SEGCBLIST_RCU_CORE BIT(1) +#define SEGCBLIST_LOCKING BIT(2) +#define SEGCBLIST_KTHREAD_CB BIT(3) +#define SEGCBLIST_KTHREAD_GP BIT(4) +#define SEGCBLIST_OFFLOADED BIT(5) struct rcu_segcblist { struct rcu_head *head; diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index aaa111237b60..c07aab6e39ef 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -261,14 +261,14 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) } /* - * Mark the specified rcu_segcblist structure as offloaded. + * Mark the specified rcu_segcblist structure as offloaded (or not) */ void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) { if (offload) { - rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); - rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); + rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED); } else { + rcu_segcblist_set_flags(rsclp, SEGCBLIST_RCU_CORE); rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); } } diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 9a19328ff251..e373fbe44da5 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -80,11 +80,14 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); } -/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */ +/* + * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the + * [de]offloading process)? + */ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) { if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && - !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) + rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING)) return true; return false; @@ -92,9 +95,8 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp) { - int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; - - if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags) + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && + !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE)) return true; return false; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8706b30c2ac8..e905d7e4ddb9 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -79,7 +79,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, .dynticks = ATOMIC_INIT(1), #ifdef CONFIG_RCU_NOCB_CPU - .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY, + .cblist.flags = SEGCBLIST_RCU_CORE, #endif }; static struct rcu_state rcu_state = { diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 368ef7b9af4f..b3e07d0bfbbf 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1000,12 +1000,12 @@ static long rcu_nocb_rdp_deoffload(void *arg) */ rcu_nocb_lock_irqsave(rdp, flags); /* - * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb + * Theoretically we could clear SEGCBLIST_LOCKING after the nocb * lock is released but how about being paranoid for once? 
*/ - rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY); + rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING); /* - * With SEGCBLIST_SOFTIRQ_ONLY, we can't use + * Without SEGCBLIST_LOCKING, we can't use * rcu_nocb_unlock_irqrestore() anymore. */ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); @@ -1058,14 +1058,14 @@ static long rcu_nocb_rdp_offload(void *arg) pr_info("Offloading %d\n", rdp->cpu); /* - * Can't use rcu_nocb_lock_irqsave() while we are in - * SEGCBLIST_SOFTIRQ_ONLY mode. + * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING + * is set. */ raw_spin_lock_irqsave(&rdp->nocb_lock, flags); /* * We didn't take the nocb lock while working on the - * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. + * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode). * Every modifications that have been done previously on * rdp->cblist must be visible remotely by the nocb kthreads * upon wake up after reading the cblist flags. @@ -1084,6 +1084,14 @@ static long rcu_nocb_rdp_offload(void *arg) rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) && rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); + /* + * All kthreads are ready to work, we can finally relieve rcu_core() and + * enable nocb bypass. + */ + rcu_nocb_lock_irqsave(rdp, flags); + rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE); + rcu_nocb_unlock_irqrestore(rdp, flags); + return ret; } @@ -1154,8 +1162,8 @@ void __init rcu_init_nohz(void) if (rcu_segcblist_empty(&rdp->cblist)) rcu_segcblist_init(&rdp->cblist); rcu_segcblist_offload(&rdp->cblist, true); - rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); - rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP); + rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP); + rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE); } rcu_organize_nocb_kthreads(); } -- cgit v1.2.3-59-g8ed1b From fbb94cbd70d41c7511460896dfc7f9ea5da704b3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:08 +0200 Subject: rcu/nocb: Invoke rcu_core() at the start of deoffloading On PREEMPT_RT, if rcu_core() is preempted by the de-offloading process, some work, such as callbacks acceleration and invocation, may be left unattended due to the volatile checks on the offloaded state. In the worst case this work is postponed until the next rcu_pending() check that can take a jiffy to reach, which can be a problem in case of callbacks flooding. Solve that with invoking rcu_core() early in the de-offloading process. This way any work dismissed by an ongoing rcu_core() call fooled by a preempting deoffloading process will be caught up by a nearby future recall to rcu_core(), this time fully aware of the de-offloading state. Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. 
McKenney --- include/linux/rcu_segcblist.h | 14 ++++++++++++++ kernel/rcu/rcu_segcblist.c | 6 ++---- kernel/rcu/tree.c | 17 +++++++++++++++++ kernel/rcu/tree_nocb.h | 9 +++++++++ 4 files changed, 42 insertions(+), 4 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 812961b1d064..659d13a7ddaa 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -136,6 +136,20 @@ struct rcu_cblist { * |--------------------------------------------------------------------------| * | SEGCBLIST_RCU_CORE | | * | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | + * | handles callbacks concurrently. Bypass enqueue is enabled. | + * | Invoke RCU core so we make sure not to preempt it in the middle with | + * | leaving some urgent work unattended within a jiffy. | + * ---------------------------------------------------------------------------- + * | + * v + * |--------------------------------------------------------------------------| + * | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | * | | diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index c07aab6e39ef..81145c3ece25 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -265,12 +265,10 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) */ void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) { - if (offload) { + if (offload) rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED); - } else { - rcu_segcblist_set_flags(rsclp, SEGCBLIST_RCU_CORE); + else rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); - } } /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e905d7e4ddb9..a329adfece86 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2707,6 +2707,23 @@ static __latent_entropy void rcu_core(void) unsigned long flags; struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; + /* + * On RT rcu_core() can be preempted when IRQs aren't disabled. + * Therefore this function can race with concurrent NOCB (de-)offloading + * on this CPU and the below condition must be considered volatile. + * However if we race with: + * + * _ Offloading: In the worst case we accelerate or process callbacks + * concurrently with NOCB kthreads. We are guaranteed to + * call rcu_nocb_lock() if that happens. + * + * _ Deoffloading: In the worst case we miss callbacks acceleration or + * processing. This is fine because the early stage + * of deoffloading invokes rcu_core() after setting + * SEGCBLIST_RCU_CORE. So we guarantee that we'll process + * what could have been dismissed without the need to wait + * for the next rcu_pending() check in the next jiffy. + */ const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); if (cpu_is_offline(smp_processor_id())) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index b3e07d0bfbbf..2461fe8d0c23 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -990,6 +990,15 @@ static long rcu_nocb_rdp_deoffload(void *arg) * will refuse to put anything into the bypass. */ WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); + /* + * Start with invoking rcu_core() early. 
This way if the current thread + * happens to preempt an ongoing call to rcu_core() in the middle, + * leaving some work dismissed because rcu_core() still thinks the rdp is + * completely offloaded, we are guaranteed a nearby future instance of + * rcu_core() to catch up. + */ + rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE); + invoke_rcu_core(); ret = rdp_offload_toggle(rdp, false, flags); swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | -- cgit v1.2.3-59-g8ed1b From 24ee940d89277602147ce1b8b4fd87b01b9a6660 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 19 Oct 2021 02:08:09 +0200 Subject: rcu/nocb: Make rcu_core() callbacks acceleration preempt-safe While reporting a quiescent state for a given CPU, rcu_core() takes advantage of the freshly loaded grace period sequence number and the locked rnp to accelerate the callbacks whose sequence number have been assigned a stale value. This action is only necessary when the rdp isn't offloaded, otherwise the NOCB kthreads already take care of the callbacks progression. However the check for the offloaded state is volatile because it is performed outside the IRQs disabled section. It's possible for the offloading process to preempt rcu_core() at that point on PREEMPT_RT. This is dangerous because rcu_core() may end up accelerating callbacks concurrently with NOCB kthreads without appropriate locking. Fix this with moving the offloaded check inside the rnp locking section. Reported-and-tested-by: Valentin Schneider Reviewed-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a329adfece86..5985698f3341 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2278,7 +2278,6 @@ rcu_report_qs_rdp(struct rcu_data *rdp) unsigned long flags; unsigned long mask; bool needwake = false; - const bool offloaded = rcu_rdp_is_offloaded(rdp); struct rcu_node *rnp; WARN_ON_ONCE(rdp->cpu != smp_processor_id()); @@ -2305,8 +2304,10 @@ rcu_report_qs_rdp(struct rcu_data *rdp) /* * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. + * + * NOCB kthreads have their own way to deal with that. */ - if (!offloaded) + if (!rcu_rdp_is_offloaded(rdp)) needwake = rcu_accelerate_cbs(rnp, rdp); rcu_disable_urgency_upon_qs(rdp); -- cgit v1.2.3-59-g8ed1b From b3bb02fe5a2b538ae53eda1fe591dd6c81a91ad4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:10 +0200 Subject: rcu/nocb: Make rcu_core() callbacks acceleration (de-)offloading safe When callbacks are offloaded, the NOCB kthreads handle the callbacks progression on behalf of rcu_core(). However during the (de-)offloading process, the kthread may not be entirely up to the task. As a result some callbacks grace period sequence number may remain stale for a while because rcu_core() won't take care of them either. Fix this with forcing callbacks acceleration from rcu_core() as long as the offloading process isn't complete. 
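In outline, and only as a simplified sketch of the rcu_report_qs_rdp() hunk shown below (not the complete function), the quiescent-state report path then decides as follows:

	/* Simplified sketch; see the rcu_report_qs_rdp() hunk below for the real code. */
	bool needwake = false;
	bool needacc = false;

	if (!rcu_rdp_is_offloaded(rdp)) {
		/* Not offloaded: accelerate callbacks here, under the rnp lock. */
		needwake = rcu_accelerate_cbs(rnp, rdp);
	} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		/* Mid-(de-)offloading: the NOCB kthreads may miss this acceleration. */
		needacc = true;
	}
	/* ... report the quiescent state to the rcu_node tree ... */
	if (needacc) {
		/* Do the acceleration from rcu_core()'s caller, under the nocb lock. */
		rcu_nocb_lock_irqsave(rdp, flags);
		rcu_accelerate_cbs_unlocked(rnp, rdp);
		rcu_nocb_unlock_irqrestore(rdp, flags);
	}
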
Reported-and-tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5985698f3341..cb9abb80377f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2278,6 +2278,7 @@ rcu_report_qs_rdp(struct rcu_data *rdp) unsigned long flags; unsigned long mask; bool needwake = false; + bool needacc = false; struct rcu_node *rnp; WARN_ON_ONCE(rdp->cpu != smp_processor_id()); @@ -2305,16 +2306,29 @@ rcu_report_qs_rdp(struct rcu_data *rdp) * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. * - * NOCB kthreads have their own way to deal with that. + * NOCB kthreads have their own way to deal with that... */ - if (!rcu_rdp_is_offloaded(rdp)) + if (!rcu_rdp_is_offloaded(rdp)) { needwake = rcu_accelerate_cbs(rnp, rdp); + } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { + /* + * ...but NOCB kthreads may miss or delay callbacks acceleration + * if in the middle of a (de-)offloading process. + */ + needacc = true; + } rcu_disable_urgency_upon_qs(rdp); rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); /* ^^^ Released rnp->lock */ if (needwake) rcu_gp_kthread_wake(); + + if (needacc) { + rcu_nocb_lock_irqsave(rdp, flags); + rcu_accelerate_cbs_unlocked(rnp, rdp); + rcu_nocb_unlock_irqrestore(rdp, flags); + } } } -- cgit v1.2.3-59-g8ed1b From 344e219d7d2b28117daaae5fe8da2e054b53d5a2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:11 +0200 Subject: rcu/nocb: Check a stable offloaded state to manipulate qlen_last_fqs_check It's not entirely obvious why rdp->qlen_last_fqs_check is updated before processing the queue only on offloaded rdp. There can be different effect to that, either in favour of triggering the force quiescent state path or not. For example: 1) If the number of callbacks has decreased since the last rdp->qlen_last_fqs_check update (because we recently called rcu_do_batch() and we executed below qhimark callbacks) and the number of processed callbacks on a subsequent do_batch() arranges for exceeding qhimark on non-offloaded but not on offloaded setup, then we may spare a later run to the force quiescent state slow path on __call_rcu_nocb_wake(), as compared to the non-offloaded counterpart scenario. Here is such an offloaded scenario instance: qhimark = 1000 rdp->last_qlen_last_fqs_check = 3000 rcu_segcblist_n_cbs(rdp) = 2000 rcu_do_batch() { if (offloaded) rdp->last_qlen_fqs_check = rcu_segcblist_n_cbs(rdp) // 2000 // run 1000 callback rcu_segcblist_n_cbs(rdp) = 1000 // Not updating rdp->qlen_last_fqs_check if (count < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = count; } call_rcu() * 1001 { __call_rcu_nocb_wake() { // not taking the fqs slowpath: // rcu_segcblist_n_cbs(rdp) == 2001 // rdp->qlen_last_fqs_check == 2000 // qhimark == 1000 if (len > rdp->qlen_last_fqs_check + qhimark) ... } In the case of a non-offloaded scenario, rdp->qlen_last_fqs_check would be 1000 and the fqs slowpath would have executed. 
2) If the number of callbacks has increased since the last rdp->qlen_last_fqs_check update (because we recently queued below qhimark callbacks) and the number of callbacks executed in rcu_do_batch() doesn't exceed qhimark for either offloaded or non-offloaded setup, then it's possible that the offloaded scenario later run the force quiescent state slow path on __call_rcu_nocb_wake() while the non-offloaded doesn't. qhimark = 1000 rdp->last_qlen_last_fqs_check = 3000 rcu_segcblist_n_cbs(rdp) = 2000 rcu_do_batch() { if (offloaded) rdp->last_qlen_last_fqs_check = rcu_segcblist_n_cbs(rdp) // 2000 // run 100 callbacks // concurrent queued 100 rcu_segcblist_n_cbs(rdp) = 2000 // Not updating rdp->qlen_last_fqs_check if (count < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = count; } call_rcu() * 1001 { __call_rcu_nocb_wake() { // Taking the fqs slowpath: // rcu_segcblist_n_cbs(rdp) == 3001 // rdp->qlen_last_fqs_check == 2000 // qhimark == 1000 if (len > rdp->qlen_last_fqs_check + qhimark) ... } In the case of a non-offloaded scenario, rdp->qlen_last_fqs_check would be 3000 and the fqs slowpath would have executed. The reason for updating rdp->qlen_last_fqs_check when invoking callbacks for offloaded CPUs is that there is usually no point in waking up either the rcuog or rcuoc kthreads while in this state. After all, both threads are prohibited from indefinite sleeps. The exception is when some huge number of callbacks are enqueued while rcu_do_batch() is in the midst of invoking, in which case interrupting the rcuog kthread's timed sleep might get more callbacks set up for the next grace period. Reported-and-tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Original-patch-by: Thomas Gleixner Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cb9abb80377f..4cbfc4e4fa9e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2498,7 +2498,7 @@ static void rcu_do_batch(struct rcu_data *rdp) trace_rcu_batch_start(rcu_state.name, rcu_segcblist_n_cbs(&rdp->cblist), bl); rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); - if (offloaded) + if (rcu_rdp_is_offloaded(rdp)) rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); -- cgit v1.2.3-59-g8ed1b From 7b65dfa32dca1be0400d43a3d5bb80ed6e04958e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:12 +0200 Subject: rcu/nocb: Use appropriate rcu_nocb_lock_irqsave() Instead of hardcoding IRQ save and nocb lock, use the consolidated API (and fix a comment as per Valentin Schneider's suggestion). Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4cbfc4e4fa9e..20587d035d03 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2478,12 +2478,11 @@ static void rcu_do_batch(struct rcu_data *rdp) } /* - * Extract the list of ready callbacks, disabling to prevent + * Extract the list of ready callbacks, disabling IRQs to prevent * races with call_rcu() from interrupt handlers. Leave the * callback counts, as rcu_barrier() needs to be conservative. */ - local_irq_save(flags); - rcu_nocb_lock(rdp); + rcu_nocb_lock_irqsave(rdp, flags); WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); pending = rcu_segcblist_n_cbs(&rdp->cblist); div = READ_ONCE(rcu_divisor); @@ -2546,8 +2545,7 @@ static void rcu_do_batch(struct rcu_data *rdp) } } - local_irq_save(flags); - rcu_nocb_lock(rdp); + rcu_nocb_lock_irqsave(rdp, flags); rdp->n_cbs_invoked += count; trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); -- cgit v1.2.3-59-g8ed1b From 78ad37a2c50dfdb9a60e42bb9ee1da86d1fe770c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:13 +0200 Subject: rcu/nocb: Limit number of softirq callbacks only on softirq The current condition to limit the number of callbacks executed in a row checks the offloaded state of the rdp. Not only is it volatile but it is also misleading: the rcu_core() may well be executing callbacks concurrently with NOCB kthreads, and the offloaded state would then be verified on both cases. As a result the limit would spuriously not apply anymore on softirq while in the middle of (de-)offloading process. Fix and clarify the condition with those constraints in mind: _ If callbacks are processed either by rcuc or NOCB kthread, the call to cond_resched_tasks_rcu_qs() is enough to take care of the overload. _ If instead callbacks are processed by softirqs: * If need_resched(), exit the callbacks processing * Otherwise if CPU is idle we can continue * Otherwise exit because a softirq shouldn't interrupt a task for too long nor deprive other pending softirq vectors of the CPU. Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 20587d035d03..4f4d9ea6a9cb 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2525,9 +2525,8 @@ static void rcu_do_batch(struct rcu_data *rdp) /* * Stop only if limit reached and CPU has something to do. */ - if (count >= bl && !offloaded && - (need_resched() || - (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) + if (count >= bl && in_serving_softirq() && + (need_resched() || !is_idle_task(current))) break; if (unlikely(tlimit)) { /* only call local_clock() every 32 callbacks */ -- cgit v1.2.3-59-g8ed1b From 3e61e95e2d095e308616cba4ffb640f95a480e01 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:14 +0200 Subject: rcu: Fix callbacks processing time limit retaining cond_resched() The callbacks processing time limit makes sure we are not exceeding a given amount of time executing the queue. 
However its "continue" clause bypasses the cond_resched() call on rcuc and NOCB kthreads, delaying it until we reach the limit, which can be very long... Make sure the scheduler has a higher priority than the time limit. Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4f4d9ea6a9cb..74e210ccfd38 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2525,9 +2525,21 @@ static void rcu_do_batch(struct rcu_data *rdp) /* * Stop only if limit reached and CPU has something to do. */ - if (count >= bl && in_serving_softirq() && - (need_resched() || !is_idle_task(current))) - break; + if (in_serving_softirq()) { + if (count >= bl && (need_resched() || !is_idle_task(current))) + break; + } else { + local_bh_enable(); + lockdep_assert_irqs_enabled(); + cond_resched_tasks_rcu_qs(); + lockdep_assert_irqs_enabled(); + local_bh_disable(); + } + + /* + * Make sure we don't spend too much time here and deprive other + * softirq vectors of CPU cycles. + */ if (unlikely(tlimit)) { /* only call local_clock() every 32 callbacks */ if (likely((count & 31) || local_clock() < tlimit)) @@ -2535,13 +2547,6 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Exceeded the time limit, so leave. */ break; } - if (!in_serving_softirq()) { - local_bh_enable(); - lockdep_assert_irqs_enabled(); - cond_resched_tasks_rcu_qs(); - lockdep_assert_irqs_enabled(); - local_bh_disable(); - } } rcu_nocb_lock_irqsave(rdp, flags); -- cgit v1.2.3-59-g8ed1b From a554ba288845fd3f6f12311fd76a51694233458a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:15 +0200 Subject: rcu: Apply callbacks processing time limit only on softirq Time limit only makes sense when callbacks are serviced in softirq mode because: _ In case we need to get back to the scheduler, cond_resched_tasks_rcu_qs() is called after each callback. _ In case some other softirq vector needs the CPU, the call to local_bh_enable() before cond_resched_tasks_rcu_qs() takes care about them via a call to do_softirq(). Therefore, make sure the time limit only applies to softirq mode. Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 74e210ccfd38..1b31fad9f1a0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2488,7 +2488,7 @@ static void rcu_do_batch(struct rcu_data *rdp) div = READ_ONCE(rcu_divisor); div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; bl = max(rdp->blimit, pending >> div); - if (unlikely(bl > 100)) { + if (in_serving_softirq() && unlikely(bl > 100)) { long rrn = READ_ONCE(rcu_resched_ns); rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? 
NSEC_PER_SEC : rrn; @@ -2528,6 +2528,17 @@ static void rcu_do_batch(struct rcu_data *rdp) if (in_serving_softirq()) { if (count >= bl && (need_resched() || !is_idle_task(current))) break; + /* + * Make sure we don't spend too much time here and deprive other + * softirq vectors of CPU cycles. + */ + if (unlikely(tlimit)) { + /* only call local_clock() every 32 callbacks */ + if (likely((count & 31) || local_clock() < tlimit)) + continue; + /* Exceeded the time limit, so leave. */ + break; + } } else { local_bh_enable(); lockdep_assert_irqs_enabled(); @@ -2535,18 +2546,6 @@ static void rcu_do_batch(struct rcu_data *rdp) lockdep_assert_irqs_enabled(); local_bh_disable(); } - - /* - * Make sure we don't spend too much time here and deprive other - * softirq vectors of CPU cycles. - */ - if (unlikely(tlimit)) { - /* only call local_clock() every 32 callbacks */ - if (likely((count & 31) || local_clock() < tlimit)) - continue; - /* Exceeded the time limit, so leave. */ - break; - } } rcu_nocb_lock_irqsave(rdp, flags); -- cgit v1.2.3-59-g8ed1b From 0598a4d4429c0a952ac0e99e5280354cf4ccc01c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Oct 2021 02:08:16 +0200 Subject: rcu/nocb: Don't invoke local rcu core on callback overload from nocb kthread rcu_core() tries to ensure that its self-invocation in case of callbacks overload only happen in softirq/rcuc mode. Indeed it doesn't make sense to trigger local RCU core from nocb_cb kthread since it can execute on a CPU different from the target rdp. Also in case of overload, the nocb_cb kthread simply iterates a new loop of callbacks processing. However the "offloaded" check that aims at preventing misplaced rcu_core() invocations is wrong. First of all that state is volatile and second: softirq/rcuc can execute while the target rdp is offloaded. As a result rcu_core() can be invoked on the wrong CPU while in the process of (de-)offloading. Fix that with moving the rcu_core() self-invocation to rcu_core() itself, irrespective of the rdp offloaded state. Tested-by: Valentin Schneider Tested-by: Sebastian Andrzej Siewior Signed-off-by: Frederic Weisbecker Cc: Valentin Schneider Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Josh Triplett Cc: Joel Fernandes Cc: Boqun Feng Cc: Neeraj Upadhyay Cc: Uladzislau Rezki Cc: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1b31fad9f1a0..03c038f324a0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2460,7 +2460,6 @@ static void rcu_do_batch(struct rcu_data *rdp) int div; bool __maybe_unused empty; unsigned long flags; - const bool offloaded = rcu_rdp_is_offloaded(rdp); struct rcu_head *rhp; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); long bl, count = 0; @@ -2582,9 +2581,6 @@ static void rcu_do_batch(struct rcu_data *rdp) rcu_nocb_unlock_irqrestore(rdp, flags); - /* Re-invoke RCU core processing if there are callbacks remaining. */ - if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) - invoke_rcu_core(); tick_dep_clear_task(current, TICK_DEP_BIT_RCU); } @@ -2771,8 +2767,12 @@ static __latent_entropy void rcu_core(void) /* If there are callbacks ready, invoke them. */ if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && - likely(READ_ONCE(rcu_scheduler_fully_active))) + likely(READ_ONCE(rcu_scheduler_fully_active))) { rcu_do_batch(rdp); + /* Re-invoke RCU core processing if there are callbacks remaining. 
*/ + if (rcu_segcblist_ready_cbs(&rdp->cblist)) + invoke_rcu_core(); + } /* Do any needed deferred wakeups of rcuo kthreads. */ do_nocb_deferred_wakeup(rdp); -- cgit v1.2.3-59-g8ed1b From cafafd67765b21334086b3fb8963ad9c5866c03d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 5 Nov 2021 21:52:00 -0700 Subject: rcu-tasks: Create per-CPU callback lists Currently, RCU Tasks Trace (as well as the other two flavors of RCU Tasks) use a single global callback list. This works well and is simple, but expected changes in workload will cause this list to become a bottleneck. This commit therefore creates per-CPU callback lists for the various flavors of RCU Tasks, but continues queueing on a single list, namely that of CPU 0. Later commits will dynamically vary the number of lists in use to accommodate dynamic changes in workload. Reported-by: Martin Lau Cc: Neeraj Upadhyay Tested-by: kernel test robot Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 106 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 78 insertions(+), 28 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index bd44cd4794d3..30048db7aa49 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -20,11 +20,21 @@ typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); typedef void (*postgp_func_t)(struct rcu_tasks *rtp); /** - * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. + * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. * @cbs_head: Head of callback list. * @cbs_tail: Tail pointer for callback list. + * @cbs_pcpu_lock: Lock protecting per-CPU callback list. + */ +struct rcu_tasks_percpu { + struct rcu_head *cbs_head; + struct rcu_head **cbs_tail; + raw_spinlock_t cbs_pcpu_lock; +}; + +/** + * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. * @cbs_wq: Wait queue allowing new callback to get kthread's attention. - * @cbs_lock: Lock protecting callback list. + * @cbs_gbl_lock: Lock protecting callback list. * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. * @gp_func: This flavor's grace-period-wait function. * @gp_state: Grace period's most recent state transition (debugging). @@ -41,14 +51,13 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @holdouts_func: This flavor's holdout-list scan function (optional). * @postgp_func: This flavor's post-grace-period function (optional). * @call_func: This flavor's call_rcu()-equivalent function. + * @rtpcpu: This flavor's rcu_tasks_percpu structure. * @name: This flavor's textual name. * @kname: This flavor's kthread name. 
*/ struct rcu_tasks { - struct rcu_head *cbs_head; - struct rcu_head **cbs_tail; struct wait_queue_head cbs_wq; - raw_spinlock_t cbs_lock; + raw_spinlock_t cbs_gbl_lock; int gp_state; int gp_sleep; int init_fract; @@ -65,20 +74,24 @@ struct rcu_tasks { holdouts_func_t holdouts_func; postgp_func_t postgp_func; call_rcu_func_t call_func; + struct rcu_tasks_percpu __percpu *rtpcpu; char *name; char *kname; }; -#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ -static struct rcu_tasks rt_name = \ -{ \ - .cbs_tail = &rt_name.cbs_head, \ - .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \ - .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \ - .gp_func = gp, \ - .call_func = call, \ - .name = n, \ - .kname = #rt_name, \ +#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ +static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ + .cbs_pcpu_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ +}; \ +static struct rcu_tasks rt_name = \ +{ \ + .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \ + .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ + .gp_func = gp, \ + .call_func = call, \ + .rtpcpu = &rt_name ## __percpu, \ + .name = n, \ + .kname = #rt_name, \ } /* Track exiting tasks in order to allow them to be waited for. */ @@ -148,20 +161,51 @@ static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) } #endif /* #ifndef CONFIG_TINY_RCU */ +// Initialize per-CPU callback lists for the specified flavor of +// Tasks RCU. +static void cblist_init_generic(struct rcu_tasks *rtp) +{ + int cpu; + unsigned long flags; + + raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); + for_each_possible_cpu(cpu) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); + + WARN_ON_ONCE(!rtpcp); + if (cpu) + raw_spin_lock_init(&rtpcp->cbs_pcpu_lock); + raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. + if (!WARN_ON_ONCE(rtpcp->cbs_tail)) + rtpcp->cbs_tail = &rtpcp->cbs_head; + raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. + } + raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); + +} + // Enqueue a callback for the specified flavor of Tasks RCU. static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, struct rcu_tasks *rtp) { unsigned long flags; bool needwake; + struct rcu_tasks_percpu *rtpcp; rhp->next = NULL; rhp->func = func; - raw_spin_lock_irqsave(&rtp->cbs_lock, flags); - needwake = !rtp->cbs_head; - WRITE_ONCE(*rtp->cbs_tail, rhp); - rtp->cbs_tail = &rhp->next; - raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); + local_irq_save(flags); + rtpcp = per_cpu_ptr(rtp->rtpcpu, 0 /* smp_processor_id() */); + raw_spin_lock(&rtpcp->cbs_pcpu_lock); + if (!rtpcp->cbs_tail) { + raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. + cblist_init_generic(rtp); + raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. + } + needwake = !rtpcp->cbs_head; + WRITE_ONCE(*rtpcp->cbs_tail, rhp); + rtpcp->cbs_tail = &rhp->next; + raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); /* We can't create the thread unless interrupts are enabled. */ if (needwake && READ_ONCE(rtp->kthread_ptr)) wake_up(&rtp->cbs_wq); @@ -197,21 +241,23 @@ static int __noreturn rcu_tasks_kthread(void *arg) * This loop is terminated by the system going down. ;-) */ for (;;) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each... + set_tasks_gp_state(rtp, RTGS_WAIT_CBS); /* Pick up any new callbacks. 
*/ - raw_spin_lock_irqsave(&rtp->cbs_lock, flags); + raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); smp_mb__after_spinlock(); // Order updates vs. GP. - list = rtp->cbs_head; - rtp->cbs_head = NULL; - rtp->cbs_tail = &rtp->cbs_head; - raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); + list = rtpcp->cbs_head; + rtpcp->cbs_head = NULL; + rtpcp->cbs_tail = &rtpcp->cbs_head; + raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); /* If there were none, wait a bit and start over. */ if (!list) { wait_event_interruptible(rtp->cbs_wq, - READ_ONCE(rtp->cbs_head)); - if (!rtp->cbs_head) { + READ_ONCE(rtpcp->cbs_head)); + if (!rtpcp->cbs_head) { WARN_ON(signal_pending(current)); set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS); schedule_timeout_idle(HZ/10); @@ -279,6 +325,7 @@ static void __init rcu_tasks_bootup_oddness(void) /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each... pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n", rtp->kname, tasks_gp_state_getname(rtp), data_race(rtp->gp_state), @@ -286,7 +333,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) data_race(rtp->n_gps), data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), ".k"[!!data_race(rtp->kthread_ptr)], - ".C"[!!data_race(rtp->cbs_head)], + ".C"[!!data_race(rtpcp->cbs_head)], s); } #endif // #ifndef CONFIG_TINY_RCU @@ -593,6 +640,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks); static int __init rcu_spawn_tasks_kthread(void) { + cblist_init_generic(&rcu_tasks); rcu_tasks.gp_sleep = HZ / 10; rcu_tasks.init_fract = HZ / 10; rcu_tasks.pregp_func = rcu_tasks_pregp_step; @@ -731,6 +779,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); static int __init rcu_spawn_tasks_rude_kthread(void) { + cblist_init_generic(&rcu_tasks_rude); rcu_tasks_rude.gp_sleep = HZ / 10; rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); return 0; @@ -1264,6 +1313,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); static int __init rcu_spawn_tasks_trace_kthread(void) { + cblist_init_generic(&rcu_tasks_trace); if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { rcu_tasks_trace.gp_sleep = HZ / 10; rcu_tasks_trace.init_fract = HZ / 10; -- cgit v1.2.3-59-g8ed1b From 7a30871b6a27de1a1f418c7fd2c5dde9a46bfd16 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 8 Nov 2021 10:51:13 -0800 Subject: rcu-tasks: Introduce ->percpu_enqueue_shift for dynamic queue selection This commit introduces a ->percpu_enqueue_shift field to the rcu_tasks structure, and uses it to shift down the CPU number in order to select a rcu_tasks_percpu structure. This field is currently set to a sufficiently large shift count to always select the CPU-0 instance of the rcu_tasks_percpu structure, and later commits will adjust this. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 30048db7aa49..2a5fe3e04b36 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -52,6 +52,7 @@ struct rcu_tasks_percpu { * @postgp_func: This flavor's post-grace-period function (optional). * @call_func: This flavor's call_rcu()-equivalent function. * @rtpcpu: This flavor's rcu_tasks_percpu structure. + * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. * @name: This flavor's textual name. 
* @kname: This flavor's kthread name. */ @@ -75,6 +76,7 @@ struct rcu_tasks { postgp_func_t postgp_func; call_rcu_func_t call_func; struct rcu_tasks_percpu __percpu *rtpcpu; + int percpu_enqueue_shift; char *name; char *kname; }; @@ -91,6 +93,7 @@ static struct rcu_tasks rt_name = \ .call_func = call, \ .rtpcpu = &rt_name ## __percpu, \ .name = n, \ + .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \ .kname = #rt_name, \ } @@ -169,6 +172,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp) unsigned long flags; raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); + rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids); for_each_possible_cpu(cpu) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); @@ -195,7 +199,8 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, rhp->next = NULL; rhp->func = func; local_irq_save(flags); - rtpcp = per_cpu_ptr(rtp->rtpcpu, 0 /* smp_processor_id() */); + rtpcp = per_cpu_ptr(rtp->rtpcpu, + smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); raw_spin_lock(&rtpcp->cbs_pcpu_lock); if (!rtpcp->cbs_tail) { raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. -- cgit v1.2.3-59-g8ed1b From b14fb4fbbcd8ff62f1a7aa2c6e2603424c117943 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 8 Nov 2021 14:14:43 -0800 Subject: rcu-tasks: Convert grace-period counter to grace-period sequence number This commit moves the rcu_tasks structure's ->n_gps grace-period-counter field to a ->tasks_gp_seq grace-period sequence number in order to enable use of the rcu_segcblist structure for the callback lists. This in turn permits CPUs to lag behind the RCU Tasks grace-period sequence number without suffering long-term slowdowns in callback invocation. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 2a5fe3e04b36..ed84d59b6dbf 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -42,7 +42,7 @@ struct rcu_tasks_percpu { * @init_fract: Initial backoff sleep interval. * @gp_jiffies: Time of last @gp_state transition. * @gp_start: Most recent grace-period start in jiffies. - * @n_gps: Number of grace periods completed since boot. + * @tasks_gp_seq: Number of grace periods completed since boot. * @n_ipis: Number of IPIs sent to encourage grace periods to end. * @n_ipis_fails: Number of IPI-send failures. * @pregp_func: This flavor's pre-grace-period function (optional). @@ -64,7 +64,7 @@ struct rcu_tasks { int init_fract; unsigned long gp_jiffies; unsigned long gp_start; - unsigned long n_gps; + unsigned long tasks_gp_seq; unsigned long n_ipis; unsigned long n_ipis_fails; struct task_struct *kthread_ptr; @@ -273,8 +273,9 @@ static int __noreturn rcu_tasks_kthread(void *arg) // Wait for one grace period. set_tasks_gp_state(rtp, RTGS_WAIT_GP); rtp->gp_start = jiffies; + rcu_seq_start(&rtp->tasks_gp_seq); rtp->gp_func(rtp); - rtp->n_gps++; + rcu_seq_end(&rtp->tasks_gp_seq); /* Invoke the callbacks. 
*/ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); @@ -335,7 +336,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) rtp->kname, tasks_gp_state_getname(rtp), data_race(rtp->gp_state), jiffies - data_race(rtp->gp_jiffies), - data_race(rtp->n_gps), + data_race(rcu_seq_current(&rtp->tasks_gp_seq)), data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), ".k"[!!data_race(rtp->kthread_ptr)], ".C"[!!data_race(rtpcp->cbs_head)], -- cgit v1.2.3-59-g8ed1b From 9b073de1c7a354af7cb7100952599dde461aee45 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 8 Nov 2021 16:18:57 -0800 Subject: rcu_tasks: Convert bespoke callback list to rcu_segcblist structure This commit moves from a bespoke head and tail pointer in the rcu_tasks_percpu structure to an rcu_segcblist structure, thus allowing associating the grace-period sequence number with groups of callbacks. This in turn will allow callbacks to be invoked independently on different CPUs. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/Kconfig | 2 +- kernel/rcu/tasks.h | 52 +++++++++++++++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 3128b7cf8e1f..42bcd34312c2 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -112,7 +112,7 @@ config RCU_STALL_COMMON making these warnings mandatory for the tree variants. config RCU_NEED_SEGCBLIST - def_bool ( TREE_RCU || TREE_SRCU ) + def_bool ( TREE_RCU || TREE_SRCU || TASKS_RCU_GENERIC ) config RCU_FANOUT int "Tree-based hierarchical RCU fanout value" diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index ed84d59b6dbf..2e58d7fa2da4 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -6,6 +6,7 @@ */ #ifdef CONFIG_TASKS_RCU_GENERIC +#include "rcu_segcblist.h" //////////////////////////////////////////////////////////////////////// // @@ -21,13 +22,11 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); /** * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. - * @cbs_head: Head of callback list. - * @cbs_tail: Tail pointer for callback list. + * @cblist: Callback list. * @cbs_pcpu_lock: Lock protecting per-CPU callback list. */ struct rcu_tasks_percpu { - struct rcu_head *cbs_head; - struct rcu_head **cbs_tail; + struct rcu_segcblist cblist; raw_spinlock_t cbs_pcpu_lock; }; @@ -180,8 +179,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp) if (cpu) raw_spin_lock_init(&rtpcp->cbs_pcpu_lock); raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. - if (!WARN_ON_ONCE(rtpcp->cbs_tail)) - rtpcp->cbs_tail = &rtpcp->cbs_head; + if (rcu_segcblist_empty(&rtpcp->cblist)) + rcu_segcblist_init(&rtpcp->cblist); raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); @@ -202,14 +201,13 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, rtpcp = per_cpu_ptr(rtp->rtpcpu, smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); raw_spin_lock(&rtpcp->cbs_pcpu_lock); - if (!rtpcp->cbs_tail) { + if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. cblist_init_generic(rtp); raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. 
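// Lazy initialization: if this CPU's callback list is not yet enabled, drop
// the per-CPU lock (interrupts stay disabled), let cblist_init_generic() set
// up every CPU's list under ->cbs_gbl_lock, then retake the per-CPU lock and
// fall through to the enqueue below.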
} - needwake = !rtpcp->cbs_head; - WRITE_ONCE(*rtpcp->cbs_tail, rhp); - rtpcp->cbs_tail = &rhp->next; + needwake = rcu_segcblist_empty(&rtpcp->cblist); + rcu_segcblist_enqueue(&rtpcp->cblist, rhp); raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); /* We can't create the thread unless interrupts are enabled. */ if (needwake && READ_ONCE(rtp->kthread_ptr)) @@ -231,8 +229,9 @@ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) static int __noreturn rcu_tasks_kthread(void *arg) { unsigned long flags; - struct rcu_head *list; - struct rcu_head *next; + int len; + struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); + struct rcu_head *rhp; struct rcu_tasks *rtp = arg; /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ @@ -253,16 +252,15 @@ static int __noreturn rcu_tasks_kthread(void *arg) /* Pick up any new callbacks. */ raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); smp_mb__after_spinlock(); // Order updates vs. GP. - list = rtpcp->cbs_head; - rtpcp->cbs_head = NULL; - rtpcp->cbs_tail = &rtpcp->cbs_head; + rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); + (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); /* If there were none, wait a bit and start over. */ - if (!list) { + if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { wait_event_interruptible(rtp->cbs_wq, - READ_ONCE(rtpcp->cbs_head)); - if (!rtpcp->cbs_head) { + rcu_segcblist_pend_cbs(&rtpcp->cblist)); + if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { WARN_ON(signal_pending(current)); set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS); schedule_timeout_idle(HZ/10); @@ -279,14 +277,22 @@ static int __noreturn rcu_tasks_kthread(void *arg) /* Invoke the callbacks. */ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); - while (list) { - next = list->next; + raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); + smp_mb__after_spinlock(); // Order updates vs. GP. + rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); + rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); + raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); + len = rcl.len; + for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { local_bh_disable(); - list->func(list); + rhp->func(rhp); local_bh_enable(); - list = next; cond_resched(); } + raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); + rcu_segcblist_add_len(&rtpcp->cblist, -len); + (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); + raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); /* Paranoid sleep to keep this from entering a tight loop */ schedule_timeout_idle(rtp->gp_sleep); } @@ -339,7 +345,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) data_race(rcu_seq_current(&rtp->tasks_gp_seq)), data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), ".k"[!!data_race(rtp->kthread_ptr)], - ".C"[!!data_race(rtpcp->cbs_head)], + ".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))], s); } #endif // #ifndef CONFIG_TINY_RCU -- cgit v1.2.3-59-g8ed1b From 4feeb9d5f82229bf39621be755bda81521539476 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Mon, 25 Oct 2021 11:26:58 +0800 Subject: refscale: Always log the error message An OOM is a serious error that should be logged even in non-verbose runs. This commit therefore adds an unconditional SCALEOUT_ERRSTRING() macro and uses it instead of VERBOSE_SCALEOUT_ERRSTRING() when reporting an OOM. 
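For example, at a typical allocation-failure site (a sketch adapted from the main_func() hunk below, not an additional change), the report is now emitted even when the verbose module parameter is zero:

	buf = kzalloc(800 + 64, GFP_KERNEL);
	if (!result_avg || !buf) {
		SCALEOUT_ERRSTRING("out of memory");	// No longer gated on "verbose".
		goto oom_exit;
	}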
[ paulmck: Drop do-while from SCALEOUT_ERRSTRING() due to only single statement. ] Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index d8ed1ca3adc0..c47685634640 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -58,8 +58,7 @@ do { \ } \ } while (0) -#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \ - do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0) +#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Joel Fernandes (Google) "); @@ -651,7 +650,7 @@ static int main_func(void *arg) result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL); buf = kzalloc(800 + 64, GFP_KERNEL); if (!result_avg || !buf) { - VERBOSE_SCALEOUT_ERRSTRING("out of memory"); + SCALEOUT_ERRSTRING("out of memory"); goto oom_exit; } if (holdoff) @@ -837,7 +836,7 @@ ref_scale_init(void) reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]), GFP_KERNEL); if (!reader_tasks) { - VERBOSE_SCALEOUT_ERRSTRING("out of memory"); + SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } -- cgit v1.2.3-59-g8ed1b From f71f22b67d37c91a5c4320f6e821f64eb189627d Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Fri, 29 Oct 2021 17:40:24 +0800 Subject: refscale: Add missing '\n' to flush message Add '\n' to macros to flush message for each call. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index c47685634640..5489ff7f478e 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -44,7 +44,10 @@ pr_alert("%s" SCALE_FLAG s, scale_type, ## x) #define VERBOSE_SCALEOUT(s, x...) \ - do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0) + do { \ + if (verbose) \ + pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \ + } while (0) static atomic_t verbose_batch_ctr; @@ -54,11 +57,11 @@ do { \ (verbose_batched <= 0 || \ !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \ schedule_timeout_uninterruptible(1); \ - pr_alert("%s" SCALE_FLAG s, scale_type, ## x); \ + pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \ } \ } while (0) -#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x) +#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Joel Fernandes (Google) "); @@ -841,7 +844,7 @@ ref_scale_init(void) goto unwind; } - VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders); + VERBOSE_SCALEOUT("Starting %d reader threads", nreaders); for (i = 0; i < nreaders; i++) { firsterr = torture_create_kthread(ref_scale_reader, (void *)i, -- cgit v1.2.3-59-g8ed1b From 71f6ea2a0be06f9d1833852ee4f501a76563acd3 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Fri, 29 Oct 2021 17:40:25 +0800 Subject: scftorture: Add missing '\n' to flush message Add '\n' to macros to flush message for each call. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. 
McKenney --- kernel/scftorture.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 5d42f44e3e1a..9cff573b7eb4 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -42,10 +42,10 @@ pr_alert(SCFTORT_FLAG s, ## x) #define VERBOSE_SCFTORTOUT(s, x...) \ - do { if (verbose) pr_alert(SCFTORT_FLAG s, ## x); } while (0) + do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0) #define VERBOSE_SCFTORTOUT_ERRSTRING(s, x...) \ - do { if (verbose) pr_alert(SCFTORT_FLAG "!!! " s, ## x); } while (0) + do { if (verbose) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x); } while (0) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney "); @@ -630,7 +630,7 @@ static int __init scf_torture_init(void) goto unwind; } - VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads\n", nthreads); + VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads); atomic_set(&n_started, nthreads); for (i = 0; i < nthreads; i++) { -- cgit v1.2.3-59-g8ed1b From 04cf8518860167081ee49b5ff2e1616244ab760e Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Fri, 29 Oct 2021 17:40:26 +0800 Subject: scftorture: Remove unused SCFTORTOUT There are no longer any users of SCFTORTOUT(), so this commit removes it. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/scftorture.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 9cff573b7eb4..a0df767897a1 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -38,9 +38,6 @@ #define SCFTORT_STRING "scftorture" #define SCFTORT_FLAG SCFTORT_STRING ": " -#define SCFTORTOUT(s, x...) \ - pr_alert(SCFTORT_FLAG s, ## x) - #define VERBOSE_SCFTORTOUT(s, x...) \ do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0) -- cgit v1.2.3-59-g8ed1b From 86e7ed1bd57d020e35d430542bf5d689c3200568 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Fri, 29 Oct 2021 17:40:28 +0800 Subject: rcuscale: Always log error message Unconditionally log messages corresponding to errors. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuscale.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 228f143bf935..5e4f1f83d38e 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -50,8 +50,8 @@ MODULE_AUTHOR("Paul E. McKenney "); pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s) #define VERBOSE_SCALEOUT_STRING(s) \ do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0) -#define VERBOSE_SCALEOUT_ERRSTRING(s) \ - do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s); } while (0) +#define SCALEOUT_ERRSTRING(s) \ + pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s) /* * The intended use cases for the nreaders and nwriters module parameters @@ -514,11 +514,11 @@ rcu_scale_cleanup(void) * during the mid-boot phase, so have to wait till the end. 
*/ if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp) - VERBOSE_SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!"); + SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!"); if (rcu_gp_is_normal() && gp_exp) - VERBOSE_SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!"); + SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!"); if (gp_exp && gp_async) - VERBOSE_SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!"); + SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!"); if (torture_cleanup_begin()) return; @@ -845,7 +845,7 @@ rcu_scale_init(void) reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), GFP_KERNEL); if (reader_tasks == NULL) { - VERBOSE_SCALEOUT_ERRSTRING("out of memory"); + SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } @@ -865,7 +865,7 @@ rcu_scale_init(void) kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL); if (!writer_tasks || !writer_durations || !writer_n_durations) { - VERBOSE_SCALEOUT_ERRSTRING("out of memory"); + SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } -- cgit v1.2.3-59-g8ed1b From 809da9bf805094e1cf032fd8000e8b77d5eec642 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Wed, 3 Nov 2021 16:30:27 +0800 Subject: scftorture: Always log error message Unconditionally log messages corresponding to errors. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- kernel/scftorture.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/scftorture.c b/kernel/scftorture.c index a0df767897a1..dcb0410950e4 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -41,8 +41,7 @@ #define VERBOSE_SCFTORTOUT(s, x...) \ do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0) -#define VERBOSE_SCFTORTOUT_ERRSTRING(s, x...) \ - do { if (verbose) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x); } while (0) +#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. 
McKenney "); @@ -584,14 +583,14 @@ static int __init scf_torture_init(void) if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 && weight_all1 == 0 && weight_all_wait1 == 0) { - VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense"); + SCFTORTOUT_ERRSTRING("all zero weights makes no sense"); firsterr = -EINVAL; goto unwind; } if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false); else if (weight_resched1) - VERBOSE_SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored"); + SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored"); scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false); scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true); scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true); @@ -622,7 +621,7 @@ static int __init scf_torture_init(void) nthreads = num_online_cpus(); scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL); if (!scf_stats_p) { - VERBOSE_SCFTORTOUT_ERRSTRING("out of memory"); + SCFTORTOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } -- cgit v1.2.3-59-g8ed1b From 81faa4f6fba429334ff72bb5ba7696818509b5b5 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Wed, 3 Nov 2021 16:30:28 +0800 Subject: locktorture,rcutorture,torture: Always log error message Unconditionally log messages corresponding to errors. Acked-by: Davidlohr Bueso Signed-off-by: Li Zhijian Signed-off-by: Paul E. McKenney --- include/linux/torture.h | 9 ++------- kernel/locking/locktorture.c | 4 ++-- kernel/rcu/rcutorture.c | 8 ++++---- kernel/torture.c | 4 ++-- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/include/linux/torture.h b/include/linux/torture.h index 24f58e50a94b..63fa4196e51c 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -38,13 +38,8 @@ do { \ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \ } \ } while (0) -#define VERBOSE_TOROUT_ERRSTRING(s) \ -do { \ - if (verbose) { \ - verbose_torout_sleep(); \ - pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \ - } \ -} while (0) +#define TOROUT_ERRSTRING(s) \ + pr_alert("%s" TORTURE_FLAG "!!! 
%s\n", torture_type, s) void verbose_torout_sleep(void); #define torture_init_error(firsterr) \ diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 397ac13d2ef7..9c2fb613a55d 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -1047,7 +1047,7 @@ static int __init lock_torture_init(void) sizeof(writer_tasks[0]), GFP_KERNEL); if (writer_tasks == NULL) { - VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); + TOROUT_ERRSTRING("writer_tasks: Out of memory"); firsterr = -ENOMEM; goto unwind; } @@ -1058,7 +1058,7 @@ static int __init lock_torture_init(void) sizeof(reader_tasks[0]), GFP_KERNEL); if (reader_tasks == NULL) { - VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); + TOROUT_ERRSTRING("reader_tasks: Out of memory"); kfree(writer_tasks); writer_tasks = NULL; firsterr = -ENOMEM; diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 503e14e62e8f..36a273589a35 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2774,7 +2774,7 @@ static int rcu_torture_read_exit(void *unused) &trs, "%s", "rcu_torture_read_exit_child"); if (IS_ERR(tsp)) { - VERBOSE_TOROUT_ERRSTRING("out of memory"); + TOROUT_ERRSTRING("out of memory"); errexit = true; tsp = NULL; break; @@ -3101,7 +3101,7 @@ rcu_torture_init(void) sizeof(fakewriter_tasks[0]), GFP_KERNEL); if (fakewriter_tasks == NULL) { - VERBOSE_TOROUT_ERRSTRING("out of memory"); + TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } @@ -3117,7 +3117,7 @@ rcu_torture_init(void) rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), GFP_KERNEL); if (!reader_tasks || !rcu_torture_reader_mbchk) { - VERBOSE_TOROUT_ERRSTRING("out of memory"); + TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } @@ -3136,7 +3136,7 @@ rcu_torture_init(void) if (nrealnocbers > 0) { nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); if (nocb_tasks == NULL) { - VERBOSE_TOROUT_ERRSTRING("out of memory"); + TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } diff --git a/kernel/torture.c b/kernel/torture.c index bb8f411c974b..ef27a6c82451 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -570,7 +570,7 @@ int torture_shuffle_init(long shuffint) shuffle_idle_cpu = -1; if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { - VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask"); + TOROUT_ERRSTRING("Failed to alloc mask"); return -ENOMEM; } @@ -934,7 +934,7 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, *tp = kthread_run(fn, arg, "%s", s); if (IS_ERR(*tp)) { ret = PTR_ERR(*tp); - VERBOSE_TOROUT_ERRSTRING(f); + TOROUT_ERRSTRING(f); *tp = NULL; } torture_shuffle_task_register(*tp); -- cgit v1.2.3-59-g8ed1b From 5ff7c9f9d7e3e0f6db5b81945fa11b69d62f433a Mon Sep 17 00:00:00 2001 From: Wander Lairson Costa Date: Wed, 10 Nov 2021 11:37:45 -0300 Subject: rcutorture: Avoid soft lockup during cpu stall If we use the module stall_cpu option, we may get a soft lockup warning in case we also don't pass the stall_cpu_block option. Introduce the stall_no_softlockup option to avoid a soft lockup on cpu stall even if we don't use the stall_cpu_block option. Signed-off-by: Wander Lairson Costa Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 36a273589a35..bc854f935548 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -46,6 +46,7 @@ #include #include #include +#include #include "rcu.h" @@ -112,6 +113,8 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s)."); +torture_param(bool, stall_no_softlockup, false, + "Avoid softlockup warning during cpu stall."); torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); torture_param(int, stall_gp_kthread, 0, @@ -2085,6 +2088,8 @@ static int rcu_torture_stall(void *args) #else schedule_timeout_uninterruptible(HZ); #endif + } else if (stall_no_softlockup) { + touch_softlockup_watchdog(); } if (stall_cpu_irqsoff) local_irq_enable(); -- cgit v1.2.3-59-g8ed1b From 12e885433dbc2e8098cfb62c68f731b18981f3f6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 12 Nov 2021 08:16:42 -0800 Subject: rcutorture: Test RCU-tasks multiqueue callback queueing This commit modifies the TASKS01 scenario to use four callback queues and the TRACE01 scenario to use two queues, thus providing testing of multiple queues by default. Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot | 1 + 2 files changed, 2 insertions(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot index 22cdeced98ea..30ca5b493c4b 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot @@ -1,2 +1,3 @@ rcutorture.torture_type=tasks rcutree.use_softirq=0 +rcupdate.rcu_task_enqueue_lim=4 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot index 9675ad632dcc..ba6d636a4856 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot @@ -1 +1,2 @@ rcutorture.torture_type=tasks-tracing +rcupdate.rcu_task_enqueue_lim=2 -- cgit v1.2.3-59-g8ed1b From 82e310033d7c21a7a88427f14e0dad78d731a5cd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Nov 2021 20:55:18 -0800 Subject: rcutorture: Enable multiple concurrent callback-flood kthreads This commit converts the rcutorture.fwd_progress module parameter from bool to int, so that it specifies the number of callback-flood kthreads. Values less than zero specify one kthread per CPU, however, the number of kthreads executing concurrently is limited to the number of online CPUs. This commit also reverse the order of the need-resched and callback-flood operations to cause the callback flooding to happen more nearly at the same time. Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- Documentation/admin-guide/kernel-parameters.txt | 6 +- kernel/rcu/rcutorture.c | 114 ++++++++++++++++++------ 2 files changed, 91 insertions(+), 29 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9725c546a0d4..ba60da85d1c8 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4603,8 +4603,12 @@ in seconds. rcutorture.fwd_progress= [KNL] - Enable RCU grace-period forward-progress testing + Specifies the number of kthreads to be used + for RCU grace-period forward-progress testing for the types of RCU supporting this notion. + Defaults to 1 kthread, values less than zero or + greater than the number of CPUs cause the number + of CPUs to be used. rcutorture.fwd_progress_div= [KNL] Specify the fraction of a CPU-stall-warning diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bc854f935548..9390f867ba6c 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -79,7 +79,7 @@ torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable"); torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); -torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); +torture_param(int, fwd_progress, 1, "Test grace-period forward progress"); torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)"); @@ -146,7 +146,7 @@ static struct task_struct *stats_task; static struct task_struct *fqs_task; static struct task_struct *boost_tasks[NR_CPUS]; static struct task_struct *stall_task; -static struct task_struct *fwd_prog_task; +static struct task_struct **fwd_prog_tasks; static struct task_struct **barrier_cbs_tasks; static struct task_struct *barrier_task; static struct task_struct *read_exit_task; @@ -2161,10 +2161,12 @@ struct rcu_fwd { unsigned long rcu_fwd_startat; struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; unsigned long rcu_launder_gp_seq_start; + int rcu_fwd_id; }; static DEFINE_MUTEX(rcu_fwd_mutex); static struct rcu_fwd *rcu_fwds; +static unsigned long rcu_fwd_seq; static bool rcu_fwd_emergency_stop; static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) @@ -2177,8 +2179,9 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) if (rfp->n_launders_hist[i].n_launders > 0) break; - pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", - __func__, jiffies - rfp->rcu_fwd_startat); + mutex_lock(&rcu_fwd_mutex); // Serialize histograms. + pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", + __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); gps_old = rfp->rcu_launder_gp_seq_start; for (j = 0; j <= i; j++) { gps = rfp->n_launders_hist[j].launder_gp_seq; @@ -2189,6 +2192,7 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) gps_old = gps; } pr_cont("\n"); + mutex_unlock(&rcu_fwd_mutex); } /* Callback function for continuous-flood RCU callbacks. 
*/ @@ -2314,7 +2318,8 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, cver = READ_ONCE(rcu_torture_current_version) - cver; gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); WARN_ON(!cver && gps < 2); - pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); + pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, + rfp->rcu_fwd_id, dur, cver, gps); } if (selfpropcb) { WRITE_ONCE(fcs.stop, 1); @@ -2432,6 +2437,8 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) static int rcutorture_oom_notify(struct notifier_block *self, unsigned long notused, void *nfreed) { + int i; + long ncbs; struct rcu_fwd *rfp; mutex_lock(&rcu_fwd_mutex); @@ -2442,18 +2449,26 @@ static int rcutorture_oom_notify(struct notifier_block *self, } WARN(1, "%s invoked upon OOM during forward-progress testing.\n", __func__); - rcu_torture_fwd_cb_hist(rfp); - rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); + for (i = 0; i < fwd_progress; i++) { + rcu_torture_fwd_cb_hist(&rfp[i]); + rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); + } WRITE_ONCE(rcu_fwd_emergency_stop, true); smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ - pr_info("%s: Freed %lu RCU callbacks.\n", - __func__, rcu_torture_fwd_prog_cbfree(rfp)); + ncbs = 0; + for (i = 0; i < fwd_progress; i++) + ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); + pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); rcu_barrier(); - pr_info("%s: Freed %lu RCU callbacks.\n", - __func__, rcu_torture_fwd_prog_cbfree(rfp)); + ncbs = 0; + for (i = 0; i < fwd_progress; i++) + ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); + pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); rcu_barrier(); - pr_info("%s: Freed %lu RCU callbacks.\n", - __func__, rcu_torture_fwd_prog_cbfree(rfp)); + ncbs = 0; + for (i = 0; i < fwd_progress; i++) + ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); + pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); smp_mb(); /* Frees before return to avoid redoing OOM. */ (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! 
*/ pr_info("%s returning after OOM processing.\n", __func__); @@ -2469,6 +2484,7 @@ static struct notifier_block rcutorture_oom_nb = { static int rcu_torture_fwd_prog(void *args) { int oldnice = task_nice(current); + unsigned long oldseq = READ_ONCE(rcu_fwd_seq); struct rcu_fwd *rfp = args; int tested = 0; int tested_tries = 0; @@ -2478,21 +2494,31 @@ static int rcu_torture_fwd_prog(void *args) if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) set_user_nice(current, MAX_NICE); do { - schedule_timeout_interruptible(fwd_progress_holdoff * HZ); - WRITE_ONCE(rcu_fwd_emergency_stop, false); + if (!rfp->rcu_fwd_id) { + schedule_timeout_interruptible(fwd_progress_holdoff * HZ); + WRITE_ONCE(rcu_fwd_emergency_stop, false); + WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); + } else { + while (READ_ONCE(rcu_fwd_seq) == oldseq) + schedule_timeout_interruptible(1); + oldseq = READ_ONCE(rcu_fwd_seq); + } + pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); + if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) + rcu_torture_fwd_prog_cr(rfp); if (!IS_ENABLED(CONFIG_TINY_RCU) || - rcu_inkernel_boot_has_ended()) + (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)) rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); - if (rcu_inkernel_boot_has_ended()) - rcu_torture_fwd_prog_cr(rfp); /* Avoid slow periods, better to test when busy. */ if (stutter_wait("rcu_torture_fwd_prog")) sched_set_normal(current, oldnice); } while (!torture_must_stop()); /* Short runs might not contain a valid forward-progress attempt. */ - WARN_ON(!tested && tested_tries >= 5); - pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); + if (!rfp->rcu_fwd_id) { + WARN_ON(!tested && tested_tries >= 5); + pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); + } torture_kthread_stopping("rcu_torture_fwd_prog"); return 0; } @@ -2500,17 +2526,27 @@ static int rcu_torture_fwd_prog(void *args) /* If forward-progress checking is requested and feasible, spawn the thread. */ static int __init rcu_torture_fwd_prog_init(void) { + int i; + int ret = 0; struct rcu_fwd *rfp; if (!fwd_progress) return 0; /* Not requested, so don't do it. */ + if (fwd_progress >= nr_cpu_ids) { + VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); + fwd_progress = nr_cpu_ids; + } else if (fwd_progress < 0) { + fwd_progress = nr_cpu_ids; + } if ((!cur_ops->sync && !cur_ops->call) || !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) { VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); + fwd_progress = 0; return 0; } if (stall_cpu > 0) { VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); + fwd_progress = 0; if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) return -EINVAL; /* In module, can fail back to user. */ WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ @@ -2520,29 +2556,51 @@ static int __init rcu_torture_fwd_prog_init(void) fwd_progress_holdoff = 1; if (fwd_progress_div <= 0) fwd_progress_div = 4; - rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); - if (!rfp) + rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); + fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); + if (!rfp || !fwd_prog_tasks) { + kfree(rfp); + kfree(fwd_prog_tasks); + fwd_prog_tasks = NULL; + fwd_progress = 0; return -ENOMEM; - spin_lock_init(&rfp->rcu_fwd_lock); - rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; + } + for (i = 0; i < fwd_progress; i++) { + spin_lock_init(&rfp[i].rcu_fwd_lock); + rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; + rfp[i].rcu_fwd_id = i; + } mutex_lock(&rcu_fwd_mutex); rcu_fwds = rfp; mutex_unlock(&rcu_fwd_mutex); register_oom_notifier(&rcutorture_oom_nb); - return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); + for (i = 0; i < fwd_progress; i++) { + ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); + if (ret) { + fwd_progress = i; + return ret; + } + } + return 0; } static void rcu_torture_fwd_prog_cleanup(void) { + int i; struct rcu_fwd *rfp; - torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); - rfp = rcu_fwds; + if (!rcu_fwds || !fwd_prog_tasks) + return; + for (i = 0; i < fwd_progress; i++) + torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); + unregister_oom_notifier(&rcutorture_oom_nb); mutex_lock(&rcu_fwd_mutex); + rfp = rcu_fwds; rcu_fwds = NULL; mutex_unlock(&rcu_fwd_mutex); - unregister_oom_notifier(&rcutorture_oom_nb); kfree(rfp); + kfree(fwd_prog_tasks); + fwd_prog_tasks = NULL; } /* Callback function for RCU barrier testing. */ -- cgit v1.2.3-59-g8ed1b From 613b00fbe64461f1c73e035e07c22cf3de65740b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 23 Nov 2021 11:53:52 -0800 Subject: rcutorture: Add ability to limit callback-flood intensity The RCU tasks flavors of RCU now need concurrent callback flooding to test their ability to switch between single-queue mode and per-CPU queue mode, but their lack of heavy-duty forward-progress features rules out the use of rcutorture's current callback-flooding code. This commit therefore provides the ability to limit the intensity of the callback floods using a new ->cbflood_max field in the rcu_operations structure. When this field is zero, there is no limit, otherwise, each callback-flood kthread allocates at most ->cbflood_max callbacks. Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 9390f867ba6c..1e8360d15920 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -348,6 +348,7 @@ struct rcu_torture_ops { void (*gp_kthread_dbg)(void); bool (*check_boost_failed)(unsigned long gp_state, int *cpup); int (*stall_dur)(void); + long cbflood_max; int irq_capable; int can_boost; int extendables; @@ -841,6 +842,7 @@ static struct rcu_torture_ops tasks_rude_ops = { .call = call_rcu_tasks_rude, .cb_barrier = rcu_barrier_tasks_rude, .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, + .cbflood_max = 50000, .fqs = NULL, .stats = NULL, .irq_capable = 1, @@ -881,6 +883,7 @@ static struct rcu_torture_ops tasks_tracing_ops = { .call = call_rcu_tasks_trace, .cb_barrier = rcu_barrier_tasks_trace, .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, + .cbflood_max = 50000, .fqs = NULL, .stats = NULL, .irq_capable = 1, @@ -2387,7 +2390,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) rfp->rcu_fwd_cb_head = rfcpn; n_launders++; n_launders_sa++; - } else { + } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); if (WARN_ON_ONCE(!rfcp)) { schedule_timeout_interruptible(1); @@ -2397,8 +2400,11 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) n_launders_sa = 0; rfcp->rfc_gps = 0; rfcp->rfc_rfp = rfp; + } else { + rfcp = NULL; } - cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); + if (rfcp) + cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); if (tick_nohz_full_enabled()) { local_irq_save(flags); @@ -2506,8 +2512,10 @@ static int rcu_torture_fwd_prog(void *args) pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) rcu_torture_fwd_prog_cr(rfp); - if (!IS_ENABLED(CONFIG_TINY_RCU) || - (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)) + if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && + (!IS_ENABLED(CONFIG_TINY_RCU) || + (rcu_inkernel_boot_has_ended() && + torture_num_online_cpus() > rfp->rcu_fwd_id))) rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); /* Avoid slow periods, better to test when busy. */ @@ -2539,7 +2547,8 @@ static int __init rcu_torture_fwd_prog_init(void) fwd_progress = nr_cpu_ids; } if ((!cur_ops->sync && !cur_ops->call) || - !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) { + (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || + cur_ops == &rcu_busted_ops) { VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); fwd_progress = 0; return 0; -- cgit v1.2.3-59-g8ed1b From 53b541fbdb9c498db112216a0cc9fe4804f54742 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 23 Nov 2021 13:51:11 -0800 Subject: rcutorture: Combine n_max_cbs from all kthreads in a callback flood With the addition of multiple callback-flood kthreads, the maximum number of callbacks from any one of those kthreads is reported in the rcutorture run summary. This commit changes this to report the sum of each kthread's maximum number of callbacks in a given callback-flooding episode. Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 9 +++++++++ tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 1e8360d15920..33ea446101b3 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2170,6 +2170,7 @@ struct rcu_fwd { static DEFINE_MUTEX(rcu_fwd_mutex); static struct rcu_fwd *rcu_fwds; static unsigned long rcu_fwd_seq; +static atomic_long_t rcu_fwd_max_cbs; static bool rcu_fwd_emergency_stop; static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) @@ -2428,6 +2429,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) n_launders + n_max_cbs - n_launders_cb_snap, n_launders, n_launders_sa, n_max_gps, n_max_cbs, cver, gps); + atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); rcu_torture_fwd_cb_hist(rfp); } schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ @@ -2489,6 +2491,8 @@ static struct notifier_block rcutorture_oom_nb = { /* Carry out grace-period forward-progress testing. */ static int rcu_torture_fwd_prog(void *args) { + bool firsttime = true; + long max_cbs; int oldnice = task_nice(current); unsigned long oldseq = READ_ONCE(rcu_fwd_seq); struct rcu_fwd *rfp = args; @@ -2503,6 +2507,11 @@ static int rcu_torture_fwd_prog(void *args) if (!rfp->rcu_fwd_id) { schedule_timeout_interruptible(fwd_progress_holdoff * HZ); WRITE_ONCE(rcu_fwd_emergency_stop, false); + if (!firsttime) { + max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); + pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); + } + firsttime = false; WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); } else { while (READ_ONCE(rcu_fwd_seq) == oldseq) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh index fbdf162b6acd..1c4c2c727dad 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh @@ -25,7 +25,7 @@ stopstate="`grep 'End-test grace-period state: g' $i/console.log 2> /dev/null | tail -1 | sed -e 's/^\[[ 0-9.]*] //' | awk '{ print \"[\" $1 \" \" $5 \" \" $6 \" \" $7 \"]\"; }' | tr -d '\012\015'`" -fwdprog="`grep 'rcu_torture_fwd_prog_cr Duration' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k15nr | head -1 | awk '{ print $14 " " $15 }'`" +fwdprog="`grep 'rcu_torture_fwd_prog n_max_cbs: ' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k3nr | head -1 | awk '{ print $2 " " $3 }'`" if test -z "$ngps" then echo "$configfile ------- " $stopstate -- cgit v1.2.3-59-g8ed1b From 381a4f3b38603aab47e5500609d5ec733b5d0ecb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 8 Nov 2021 16:52:02 -0800 Subject: rcu-tasks: Use spin_lock_rcu_node() and friends This commit renames the rcu_tasks_percpu structure's ->cbs_pcpu_lock to ->lock and then uses spin_lock_rcu_node() and friends to acquire and release this lock, preparing for upcoming commits that will spread the grace-period process across multiple CPUs and kthreads. [ paulmck: Apply feedback from kernel test robot. ] Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tasks.h | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 2e58d7fa2da4..8cf325146fb0 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -23,11 +23,11 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); /** * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. * @cblist: Callback list. - * @cbs_pcpu_lock: Lock protecting per-CPU callback list. + * @lock: Lock protecting per-CPU callback list. */ struct rcu_tasks_percpu { struct rcu_segcblist cblist; - raw_spinlock_t cbs_pcpu_lock; + raw_spinlock_t __private lock; }; /** @@ -82,7 +82,7 @@ struct rcu_tasks { #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ - .cbs_pcpu_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ }; \ static struct rcu_tasks rt_name = \ { \ @@ -177,11 +177,11 @@ static void cblist_init_generic(struct rcu_tasks *rtp) WARN_ON_ONCE(!rtpcp); if (cpu) - raw_spin_lock_init(&rtpcp->cbs_pcpu_lock); - raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. + raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); + raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. if (rcu_segcblist_empty(&rtpcp->cblist)) rcu_segcblist_init(&rtpcp->cblist); - raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. + raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); @@ -200,15 +200,15 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, local_irq_save(flags); rtpcp = per_cpu_ptr(rtp->rtpcpu, smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); - raw_spin_lock(&rtpcp->cbs_pcpu_lock); + raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { - raw_spin_unlock(&rtpcp->cbs_pcpu_lock); // irqs remain disabled. + raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. cblist_init_generic(rtp); - raw_spin_lock(&rtpcp->cbs_pcpu_lock); // irqs already disabled. + raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. } needwake = rcu_segcblist_empty(&rtpcp->cblist); rcu_segcblist_enqueue(&rtpcp->cblist, rhp); - raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); /* We can't create the thread unless interrupts are enabled. */ if (needwake && READ_ONCE(rtp->kthread_ptr)) wake_up(&rtp->cbs_wq); @@ -250,11 +250,10 @@ static int __noreturn rcu_tasks_kthread(void *arg) set_tasks_gp_state(rtp, RTGS_WAIT_CBS); /* Pick up any new callbacks. */ - raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); - smp_mb__after_spinlock(); // Order updates vs. GP. + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); /* If there were none, wait a bit and start over. */ if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { @@ -277,11 +276,10 @@ static int __noreturn rcu_tasks_kthread(void *arg) /* Invoke the callbacks. 
*/ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); - raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); - smp_mb__after_spinlock(); // Order updates vs. GP. + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); - raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); len = rcl.len; for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { local_bh_disable(); @@ -289,10 +287,10 @@ static int __noreturn rcu_tasks_kthread(void *arg) local_bh_enable(); cond_resched(); } - raw_spin_lock_irqsave(&rtpcp->cbs_pcpu_lock, flags); + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); rcu_segcblist_add_len(&rtpcp->cblist, -len); (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - raw_spin_unlock_irqrestore(&rtpcp->cbs_pcpu_lock, flags); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); /* Paranoid sleep to keep this from entering a tight loop */ schedule_timeout_idle(rtp->gp_sleep); } @@ -470,10 +468,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU // read-side critical sections waited for by rcu_tasks_postscan(). // -// Pre-grace-period update-side code is ordered before the grace via the -// ->cbs_lock and the smp_mb__after_spinlock(). Pre-grace-period read-side -// code is ordered before the grace period via synchronize_rcu() call -// in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt +// Pre-grace-period update-side code is ordered before the grace +// via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code +// is ordered before the grace period via synchronize_rcu() call in +// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt // disabling. /* Pre-grace-period preparation. */ -- cgit v1.2.3-59-g8ed1b From 65b629e70489b810a108fe1155da4e41a5010534 Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Tue, 9 Nov 2021 16:52:14 +0530 Subject: rcu-tasks: Inspect stalled task's trc state in locked state On RCU tasks trace stall, inspect the RCU-tasks-trace specific states of stalled task in locked down state, using try_invoke_ on_locked_down_task(), to get reliable trc state of a non-running stalled task. This was tested using the following command: tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 8 --configs TRACE01 \ --bootargs "rcutorture.torture_type=tasks-tracing rcutorture.stall_cpu=10 \ rcutorture.stall_cpu_block=1 rcupdate.rcu_task_stall_timeout=100" --trust-make As expected, this produced the following console output for running and sleeping tasks. [ 21.520291] INFO: rcu_tasks_trace detected stalls on tasks: [ 21.521292] P85: ... nesting: 1N cpu: 2 [ 21.521966] task:rcu_torture_sta state:D stack:15080 pid: 85 ppid: 2 flags:0x00004000 [ 21.523384] Call Trace: [ 21.523808] __schedule+0x273/0x6e0 [ 21.524428] schedule+0x35/0xa0 [ 21.524971] schedule_timeout+0x1ed/0x270 [ 21.525690] ? del_timer_sync+0x30/0x30 [ 21.526371] ? rcu_torture_writer+0x720/0x720 [ 21.527106] rcu_torture_stall+0x24a/0x270 [ 21.527816] kthread+0x115/0x140 [ 21.528401] ? set_kthread_struct+0x40/0x40 [ 21.529136] ret_from_fork+0x22/0x30 [ 21.529766] 1 holdouts [ 21.632300] INFO: rcu_tasks_trace detected stalls on tasks: [ 21.632345] rcu_torture_stall end. [ 21.633293] P85: . 
[ 21.633294] task:rcu_torture_sta state:R running task stack:15080 pid: 85 ppid: 2 flags:0x00004000 [ 21.633299] Call Trace: [ 21.633301] ? vprintk_emit+0xab/0x180 [ 21.633306] ? vprintk_emit+0x11a/0x180 [ 21.633308] ? _printk+0x4d/0x69 [ 21.633311] ? __default_send_IPI_shortcut+0x1f/0x40 [ paulmck: Update to new v5.16 task_call_func() name. ] Signed-off-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 8cf325146fb0..c11f4e646bf1 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1132,25 +1132,50 @@ static void rcu_tasks_trace_postscan(struct list_head *hop) // Any tasks that exit after this point will set ->trc_reader_checked. } +/* Communicate task state back to the RCU tasks trace stall warning request. */ +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +static int trc_check_slow_task(struct task_struct *t, void *arg) +{ + struct trc_stall_chk_rdr *trc_rdrp = arg; + + if (task_curr(t)) + return false; // It is running, so decline to inspect it. + trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); + trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); + trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs); + return true; +} + /* Show the state of a task stalling the current RCU tasks trace GP. */ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) { int cpu; + struct trc_stall_chk_rdr trc_rdr; + bool is_idle_tsk = is_idle_task(t); if (*firstreport) { pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); *firstreport = false; } - // FIXME: This should attempt to use try_invoke_on_nonrunning_task(). cpu = task_cpu(t); - pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", - t->pid, - ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0], - ".i"[is_idle_task(t)], - ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], - READ_ONCE(t->trc_reader_nesting), - " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)], - cpu); + if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) + pr_alert("P%d: %c\n", + t->pid, + ".i"[is_idle_tsk]); + else + pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", + t->pid, + ".I"[trc_rdr.ipi_to_cpu >= 0], + ".i"[is_idle_tsk], + ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], + trc_rdr.nesting, + " N"[!!trc_rdr.needqs], + cpu); sched_show_task(t); } -- cgit v1.2.3-59-g8ed1b From 8dd593fddd630e7dcbe79e98ff5e0d0561e9ff27 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 9 Nov 2021 11:11:32 -0800 Subject: rcu-tasks: Add a ->percpu_enqueue_lim to the rcu_tasks structure This commit adds a ->percpu_enqueue_lim field to the rcu_tasks structure. This field contains two to the power of the ->percpu_enqueue_shift field, easing construction of iterators over the per-CPU queues that might contain RCU Tasks callbacks. Such iterators will be introduced in later commits. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index c11f4e646bf1..608af87a5065 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -52,6 +52,7 @@ struct rcu_tasks_percpu { * @call_func: This flavor's call_rcu()-equivalent function. * @rtpcpu: This flavor's rcu_tasks_percpu structure. * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. + * @percpu_enqueue_lim: Number of per-CPU callback queues in use. 
* @name: This flavor's textual name. * @kname: This flavor's kthread name. */ @@ -76,6 +77,7 @@ struct rcu_tasks { call_rcu_func_t call_func; struct rcu_tasks_percpu __percpu *rtpcpu; int percpu_enqueue_shift; + int percpu_enqueue_lim; char *name; char *kname; }; @@ -93,6 +95,7 @@ static struct rcu_tasks rt_name = \ .rtpcpu = &rt_name ## __percpu, \ .name = n, \ .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \ + .percpu_enqueue_lim = 1, \ .kname = #rt_name, \ } @@ -172,6 +175,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp) raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids); + rtp->percpu_enqueue_lim = 1; for_each_possible_cpu(cpu) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); -- cgit v1.2.3-59-g8ed1b From 4d1114c05467b5f421d99121bff22a9633390722 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 9 Nov 2021 13:37:34 -0800 Subject: rcu-tasks: Abstract checking of callback lists This commit adds a rcu_tasks_need_gpcb() function that returns an indication of whether another grace period is required, and if no grace period is required, whether there are callbacks that need to be invoked. The function scans all per-CPU lists currently in use. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 61 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 608af87a5065..e63987a5af56 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -229,11 +229,38 @@ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) wait_rcu_gp(rtp->call_func); } +// Advance callbacks and indicate whether either a grace period or +// callback invocation is needed. +static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) +{ + int cpu; + unsigned long flags; + int needgpcb = 0; + + for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); + + /* Advance and accelerate any new callbacks. */ + if (rcu_segcblist_empty(&rtpcp->cblist)) + continue; + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); + (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); + if (rcu_segcblist_pend_cbs(&rtpcp->cblist)) + needgpcb |= 0x3; + if (!rcu_segcblist_empty(&rtpcp->cblist)) + needgpcb |= 0x1; + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + } + return needgpcb; +} + /* RCU-tasks kthread that detects grace periods and invokes callbacks. */ static int __noreturn rcu_tasks_kthread(void *arg) { unsigned long flags; int len; + int needgpcb; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); struct rcu_head *rhp; struct rcu_tasks *rtp = arg; @@ -249,37 +276,25 @@ static int __noreturn rcu_tasks_kthread(void *arg) * This loop is terminated by the system going down. ;-) */ for (;;) { - struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each... + struct rcu_tasks_percpu *rtpcp; set_tasks_gp_state(rtp, RTGS_WAIT_CBS); - /* Pick up any new callbacks. */ - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); - (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - /* If there were none, wait a bit and start over. 
*/ - if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { - wait_event_interruptible(rtp->cbs_wq, - rcu_segcblist_pend_cbs(&rtpcp->cblist)); - if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { - WARN_ON(signal_pending(current)); - set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS); - schedule_timeout_idle(HZ/10); - } - continue; + wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp))); + + if (needgpcb & 0x2) { + // Wait for one grace period. + set_tasks_gp_state(rtp, RTGS_WAIT_GP); + rtp->gp_start = jiffies; + rcu_seq_start(&rtp->tasks_gp_seq); + rtp->gp_func(rtp); + rcu_seq_end(&rtp->tasks_gp_seq); } - // Wait for one grace period. - set_tasks_gp_state(rtp, RTGS_WAIT_GP); - rtp->gp_start = jiffies; - rcu_seq_start(&rtp->tasks_gp_seq); - rtp->gp_func(rtp); - rcu_seq_end(&rtp->tasks_gp_seq); - /* Invoke the callbacks. */ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); + rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); raw_spin_lock_irqsave_rcu_node(rtpcp, flags); rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); -- cgit v1.2.3-59-g8ed1b From 57881863ad15fbccbfa637b5e4b67cd3a4520643 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 9 Nov 2021 15:34:56 -0800 Subject: rcu-tasks: Abstract invocations of callbacks This commit adds a rcu_tasks_invoke_cbs() function that invokes all ready callbacks on all of the per-CPU lists that are currently in use. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 56 +++++++++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index e63987a5af56..6b3406a47720 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -255,14 +255,42 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) return needgpcb; } -/* RCU-tasks kthread that detects grace periods and invokes callbacks. */ -static int __noreturn rcu_tasks_kthread(void *arg) +// Advance callbacks and invoke any that are ready. +static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp) { + int cpu; unsigned long flags; int len; - int needgpcb; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); struct rcu_head *rhp; + + for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); + + if (rcu_segcblist_empty(&rtpcp->cblist)) + continue; + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); + rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + len = rcl.len; + for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); + cond_resched(); + } + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + rcu_segcblist_add_len(&rtpcp->cblist, -len); + (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + } +} + +/* RCU-tasks kthread that detects grace periods and invokes callbacks. */ +static int __noreturn rcu_tasks_kthread(void *arg) +{ + int needgpcb; struct rcu_tasks *rtp = arg; /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ @@ -276,8 +304,6 @@ static int __noreturn rcu_tasks_kthread(void *arg) * This loop is terminated by the system going down. 
;-) */ for (;;) { - struct rcu_tasks_percpu *rtpcp; - set_tasks_gp_state(rtp, RTGS_WAIT_CBS); /* If there were none, wait a bit and start over. */ @@ -292,24 +318,10 @@ static int __noreturn rcu_tasks_kthread(void *arg) rcu_seq_end(&rtp->tasks_gp_seq); } - /* Invoke the callbacks. */ + /* Invoke callbacks. */ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); - rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); - rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - len = rcl.len; - for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { - local_bh_disable(); - rhp->func(rhp); - local_bh_enable(); - cond_resched(); - } - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - rcu_segcblist_add_len(&rtpcp->cblist, -len); - (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + rcu_tasks_invoke_cbs(rtp); + /* Paranoid sleep to keep this from entering a tight loop */ schedule_timeout_idle(rtp->gp_sleep); } -- cgit v1.2.3-59-g8ed1b From d363f833c6d88331ff013ff0970a96caa8b84653 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 10 Nov 2021 15:56:40 -0800 Subject: rcu-tasks: Use workqueues for multiple rcu_tasks_invoke_cbs() invocations If there is a flood of callbacks, it is necessary to put multiple CPUs to work invoking those callbacks. This commit therefore uses a workqueue-flooding approach to parallelize RCU Tasks callback execution. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 75 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 23 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 6b3406a47720..97bd7a667e94 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -24,10 +24,14 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. * @cblist: Callback list. * @lock: Lock protecting per-CPU callback list. + * @rtp_work: Work queue for invoking callbacks. */ struct rcu_tasks_percpu { struct rcu_segcblist cblist; raw_spinlock_t __private lock; + struct work_struct rtp_work; + int cpu; + struct rcu_tasks *rtpp; }; /** @@ -146,6 +150,8 @@ static const char * const rcu_tasks_gp_state_names[] = { // // Generic code. +static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); + /* Record grace-period phase and time. */ static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) { @@ -185,6 +191,9 @@ static void cblist_init_generic(struct rcu_tasks *rtp) raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. if (rcu_segcblist_empty(&rtpcp->cblist)) rcu_segcblist_init(&rtpcp->cblist); + INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); + rtpcp->cpu = cpu; + rtpcp->rtpp = rtp; raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); @@ -256,35 +265,55 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) } // Advance callbacks and invoke any that are ready. 
-static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp) +static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) { int cpu; + int cpunext; unsigned long flags; int len; - struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); struct rcu_head *rhp; - - for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) { - struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); - - if (rcu_segcblist_empty(&rtpcp->cblist)) - continue; - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); - rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); - len = rcl.len; - for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { - local_bh_disable(); - rhp->func(rhp); - local_bh_enable(); - cond_resched(); + struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); + struct rcu_tasks_percpu *rtpcp_next; + + cpu = rtpcp->cpu; + cpunext = cpu * 2 + 1; + if (cpunext < rtp->percpu_enqueue_lim) { + rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); + queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); + cpunext++; + if (cpunext < rtp->percpu_enqueue_lim) { + rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); + queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); } - raw_spin_lock_irqsave_rcu_node(rtpcp, flags); - rcu_segcblist_add_len(&rtpcp->cblist, -len); - (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); } + + if (rcu_segcblist_empty(&rtpcp->cblist)) + return; + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); + rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + len = rcl.len; + for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); + cond_resched(); + } + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + rcu_segcblist_add_len(&rtpcp->cblist, -len); + (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); +} + +// Workqueue flood to advance callbacks and invoke any that are ready. +static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) +{ + struct rcu_tasks *rtp; + struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); + + rtp = rtpcp->rtpp; + rcu_tasks_invoke_cbs(rtp, rtpcp); } /* RCU-tasks kthread that detects grace periods and invokes callbacks. */ @@ -320,7 +349,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) /* Invoke callbacks. */ set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); - rcu_tasks_invoke_cbs(rtp); + rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); /* Paranoid sleep to keep this from entering a tight loop */ schedule_timeout_idle(rtp->gp_sleep); -- cgit v1.2.3-59-g8ed1b From ce9b1c667f03e0aa30d3eb69d0932e010d131c49 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 11 Nov 2021 14:53:43 -0800 Subject: rcu-tasks: Make rcu_barrier_tasks*() handle multiple callback queues Currently, rcu_barrier_tasks(), rcu_barrier_tasks_rude(), and rcu_barrier_tasks_trace() simply invoke the corresponding synchronize_rcu_tasks*() function. This works because there is only one callback queue. However, there will soon be multiple callback queues. 
This commit therefore scans the queues currently in use, entraining a callback on each non-empty queue. Sequence numbers and reference counts are used to synchronize this process in a manner similar to the approach taken by rcu_barrier(). Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 70 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 97bd7a667e94..8e6601c5fd2e 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -25,11 +25,15 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @cblist: Callback list. * @lock: Lock protecting per-CPU callback list. * @rtp_work: Work queue for invoking callbacks. + * @barrier_q_head: RCU callback for barrier operation. + * @cpu: CPU number corresponding to this entry. + * @rtpp: Pointer to the rcu_tasks structure. */ struct rcu_tasks_percpu { struct rcu_segcblist cblist; raw_spinlock_t __private lock; struct work_struct rtp_work; + struct rcu_head barrier_q_head; int cpu; struct rcu_tasks *rtpp; }; @@ -57,6 +61,10 @@ struct rcu_tasks_percpu { * @rtpcpu: This flavor's rcu_tasks_percpu structure. * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. * @percpu_enqueue_lim: Number of per-CPU callback queues in use. + * @barrier_q_mutex: Serialize barrier operations. + * @barrier_q_count: Number of queues being waited on. + * @barrier_q_completion: Barrier wait/wakeup mechanism. + * @barrier_q_seq: Sequence number for barrier operations. * @name: This flavor's textual name. * @kname: This flavor's kthread name. */ @@ -82,6 +90,10 @@ struct rcu_tasks { struct rcu_tasks_percpu __percpu *rtpcpu; int percpu_enqueue_shift; int percpu_enqueue_lim; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + unsigned long barrier_q_seq; char *name; char *kname; }; @@ -100,6 +112,8 @@ static struct rcu_tasks rt_name = \ .name = n, \ .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \ .percpu_enqueue_lim = 1, \ + .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ + .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ .kname = #rt_name, \ } @@ -238,6 +252,53 @@ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) wait_rcu_gp(rtp->call_func); } +// RCU callback function for rcu_barrier_tasks_generic(). +static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) +{ + struct rcu_tasks *rtp; + struct rcu_tasks_percpu *rtpcp; + + rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); + rtp = rtpcp->rtpp; + if (atomic_dec_and_test(&rtp->barrier_q_count)) + complete(&rtp->barrier_q_completion); +} + +// Wait for all in-flight callbacks for the specified RCU Tasks flavor. +// Operates in a manner similar to rcu_barrier(). 
+static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) +{ + int cpu; + unsigned long flags; + struct rcu_tasks_percpu *rtpcp; + unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); + + mutex_lock(&rtp->barrier_q_mutex); + if (rcu_seq_done(&rtp->barrier_q_seq, s)) { + smp_mb(); + mutex_unlock(&rtp->barrier_q_mutex); + return; + } + rcu_seq_start(&rtp->barrier_q_seq); + init_completion(&rtp->barrier_q_completion); + atomic_set(&rtp->barrier_q_count, 2); + for_each_possible_cpu(cpu) { + if (cpu >= smp_load_acquire(&rtp->percpu_enqueue_lim)) + break; + rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); + rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) + atomic_inc(&rtp->barrier_q_count); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + } + if (atomic_sub_and_test(2, &rtp->barrier_q_count)) + complete(&rtp->barrier_q_completion); + wait_for_completion(&rtp->barrier_q_completion); + rcu_seq_end(&rtp->barrier_q_seq); + mutex_unlock(&rtp->barrier_q_mutex); +} + // Advance callbacks and indicate whether either a grace period or // callback invocation is needed. static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) @@ -703,8 +764,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); */ void rcu_barrier_tasks(void) { - /* There is only one callback queue, so this is easy. ;-) */ - synchronize_rcu_tasks(); + rcu_barrier_tasks_generic(&rcu_tasks); } EXPORT_SYMBOL_GPL(rcu_barrier_tasks); @@ -842,8 +902,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); */ void rcu_barrier_tasks_rude(void) { - /* There is only one callback queue, so this is easy. ;-) */ - synchronize_rcu_tasks_rude(); + rcu_barrier_tasks_generic(&rcu_tasks_rude); } EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); @@ -1401,8 +1460,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); */ void rcu_barrier_tasks_trace(void) { - /* There is only one callback queue, so this is easy. ;-) */ - synchronize_rcu_tasks_trace(); + rcu_barrier_tasks_generic(&rcu_tasks_trace); } EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); -- cgit v1.2.3-59-g8ed1b From 8610b65680390a103b58f46282a1b05f7eebbba4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 12 Nov 2021 07:33:40 -0800 Subject: rcu-tasks: Add rcupdate.rcu_task_enqueue_lim to set initial queueing This commit adds a rcupdate.rcu_task_enqueue_lim module parameter that sets the initial number of callback queues to use for the RCU Tasks family of RCU implementations. This parameter allows testing of various fanout values. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 7 +++++++ kernel/rcu/tasks.h | 24 ++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9725c546a0d4..9b09fc5dfe66 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4805,6 +4805,13 @@ period to instead use normal non-expedited grace-period processing. + rcupdate.rcu_task_enqueue_lim= [KNL] + Set the number of callback queues to use for the + RCU Tasks family of RCU flavors. The default + of -1 allows this to be automatically (and + dynamically) adjusted. This parameter is intended + for use in testing. 
+ rcupdate.rcu_task_ipi_delay= [KNL] Set time in jiffies during which RCU tasks will avoid sending IPIs, starting with the beginning diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 8e6601c5fd2e..9d3eddaecfde 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -130,6 +130,9 @@ module_param(rcu_task_ipi_delay, int, 0644); static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; module_param(rcu_task_stall_timeout, int, 0644); +static int rcu_task_enqueue_lim __read_mostly = -1; +module_param(rcu_task_enqueue_lim, int, 0444); + /* RCU tasks grace-period state for debugging. */ #define RTGS_INIT 0 #define RTGS_WAIT_WAIT_CBS 1 @@ -192,10 +195,19 @@ static void cblist_init_generic(struct rcu_tasks *rtp) { int cpu; unsigned long flags; + int lim; raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); - rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids); - rtp->percpu_enqueue_lim = 1; + if (rcu_task_enqueue_lim < 0) + rcu_task_enqueue_lim = nr_cpu_ids; + else if (rcu_task_enqueue_lim == 0) + rcu_task_enqueue_lim = 1; + lim = rcu_task_enqueue_lim; + + if (lim > nr_cpu_ids) + lim = nr_cpu_ids; + WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim)); + smp_store_release(&rtp->percpu_enqueue_lim, lim); for_each_possible_cpu(cpu) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); @@ -211,7 +223,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp) raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); - + pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim)); } // Enqueue a callback for the specified flavor of Tasks RCU. @@ -307,7 +319,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) unsigned long flags; int needgpcb = 0; - for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) { + for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_enqueue_lim); cpu++) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); /* Advance and accelerate any new callbacks. */ @@ -338,11 +350,11 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu cpu = rtpcp->cpu; cpunext = cpu * 2 + 1; - if (cpunext < rtp->percpu_enqueue_lim) { + if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) { rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); cpunext++; - if (cpunext < rtp->percpu_enqueue_lim) { + if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) { rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); } -- cgit v1.2.3-59-g8ed1b From 7d13d30bb6c54b57d196eab89dea2729a565dbd7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Nov 2021 13:38:42 -0800 Subject: rcu-tasks: Count trylocks to estimate call_rcu_tasks() contention This commit converts the unconditional raw_spin_lock_rcu_node() lock acquisition in call_rcu_tasks_generic() to a trylock followed by an unconditional acquisition if the trylock fails. If the trylock fails, the failure is counted, but the count is reset to zero on each new jiffy. This statistic will be used to determine when to move from a single callback queue to per-CPU callback queues. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tasks.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 9d3eddaecfde..231a949cdb81 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -24,6 +24,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. * @cblist: Callback list. * @lock: Lock protecting per-CPU callback list. + * @rtp_jiffies: Jiffies counter value for statistics. + * @rtp_n_lock_retries: Rough lock-contention statistic. * @rtp_work: Work queue for invoking callbacks. * @barrier_q_head: RCU callback for barrier operation. * @cpu: CPU number corresponding to this entry. @@ -32,6 +34,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); struct rcu_tasks_percpu { struct rcu_segcblist cblist; raw_spinlock_t __private lock; + unsigned long rtp_jiffies; + unsigned long rtp_n_lock_retries; struct work_struct rtp_work; struct rcu_head barrier_q_head; int cpu; @@ -231,6 +235,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, struct rcu_tasks *rtp) { unsigned long flags; + unsigned long j; bool needwake; struct rcu_tasks_percpu *rtpcp; @@ -239,7 +244,15 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, local_irq_save(flags); rtpcp = per_cpu_ptr(rtp->rtpcpu, smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); - raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. + if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. + raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. + j = jiffies; + if (rtpcp->rtp_jiffies != j) { + rtpcp->rtp_jiffies = j; + rtpcp->rtp_n_lock_retries = 0; + } + rtpcp->rtp_n_lock_retries++; + } if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. cblist_init_generic(rtp); -- cgit v1.2.3-59-g8ed1b From 3063b33a347c088e87516764d487e46fea3dfc94 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 23 Nov 2021 16:16:50 -0800 Subject: rcu-tasks: Avoid raw-spinlocked wakeups from call_rcu_tasks_generic() If the caller of of call_rcu_tasks(), call_rcu_tasks_rude(), or call_rcu_tasks_trace() holds a raw spinlock, and then if call_rcu_tasks_generic() determines that the grace-period kthread must be awakened, then the wakeup might acquire a normal spinlock while a raw spinlock is held. This results in lockdep splats when the kernel is built with CONFIG_PROVE_RAW_LOCK_NESTING=y. This commit therefore defers the wakeup using irq_work_queue(). It would be nice to directly invoke wakeup when a raw spinlock is not held, but there is currently no way to check for this in all kernels. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 231a949cdb81..6e8e4cf6fff3 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -27,6 +27,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @rtp_jiffies: Jiffies counter value for statistics. * @rtp_n_lock_retries: Rough lock-contention statistic. * @rtp_work: Work queue for invoking callbacks. + * @rtp_irq_work: IRQ work queue for deferred wakeups. * @barrier_q_head: RCU callback for barrier operation. * @cpu: CPU number corresponding to this entry. * @rtpp: Pointer to the rcu_tasks structure. 
@@ -37,6 +38,7 @@ struct rcu_tasks_percpu { unsigned long rtp_jiffies; unsigned long rtp_n_lock_retries; struct work_struct rtp_work; + struct irq_work rtp_irq_work; struct rcu_head barrier_q_head; int cpu; struct rcu_tasks *rtpp; @@ -102,9 +104,12 @@ struct rcu_tasks { char *kname; }; +static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); + #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ + .rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \ }; \ static struct rcu_tasks rt_name = \ { \ @@ -230,6 +235,16 @@ static void cblist_init_generic(struct rcu_tasks *rtp) pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim)); } +// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). +static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) +{ + struct rcu_tasks *rtp; + struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); + + rtp = rtpcp->rtpp; + wake_up(&rtp->cbs_wq); +} + // Enqueue a callback for the specified flavor of Tasks RCU. static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, struct rcu_tasks *rtp) @@ -263,7 +278,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); /* We can't create the thread unless interrupts are enabled. */ if (needwake && READ_ONCE(rtp->kthread_ptr)) - wake_up(&rtp->cbs_wq); + irq_work_queue(&rtpcp->rtp_irq_work); } // Wait for a grace period for the specified flavor of Tasks RCU. -- cgit v1.2.3-59-g8ed1b From ab97152f88a4d580b89f0b7cc3028ffac438216f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 24 Nov 2021 15:12:15 -0800 Subject: rcu-tasks: Use more callback queues if contention encountered The rcupdate.rcu_task_enqueue_lim module parameter allows system administrators to tune the number of callback queues used by the RCU Tasks flavors. However if callback storms are infrequent, it would be better to operate with a single queue on a given system unless and until that system actually needed more queues. Systems not needing more queues can then avoid the overhead of checking the extra queues and especially avoid the overhead of fanning workqueue handlers out to all CPUs to invoke callbacks. This commit therefore switches to using all the CPUs' callback queues if call_rcu_tasks_generic() encounters too much lock contention. The amount of lock contention to tolerate defaults to 100 contended lock acquisitions per jiffy, and can be adjusted using the new rcupdate.rcu_task_contend_lim module parameter. Such switching is undertaken only if the rcupdate.rcu_task_enqueue_lim module parameter is negative, which is its default value (-1). This allows savvy systems administrators to set the number of queues to some known good value and to not have to worry about the kernel doing any second guessing. [ paulmck: Apply feedback from Guillaume Tucker and kernelci. ] Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- Documentation/admin-guide/kernel-parameters.txt | 8 ++++++++ kernel/rcu/tasks.h | 27 +++++++++++++++++++++---- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9b09fc5dfe66..089f4c5f8225 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4805,6 +4805,14 @@ period to instead use normal non-expedited grace-period processing. + rcupdate.rcu_task_contend_lim= [KNL] + Set the minimum number of callback-queuing-time + lock-contention events per jiffy required to + cause the RCU Tasks flavors to switch to per-CPU + callback queuing. This switching only occurs + when rcupdate.rcu_task_enqueue_lim is set to + the default value of -1. + rcupdate.rcu_task_enqueue_lim= [KNL] Set the number of callback queues to use for the RCU Tasks family of RCU flavors. The default diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 6e8e4cf6fff3..d37ab69b9db8 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -142,6 +142,10 @@ module_param(rcu_task_stall_timeout, int, 0644); static int rcu_task_enqueue_lim __read_mostly = -1; module_param(rcu_task_enqueue_lim, int, 0444); +static bool rcu_task_cb_adjust; +static int rcu_task_contend_lim __read_mostly = 100; +module_param(rcu_task_contend_lim, int, 0444); + /* RCU tasks grace-period state for debugging. */ #define RTGS_INIT 0 #define RTGS_WAIT_WAIT_CBS 1 @@ -207,10 +211,13 @@ static void cblist_init_generic(struct rcu_tasks *rtp) int lim; raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); - if (rcu_task_enqueue_lim < 0) - rcu_task_enqueue_lim = nr_cpu_ids; - else if (rcu_task_enqueue_lim == 0) + if (rcu_task_enqueue_lim < 0) { + rcu_task_enqueue_lim = 1; + rcu_task_cb_adjust = true; + pr_info("%s: Setting adjustable number of callback queues.\n", __func__); + } else if (rcu_task_enqueue_lim == 0) { rcu_task_enqueue_lim = 1; + } lim = rcu_task_enqueue_lim; if (lim > nr_cpu_ids) @@ -251,6 +258,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, { unsigned long flags; unsigned long j; + bool needadjust = false; bool needwake; struct rcu_tasks_percpu *rtpcp; @@ -266,7 +274,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, rtpcp->rtp_jiffies = j; rtpcp->rtp_n_lock_retries = 0; } - rtpcp->rtp_n_lock_retries++; + if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && + READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) + needadjust = true; // Defer adjustment to avoid deadlock. } if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. @@ -276,6 +286,15 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, needwake = rcu_segcblist_empty(&rtpcp->cblist); rcu_segcblist_enqueue(&rtpcp->cblist, rhp); raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + if (unlikely(needadjust)) { + raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); + if (rtp->percpu_enqueue_lim != nr_cpu_ids) { + WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids)); + smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); + pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); + } + raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); + } /* We can't create the thread unless interrupts are enabled. 
*/ if (needwake && READ_ONCE(rtp->kthread_ptr)) irq_work_queue(&rtpcp->rtp_irq_work); -- cgit v1.2.3-59-g8ed1b From 2cee0789b458afa384c422b5969c1a338891fd33 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Nov 2021 11:46:33 -0800 Subject: rcu-tasks: Use separate ->percpu_dequeue_lim for callback dequeueing Decreasing the number of callback queues is a bit tricky because it is necessary to handle callbacks that were queued before the number of queues decreased, but which were not ready to invoke until afterwards. This commit takes a first step in this direction by maintaining a separate ->percpu_dequeue_lim to control callback dequeueing, in addition to the existing ->percpu_enqueue_lim which now controls only enqueueing. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index d37ab69b9db8..b4a2cab6985a 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -66,7 +66,8 @@ struct rcu_tasks_percpu { * @call_func: This flavor's call_rcu()-equivalent function. * @rtpcpu: This flavor's rcu_tasks_percpu structure. * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. - * @percpu_enqueue_lim: Number of per-CPU callback queues in use. + * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. + * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. * @barrier_q_mutex: Serialize barrier operations. * @barrier_q_count: Number of queues being waited on. * @barrier_q_completion: Barrier wait/wakeup mechanism. @@ -96,6 +97,7 @@ struct rcu_tasks { struct rcu_tasks_percpu __percpu *rtpcpu; int percpu_enqueue_shift; int percpu_enqueue_lim; + int percpu_dequeue_lim; struct mutex barrier_q_mutex; atomic_t barrier_q_count; struct completion barrier_q_completion; @@ -121,6 +123,7 @@ static struct rcu_tasks rt_name = \ .name = n, \ .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \ .percpu_enqueue_lim = 1, \ + .percpu_dequeue_lim = 1, \ .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ .kname = #rt_name, \ @@ -223,6 +226,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp) if (lim > nr_cpu_ids) lim = nr_cpu_ids; WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim)); + WRITE_ONCE(rtp->percpu_dequeue_lim, lim); smp_store_release(&rtp->percpu_enqueue_lim, lim); for_each_possible_cpu(cpu) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); @@ -290,6 +294,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); if (rtp->percpu_enqueue_lim != nr_cpu_ids) { WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids)); + WRITE_ONCE(rtp->percpu_enqueue_lim, nr_cpu_ids); smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); } @@ -342,7 +347,7 @@ static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) init_completion(&rtp->barrier_q_completion); atomic_set(&rtp->barrier_q_count, 2); for_each_possible_cpu(cpu) { - if (cpu >= smp_load_acquire(&rtp->percpu_enqueue_lim)) + if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) break; rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; @@ -366,7 +371,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) unsigned long flags; int 
needgpcb = 0; - for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_enqueue_lim); cpu++) { + for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); /* Advance and accelerate any new callbacks. */ @@ -397,11 +402,11 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu cpu = rtpcp->cpu; cpunext = cpu * 2 + 1; - if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) { + if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); cpunext++; - if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) { + if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); } -- cgit v1.2.3-59-g8ed1b From fd796e4139b481733a701c4d406056538f4c73cc Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 29 Nov 2021 16:52:31 -0800 Subject: rcu-tasks: Use fewer callbacks queues if callback flood ends By default, when lock contention is encountered, the RCU Tasks flavors of RCU switch to using per-CPU queueing. However, if the callback flood ends, per-CPU queueing continues to be used, which introduces significant additional overhead, especially for callback invocation, which fans out a series of workqueue handlers. This commit therefore switches back to single-queue operation if at the beginning of a grace period there are very few callbacks. The definition of "very few" is set by the rcupdate.rcu_task_collapse_lim module parameter, which defaults to 10. This switch happens in two phases, with the first phase causing future callbacks to be enqueued on CPU 0's queue, but with all queues continuing to be checked for grace periods and callback invocation. The second phase checks to see if an RCU grace period has elapsed and if all remaining RCU-Tasks callbacks are queued on CPU 0. If so, only CPU 0 is checked for future grace periods and callback operation. Of course, the return of contention anywhere during this process will result in returning to per-CPU callback queueing. Reported-by: Martin Lau Cc: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 8 +++++ kernel/rcu/tasks.h | 48 +++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 089f4c5f8225..d1b0542b8564 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4805,6 +4805,14 @@ period to instead use normal non-expedited grace-period processing. + rcupdate.rcu_task_collapse_lim= [KNL] + Set the maximum number of callbacks present + at the beginning of a grace period that allows + the RCU Tasks flavors to collapse back to using + a single callback queue. This switching only + occurs when rcupdate.rcu_task_enqueue_lim is + set to the default value of -1. + rcupdate.rcu_task_contend_lim= [KNL] Set the minimum number of callback-queuing-time lock-contention events per jiffy required to diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index b4a2cab6985a..84f1d91604cc 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -68,6 +68,7 @@ struct rcu_tasks_percpu { * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. 
* @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. + * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. * @barrier_q_mutex: Serialize barrier operations. * @barrier_q_count: Number of queues being waited on. * @barrier_q_completion: Barrier wait/wakeup mechanism. @@ -98,6 +99,7 @@ struct rcu_tasks { int percpu_enqueue_shift; int percpu_enqueue_lim; int percpu_dequeue_lim; + unsigned long percpu_dequeue_gpseq; struct mutex barrier_q_mutex; atomic_t barrier_q_count; struct completion barrier_q_completion; @@ -148,6 +150,8 @@ module_param(rcu_task_enqueue_lim, int, 0444); static bool rcu_task_cb_adjust; static int rcu_task_contend_lim __read_mostly = 100; module_param(rcu_task_contend_lim, int, 0444); +static int rcu_task_collapse_lim __read_mostly = 10; +module_param(rcu_task_collapse_lim, int, 0444); /* RCU tasks grace-period state for debugging. */ #define RTGS_INIT 0 @@ -269,6 +273,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, rhp->next = NULL; rhp->func = func; local_irq_save(flags); + rcu_read_lock(); rtpcp = per_cpu_ptr(rtp->rtpcpu, smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. @@ -294,12 +299,13 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); if (rtp->percpu_enqueue_lim != nr_cpu_ids) { WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids)); - WRITE_ONCE(rtp->percpu_enqueue_lim, nr_cpu_ids); + WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); } + rcu_read_unlock(); /* We can't create the thread unless interrupts are enabled. */ if (needwake && READ_ONCE(rtp->kthread_ptr)) irq_work_queue(&rtpcp->rtp_irq_work); @@ -369,15 +375,25 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) { int cpu; unsigned long flags; + long n; + long ncbs = 0; + long ncbsnz = 0; int needgpcb = 0; for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); /* Advance and accelerate any new callbacks. */ - if (rcu_segcblist_empty(&rtpcp->cblist)) + if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) continue; raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + // Should we shrink down to a single callback queue? + n = rcu_segcblist_n_cbs(&rtpcp->cblist); + if (n) { + ncbs += n; + if (cpu > 0) + ncbsnz += n; + } rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); if (rcu_segcblist_pend_cbs(&rtpcp->cblist)) @@ -386,6 +402,34 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) needgpcb |= 0x1; raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); } + + // Shrink down to a single callback queue if appropriate. + // This is done in two stages: (1) If there are no more than + // rcu_task_collapse_lim callbacks on CPU 0 and none on any other + // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, + // if there has not been an increase in callbacks, limit dequeuing + // to CPU 0. Note the matching RCU read-side critical section in + // call_rcu_tasks_generic(). 
+ if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { + raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); + if (rtp->percpu_enqueue_lim > 1) { + WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids)); + smp_store_release(&rtp->percpu_enqueue_lim, 1); + rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); + pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); + } + raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); + } + if (rcu_task_cb_adjust && !ncbsnz && + poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) { + raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); + if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { + WRITE_ONCE(rtp->percpu_dequeue_lim, 1); + pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); + } + raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); + } + return needgpcb; } -- cgit v1.2.3-59-g8ed1b From 2ebc45c44c4f3cc4c757430b2409ece4f976892e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:03 +0100 Subject: rcu/nocb: Remove rcu_node structure from nocb list when de-offloaded The nocb_gp_wait() function iterates over all CPUs in its group, including even those CPUs that have been de-offloaded. This is of course suboptimal, especially if none of the CPUs within the group are currently offloaded. This will become even more of a problem once a nocb kthread is created for all possible CPUs. Therefore use a standard double linked list to link all the offloaded rcu_data structures and safely add or delete these structure as we offload or de-offload them, respectively. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 7 +++++-- kernel/rcu/tree_nocb.h | 45 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4f6c67b3ccd5..5884380f4039 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -227,8 +227,11 @@ struct rcu_data { struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */ bool nocb_cb_sleep; /* Is the nocb CB thread asleep? */ struct task_struct *nocb_cb_kthread; - struct rcu_data *nocb_next_cb_rdp; - /* Next rcu_data in wakeup chain. */ + struct list_head nocb_head_rdp; /* + * Head of rcu_data list in wakeup chain, + * if rdp_gp. + */ + struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */ /* The following fields are used by CB kthread, hence new cacheline. */ struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp; diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 2461fe8d0c23..8e94a5344afe 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -625,7 +625,21 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) * and the global grace-period kthread are awakened if needed. */ WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); - for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { + /* + * An rcu_data structure is removed from the list after its + * CPU is de-offloaded and added to the list before that CPU is + * (re-)offloaded. If the following loop happens to be referencing + * that rcu_data structure during the time that the corresponding + * CPU is de-offloaded and then immediately re-offloaded, this + * loop's rdp pointer will be carried to the end of the list by + * the resulting pair of list operations. 
This can cause the loop + * to skip over some of the rcu_data structures that were supposed + * to have been scanned. Fortunately a new iteration through the + * entire loop is forced after a given CPU's rcu_data structure + * is added to the list, so the skipped-over rcu_data structures + * won't be ignored for long. + */ + list_for_each_entry_rcu(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp, 1) { bool needwake_state = false; if (!nocb_gp_enabled_cb(rdp)) @@ -1003,6 +1017,8 @@ static long rcu_nocb_rdp_deoffload(void *arg) swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP)); + /* Stop nocb_gp_wait() from iterating over this structure. */ + list_del_rcu(&rdp->nocb_entry_rdp); /* * Lock one last time to acquire latest callback updates from kthreads * so we can later handle callbacks locally without locking. @@ -1066,6 +1082,17 @@ static long rcu_nocb_rdp_offload(void *arg) return -EINVAL; pr_info("Offloading %d\n", rdp->cpu); + + /* + * Cause future nocb_gp_wait() invocations to iterate over + * structure, resetting ->nocb_gp_sleep and waking up the related + * "rcuog". Since nocb_gp_wait() in turn locks ->nocb_gp_lock + * before setting ->nocb_gp_sleep again, we are guaranteed to + * iterate this newly added structure before "rcuog" goes to + * sleep again. + */ + list_add_tail_rcu(&rdp->nocb_entry_rdp, &rdp->nocb_gp_rdp->nocb_head_rdp); + /* * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING * is set. @@ -1268,7 +1295,6 @@ static void __init rcu_organize_nocb_kthreads(void) int nl = 0; /* Next GP kthread. */ struct rcu_data *rdp; struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */ - struct rcu_data *rdp_prev = NULL; if (!cpumask_available(rcu_nocb_mask)) return; @@ -1288,8 +1314,8 @@ static void __init rcu_organize_nocb_kthreads(void) /* New GP kthread, set up for CBs & next GP. */ gotnocbs = true; nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; - rdp->nocb_gp_rdp = rdp; rdp_gp = rdp; + INIT_LIST_HEAD(&rdp->nocb_head_rdp); if (dump_tree) { if (!firsttime) pr_cont("%s\n", gotnocbscbs @@ -1302,12 +1328,11 @@ static void __init rcu_organize_nocb_kthreads(void) } else { /* Another CB kthread, link to previous GP kthread. */ gotnocbscbs = true; - rdp->nocb_gp_rdp = rdp_gp; - rdp_prev->nocb_next_cb_rdp = rdp; if (dump_tree) pr_cont(" %d", cpu); } - rdp_prev = rdp; + rdp->nocb_gp_rdp = rdp_gp; + list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); } if (gotnocbs && dump_tree) pr_cont("%s\n", gotnocbscbs ? "" : " (self only)"); @@ -1369,6 +1394,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) { char bufw[20]; char bufr[20]; + struct rcu_data *nocb_next_rdp; struct rcu_segcblist *rsclp = &rdp->cblist; bool waslocked; bool wassleep; @@ -1376,11 +1402,16 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) if (rdp->nocb_gp_rdp == rdp) show_rcu_nocb_gp_state(rdp); + nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp, + &rdp->nocb_entry_rdp, + typeof(*rdp), + nocb_entry_rdp); + sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", rdp->cpu, rdp->nocb_gp_rdp->cpu, - rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1, + nocb_next_rdp ? 
nocb_next_rdp->cpu : -1, "kK"[!!rdp->nocb_cb_kthread], "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], "cC"[!!atomic_read(&rdp->nocb_lock_contended)], -- cgit v1.2.3-59-g8ed1b From 8d9703964697340e073305574de4f5df31a28ba9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:04 +0100 Subject: rcu/nocb: Prepare nocb_cb_wait() to start with a non-offloaded rdp In order to be able to toggle the offloaded state from cpusets, a nocb kthread will need to be created for all possible CPUs whenever either of the "rcu_nocbs=" or "nohz_full=" parameters are specified. Therefore, the nocb_cb_wait() kthread must be prepared to start running on a de-offloaded rdp. To accomplish this, simply move the sleeping condition to the beginning of the nocb_cb_wait() function, which prevents this kthread from attempting to invoke callbacks before the corresponding CPU is offloaded. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_nocb.h | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 8e94a5344afe..cd596cf4ee79 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -803,6 +803,18 @@ static void nocb_cb_wait(struct rcu_data *rdp) bool can_sleep = true; struct rcu_node *rnp = rdp->mynode; + do { + swait_event_interruptible_exclusive(rdp->nocb_cb_wq, + nocb_cb_wait_cond(rdp)); + + // VVV Ensure CB invocation follows _sleep test. + if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ + WARN_ON(signal_pending(current)); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); + } + } while (!nocb_cb_can_run(rdp)); + + local_irq_save(flags); rcu_momentary_dyntick_idle(); local_irq_restore(flags); @@ -855,17 +867,6 @@ static void nocb_cb_wait(struct rcu_data *rdp) if (needwake_state) swake_up_one(&rdp->nocb_state_wq); - - do { - swait_event_interruptible_exclusive(rdp->nocb_cb_wq, - nocb_cb_wait_cond(rdp)); - - // VVV Ensure CB invocation follows _sleep test. - if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ - WARN_ON(signal_pending(current)); - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); - } - } while (!nocb_cb_can_run(rdp)); } /* -- cgit v1.2.3-59-g8ed1b From a81aeaf7a1de51400374a8e3982a3cc3ff130dd1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:05 +0100 Subject: rcu/nocb: Optimize kthreads and rdp initialization Currently cpumask_available() is used to prevent from unwanted NOCB initialization. However if neither "rcu_nocbs=" nor "nohz_full=" parameters are passed to a kernel built with CONFIG_CPUMASK_OFFSTACK=n, the initialization path is still taken, running through all sorts of needless operations and iterations on an empty cpumask. Fix this by relying on a real initialization state instead. This also optimizes kthread creation, preventing needless iteration over all online CPUs when the kernel is booted without any offloaded CPUs. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_nocb.h | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index cd596cf4ee79..42092de67705 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -60,6 +60,9 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp) * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. * If the list is invalid, a warning is emitted and all CPUs are offloaded. */ + +static bool rcu_nocb_is_setup; + static int __init rcu_nocb_setup(char *str) { alloc_bootmem_cpumask_var(&rcu_nocb_mask); @@ -67,6 +70,7 @@ static int __init rcu_nocb_setup(char *str) pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); cpumask_setall(rcu_nocb_mask); } + rcu_nocb_is_setup = true; return 1; } __setup("rcu_nocbs=", rcu_nocb_setup); @@ -1167,13 +1171,17 @@ void __init rcu_init_nohz(void) need_rcu_nocb_mask = true; #endif /* #if defined(CONFIG_NO_HZ_FULL) */ - if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) { - if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { - pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); - return; + if (need_rcu_nocb_mask) { + if (!cpumask_available(rcu_nocb_mask)) { + if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { + pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); + return; + } } + rcu_nocb_is_setup = true; } - if (!cpumask_available(rcu_nocb_mask)) + + if (!rcu_nocb_is_setup) return; #if defined(CONFIG_NO_HZ_FULL) @@ -1275,8 +1283,10 @@ static void __init rcu_spawn_nocb_kthreads(void) { int cpu; - for_each_online_cpu(cpu) - rcu_spawn_cpu_nocb_kthread(cpu); + if (rcu_nocb_is_setup) { + for_each_online_cpu(cpu) + rcu_spawn_cpu_nocb_kthread(cpu); + } } /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */ -- cgit v1.2.3-59-g8ed1b From 2cf4528d6dd6f5a7f34ae07e26176a7932310eeb Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:06 +0100 Subject: rcu/nocb: Create kthreads on all CPUs if "rcu_nocbs=" or "nohz_full=" are passed In order to be able to (de-)offload any CPU using cpusets in the future, create the NOCB data structures for all possible CPUs. For now this is done only as long as the "rcu_nocbs=" or "nohz_full=" kernel parameters are passed to avoid the unnecessary overhead for most users. Note that the rcuog and rcuoc kthreads are not created until at least one of the corresponding CPUs comes online. This approach avoids the creation of excess kthreads when firmware lies about the number of CPUs present on the system. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_nocb.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 42092de67705..f580a6b2e74e 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1237,11 +1237,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu) struct rcu_data *rdp_gp; struct task_struct *t; - /* - * If this isn't a no-CBs CPU or if it already has an rcuo kthread, - * then nothing to do. - */ - if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread) + /* If there already is an rcuo kthread, then nothing to do. */ + if (rdp->nocb_cb_kthread) return; /* If we didn't spawn the GP kthread first, reorganize! 
*/ @@ -1269,7 +1266,7 @@ static void rcu_spawn_one_nocb_kthread(int cpu) */ static void rcu_spawn_cpu_nocb_kthread(int cpu) { - if (rcu_scheduler_fully_active) + if (rcu_scheduler_fully_active && rcu_nocb_is_setup) rcu_spawn_one_nocb_kthread(cpu); } @@ -1319,7 +1316,7 @@ static void __init rcu_organize_nocb_kthreads(void) * Should the corresponding CPU come online in the future, then * we will spawn the needed set of rcu_nocb_kthread() kthreads. */ - for_each_cpu(cpu, rcu_nocb_mask) { + for_each_possible_cpu(cpu) { rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu >= nl) { /* New GP kthread, set up for CBs & next GP. */ @@ -1343,7 +1340,8 @@ static void __init rcu_organize_nocb_kthreads(void) pr_cont(" %d", cpu); } rdp->nocb_gp_rdp = rdp_gp; - list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); + if (cpumask_test_cpu(cpu, rcu_nocb_mask)) + list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); } if (gotnocbs && dump_tree) pr_cont("%s\n", gotnocbscbs ? "" : " (self only)"); -- cgit v1.2.3-59-g8ed1b From d2cf0854d728c42524efc169edb3505de8c1a9dc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:07 +0100 Subject: rcu/nocb: Allow empty "rcu_nocbs" kernel parameter Allow the rcu_nocbs kernel parameter to be specified just by itself, without specifying any CPUs. This allows systems administrators to use "rcu_nocbs" to specify that none of the CPUs are to be offloaded at boot time, but than any of them may be offloaded at runtime via cpusets. In contrast, if the "rcu_nocbs" or "nohz_full" kernel parameters are not specified at all, then not only are none of the CPUs offloaded at boot, none of them can be offloaded at runtime, either. While in the area, modernize the description of the "rcuo" kthreads' naming scheme. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 37 ++++++++++++++++--------- kernel/rcu/tree_nocb.h | 10 ++++--- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9725c546a0d4..8b5da10f932e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4343,19 +4343,30 @@ Disable the Correctable Errors Collector, see CONFIG_RAS_CEC help text. - rcu_nocbs= [KNL] - The argument is a cpu list, as described above. - - In kernels built with CONFIG_RCU_NOCB_CPU=y, set - the specified list of CPUs to be no-callback CPUs. - Invocation of these CPUs' RCU callbacks will be - offloaded to "rcuox/N" kthreads created for that - purpose, where "x" is "p" for RCU-preempt, and - "s" for RCU-sched, and "N" is the CPU number. - This reduces OS jitter on the offloaded CPUs, - which can be useful for HPC and real-time - workloads. It can also improve energy efficiency - for asymmetric multiprocessors. + rcu_nocbs[=cpu-list] + [KNL] The optional argument is a cpu list, + as described above. + + In kernels built with CONFIG_RCU_NOCB_CPU=y, + enable the no-callback CPU mode, which prevents + such CPUs' callbacks from being invoked in + softirq context. 
Invocation of such CPUs' RCU + callbacks will instead be offloaded to "rcuox/N" + kthreads created for that purpose, where "x" is + "p" for RCU-preempt, "s" for RCU-sched, and "g" + for the kthreads that mediate grace periods; and + "N" is the CPU number. This reduces OS jitter on + the offloaded CPUs, which can be useful for HPC + and real-time workloads. It can also improve + energy efficiency for asymmetric multiprocessors. + + If a cpulist is passed as an argument, the specified + list of CPUs is set to no-callback mode from boot. + + Otherwise, if the '=' sign and the cpulist + arguments are omitted, no CPU will be set to + no-callback mode from boot but the mode may be + toggled at runtime via cpusets. rcu_nocb_poll [KNL] Rather than requiring that offloaded CPUs diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index f580a6b2e74e..0c1802ce4764 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -66,14 +66,16 @@ static bool rcu_nocb_is_setup; static int __init rcu_nocb_setup(char *str) { alloc_bootmem_cpumask_var(&rcu_nocb_mask); - if (cpulist_parse(str, rcu_nocb_mask)) { - pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); - cpumask_setall(rcu_nocb_mask); + if (*str == '=') { + if (cpulist_parse(++str, rcu_nocb_mask)) { + pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); + cpumask_setall(rcu_nocb_mask); + } } rcu_nocb_is_setup = true; return 1; } -__setup("rcu_nocbs=", rcu_nocb_setup); +__setup("rcu_nocbs", rcu_nocb_setup); static int __init parse_rcu_nocb_poll(char *arg) { -- cgit v1.2.3-59-g8ed1b From 10d4703154a72fb4f30fc90a4a7212bf138c17a2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Nov 2021 01:37:08 +0100 Subject: rcu/nocb: Merge rcu_spawn_cpu_nocb_kthread() and rcu_spawn_one_nocb_kthread() The rcu_spawn_one_nocb_kthread() function is called only from rcu_spawn_cpu_nocb_kthread(). Therefore, inline the former into the latter, saving a few lines of code. Reviewed-by: Neeraj Upadhyay Signed-off-by: Frederic Weisbecker Cc: Boqun Feng Cc: Uladzislau Rezki Cc: Josh Triplett Cc: Joel Fernandes Tested-by: Juri Lelli Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_nocb.h | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 0c1802ce4764..eeafb546a7a0 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1233,12 +1233,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread * for this CPU's group has not yet been created, spawn it as well. */ -static void rcu_spawn_one_nocb_kthread(int cpu) +static void rcu_spawn_cpu_nocb_kthread(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_data *rdp_gp; struct task_struct *t; + if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup) + return; + /* If there already is an rcuo kthread, then nothing to do. */ if (rdp->nocb_cb_kthread) return; @@ -1262,16 +1265,6 @@ static void rcu_spawn_one_nocb_kthread(int cpu) WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); } -/* - * If the specified CPU is a no-CBs CPU that does not already have its - * rcuo kthread, spawn it. - */ -static void rcu_spawn_cpu_nocb_kthread(int cpu) -{ - if (rcu_scheduler_fully_active && rcu_nocb_is_setup) - rcu_spawn_one_nocb_kthread(cpu); -} - /* * Once the scheduler is running, spawn rcuo kthreads for all online * no-CBs CPUs. This assumes that the early_initcall()s happen before -- cgit v1.2.3-59-g8ed1b