Diffstat:

 MAINTAINERS                        |  7
 kernel/locking/lockdep.c           |  4
 kernel/locking/lockdep_internals.h |  3
 kernel/locking/test-ww_mutex.c     |  9
 lib/Kconfig.debug                  | 18
 rust/helpers/mutex.c               |  5
 rust/helpers/spinlock.c            |  5
 rust/kernel/sync.rs                |  4
 rust/kernel/sync/lock.rs           | 35
 rust/kernel/sync/lock/mutex.rs     | 13
 rust/kernel/sync/lock/spinlock.rs  | 13
 11 files changed, 95 insertions(+), 21 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index baf0eeb9a355..9bcd4e72a2dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13425,8 +13425,8 @@ LOCKING PRIMITIVES
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Will Deacon <will@kernel.org>
+M: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP & RUST)
R: Waiman Long <longman@redhat.com>
-R: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@ -13440,6 +13440,11 @@ F: include/linux/seqlock.h
F: include/linux/spinlock*.h
F: kernel/locking/
F: lib/locking*.[ch]
+F: rust/helpers/mutex.c
+F: rust/helpers/spinlock.c
+F: rust/kernel/sync/lock.rs
+F: rust/kernel/sync/lock/
+F: rust/kernel/sync/locked_by.rs
X: kernel/locking/locktorture.c
LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2d8ec0351ef9..29acd238dad7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -157,10 +157,12 @@ static inline void lockdep_unlock(void)
__this_cpu_dec(lockdep_recursion);
}
+#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_assert_locked(void)
{
return DEBUG_LOCKS_WARN_ON(__owner != current);
}
+#endif
static struct task_struct *lockdep_selftest_task_struct;
@@ -430,7 +432,7 @@ static inline u16 hlock_id(struct held_lock *hlock)
return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}
-static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
+static inline __maybe_unused unsigned int chain_hlock_class_idx(u16 hlock_id)
{
return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index bbe9000260d0..20f9ef58d3d0 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -119,7 +119,8 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
+#define AVG_LOCKDEP_CHAIN_DEPTH 5
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)
extern struct lock_chain lock_chains[];
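The named constant makes the sizing arithmetic legible: with the default LOCKDEP_CHAINS_BITS of 16 (see the Kconfig.debug change below), MAX_LOCKDEP_CHAINS = 2^16 = 65536, so the chain-hlock table holds 65536 * 5 = 327680 u16 entries, about 640 KiB.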
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5d58b2c0ef98..bcb1b9fea588 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -404,7 +404,7 @@ static inline u32 prandom_u32_below(u32 ceil)
static int *get_random_order(int count)
{
int *order;
- int n, r, tmp;
+ int n, r;
order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
if (!order)
@@ -415,11 +415,8 @@ static int *get_random_order(int count)
for (n = count - 1; n > 1; n--) {
r = prandom_u32_below(n + 1);
- if (r != n) {
- tmp = order[n];
- order[n] = order[r];
- order[r] = tmp;
- }
+ if (r != n)
+ swap(order[n], order[r]);
}
return order;
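The loop is a Fisher-Yates shuffle, and swap() replaces the open-coded three-variable exchange without changing behaviour. A minimal Rust sketch of the same shuffle, for illustration only; `rand_below` stands in for prandom_u32_below() and is hypothetical:

    /// Shuffle `order` in place, mirroring get_random_order(): walk from
    /// the last index down to 2 and exchange with a randomly chosen
    /// earlier (or equal) slot.
    fn shuffle(order: &mut [i32], mut rand_below: impl FnMut(u32) -> u32) {
        for n in (2..order.len()).rev() {
            let r = rand_below(n as u32 + 1) as usize;
            if r != n {
                order.swap(n, r); // slice::swap, like the kernel's swap()
            }
        }
    }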
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 49a3819d4d7c..cf2a41dc7682 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1502,15 +1502,15 @@ config LOCKDEP_SMALL
bool
config LOCKDEP_BITS
- int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ int "Size for MAX_LOCKDEP_ENTRIES (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 24
default 15
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
config LOCKDEP_CHAINS_BITS
- int "Bitsize for MAX_LOCKDEP_CHAINS"
+ int "Size for MAX_LOCKDEP_CHAINS (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
range 10 21
default 16
@@ -1518,25 +1518,25 @@ config LOCKDEP_CHAINS_BITS
Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
config LOCKDEP_STACK_TRACE_BITS
- int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ int "Size for MAX_STACK_TRACE_ENTRIES (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 19
help
Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
config LOCKDEP_STACK_TRACE_HASH_BITS
- int "Bitsize for STACK_TRACE_HASH_SIZE"
+ int "Size for STACK_TRACE_HASH_SIZE (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 14
help
Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
config LOCKDEP_CIRCULAR_QUEUE_BITS
- int "Bitsize for elements in circular_queue struct"
+ int "Size for elements in circular_queue struct (as Nth power of 2)"
depends on LOCKDEP
- range 10 30
+ range 10 26
default 12
help
Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
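The reworded prompts spell out that each option is an exponent, not a byte count: a value N sizes the corresponding table as 2^N. For example, the default LOCKDEP_BITS of 15 gives MAX_LOCKDEP_ENTRIES = 2^15 = 32768, and the tightened upper bound of 24 still allows 2^24 = 16777216 entries, presumably while keeping the statically allocated tables away from the extreme sizes the old limit of 30 permitted.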
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
index 7e00680958ef..06575553eda5 100644
--- a/rust/helpers/mutex.c
+++ b/rust/helpers/mutex.c
@@ -12,3 +12,8 @@ void rust_helper___mutex_init(struct mutex *mutex, const char *name,
{
__mutex_init(mutex, name, key);
}
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
index 5971fdf6f755..42c4bf01a23e 100644
--- a/rust/helpers/spinlock.c
+++ b/rust/helpers/spinlock.c
@@ -30,3 +30,8 @@ int rust_helper_spin_trylock(spinlock_t *lock)
{
return spin_trylock(lock);
}
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+ lockdep_assert_held(lock);
+}
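Both helpers exist because lockdep_assert_held() is a macro, which bindgen cannot express across the FFI boundary; wrapping it in an exported C function gives Rust a real symbol to call. Since the kernel build strips the rust_helper_ prefix, the resulting bindings look roughly like this (a hand-written approximation, not literal bindgen output):

    // Assumed shape of the generated declarations in the bindings crate.
    extern "C" {
        pub fn mutex_assert_is_held(mutex: *mut mutex);
        pub fn spin_assert_is_held(lock: *mut spinlock_t);
    }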
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 1eab7ebf25fd..dffdaad972ce 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -16,8 +16,8 @@ pub mod poll;
pub use arc::{Arc, ArcBorrow, UniqueArc};
pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
-pub use lock::mutex::{new_mutex, Mutex};
-pub use lock::spinlock::{new_spinlock, SpinLock};
+pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
+pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
pub use locked_by::LockedBy;
/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
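Re-exporting the guard types lets downstream code name them in signatures instead of reaching into sync::lock for Guard and the backend types. A minimal sketch of the intended use; Inner and process() are hypothetical:

    use kernel::sync::{Mutex, MutexGuard};

    struct Inner {
        count: u64,
    }

    // A guard can now be passed around by name rather than spelled out
    // as Guard<'_, Inner, MutexBackend>.
    fn process(guard: &mut MutexGuard<'_, Inner>) {
        guard.count += 1; // the guard derefs to the protected data
    }

    fn caller(m: &Mutex<Inner>) {
        let mut guard = m.lock();
        process(&mut guard);
        // the Mutex is unlocked when `guard` drops
    }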
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 41dcddac69e2..eb80048e0110 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -90,12 +90,20 @@ pub unsafe trait Backend {
// SAFETY: The safety requirements ensure that the lock is initialised.
*guard_state = unsafe { Self::lock(ptr) };
}
+
+ /// Asserts that the lock is held using lockdep.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn assert_is_held(ptr: *mut Self::State);
}
/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
+#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -134,6 +142,28 @@ impl<T, B: Backend> Lock<T, B> {
}
}
+impl<B: Backend> Lock<(), B> {
+ /// Constructs a [`Lock`] from a raw pointer.
+ ///
+ /// This can be useful for interacting with a lock which was initialised outside of Rust.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
+ /// the whole lifetime of `'a`.
+ ///
+ /// [`State`]: Backend::State
+ pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
+ // SAFETY:
+ // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
+ // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
+ // the struct
+ // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
+ // `B::State`.
+ unsafe { &*ptr.cast() }
+ }
+}
+
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
pub fn lock(&self) -> Guard<'_, T, B> {
@@ -211,7 +241,10 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
- pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+ unsafe { B::assert_is_held(lock.state.get()) };
+
Self {
lock,
state,
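Taken together, the lock.rs changes let Rust adopt a lock that was initialised outside Rust: #[repr(C)] plus the zero-sized () data type make Lock<(), B> layout-compatible with a bare B::State, so from_raw() can reinterpret a raw pointer, and the now-public Guard::new() is double-checked under lockdep via the new assert_is_held() hook. A hedged sketch of the from_raw() path; `raw_mutex` is a hypothetical pointer handed over from C code:

    use kernel::sync::Mutex;

    // SAFETY contract (from from_raw()): `raw_mutex` points to a mutex
    // that C has already initialised with __mutex_init(), and it stays
    // valid for as long as the returned reference is used.
    unsafe fn with_c_mutex(raw_mutex: *mut bindings::mutex) {
        // Reinterpret the raw C mutex as a data-less Rust `Mutex<()>`.
        let lock = unsafe { Mutex::<()>::from_raw(raw_mutex) };
        // From here on, ordinary Rust locking applies; the guard
        // unlocks on drop.
        let _guard = lock.lock();
    }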
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 0e946ebefce1..70cadbc2e8e2 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -86,6 +86,14 @@ pub use new_mutex;
/// [`struct mutex`]: srctree/include/linux/mutex.h
pub type Mutex<T> = super::Lock<T, MutexBackend>;
+/// A [`Guard`] acquired from locking a [`Mutex`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`Mutex`]. It will unlock
+/// the [`Mutex`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type MutexGuard<'a, T> = super::Guard<'a, T, MutexBackend>;
+
/// A kernel `struct mutex` lock backend.
pub struct MutexBackend;
@@ -126,4 +134,9 @@ unsafe impl super::Backend for MutexBackend {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::mutex_assert_is_held(ptr) }
+ }
}
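With Guard::new() public, code outside sync::lock can materialise a guard for a mutex that is already held, e.g. in a callback that C invokes with the lock taken, and assert_is_held() means a mistaken call trips lockdep instead of silently producing an unprotected guard. A sketch under the assumption (not visible in this diff) that MutexBackend::GuardState is (); every name below is hypothetical:

    use kernel::sync::{Mutex, MutexGuard};

    // Assumed C-side contract: `raw` is valid, initialised, and locked
    // by the caller for the duration of this callback.
    unsafe extern "C" fn on_event(raw: *mut bindings::mutex) {
        let lock = unsafe { Mutex::<()>::from_raw(raw) };
        // SAFETY: the caller holds the mutex, satisfying Guard::new()'s
        // contract; with lockdep enabled this is also verified at run
        // time through mutex_assert_is_held().
        let guard: MutexGuard<'_, ()> = unsafe { MutexGuard::new(lock, ()) };
        // Beware: dropping the guard unlocks the mutex, so leak it here
        // because the C caller expects to unlock the mutex itself.
        core::mem::forget(guard);
    }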
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 9f4d128bed98..ab2f8d075311 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -87,6 +87,14 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
/// A kernel `spinlock_t` lock backend.
pub struct SpinLockBackend;
+/// A [`Guard`] acquired from locking a [`SpinLock`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`SpinLock`]. It will unlock
+/// the [`SpinLock`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type SpinLockGuard<'a, T> = super::Guard<'a, T, SpinLockBackend>;
+
// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
// default implementation that always calls the same locking method.
unsafe impl super::Backend for SpinLockBackend {
@@ -125,4 +133,9 @@ unsafe impl super::Backend for SpinLockBackend {
None
}
}
+
+ unsafe fn assert_is_held(ptr: *mut Self::State) {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ unsafe { bindings::spin_assert_is_held(ptr) }
+ }
}