Merge tag 'locking-core-2025-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Locking primitives:
- Mark devm_mutex_init() as __must_check and fix drivers that didn't
check the return code (Thomas Weißschuh)
- Reorganize <linux/local_lock.h> to better expose the internal APIs
to local variables (Sebastian Andrzej Siewior)
- Remove OWNER_SPINNABLE in rwsem (Jinliang Zheng)
- Remove redundant #ifdefs in the mutex code (Ran Xiaokai)
Lockdep:
- Avoid returning struct in lock_stats() (Arnd Bergmann)
- Change `static const` into enum for LOCKF_*_IRQ_* (Arnd Bergmann)
- Temporarily use synchronize_rcu_expedited() in
lockdep_unregister_key() to speed things up. (Breno Leitao)
Rust runtime:
- Add #[must_use] to Lock::try_lock() (Jason Devers)"
* tag 'locking-core-2025-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Speed up lockdep_unregister_key() with expedited RCU synchronization
locking/mutex: Remove redundant #ifdefs
locking/lockdep: Change 'static const' variables to enum values
locking/lockdep: Avoid struct return in lock_stats()
locking/rwsem: Use OWNER_NONSPINNABLE directly instead of OWNER_SPINNABLE
rust: sync: Add #[must_use] to Lock::try_lock()
locking/mutex: Mark devm_mutex_init() as __must_check
leds: lp8860: Check return value of devm_mutex_init()
spi: spi-nxp-fspi: Check return value of devm_mutex_init()
local_lock: Move this_cpu_ptr() notation from internal to main header
@@ -307,7 +307,9 @@ static int lp8860_probe(struct i2c_client *client)
 	led->client = client;
 	led->led_dev.brightness_set_blocking = lp8860_brightness_set;
 
-	devm_mutex_init(&client->dev, &led->lock);
+	ret = devm_mutex_init(&client->dev, &led->lock);
+	if (ret)
+		return dev_err_probe(&client->dev, ret, "Failed to initialize lock\n");
 
 	led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
 	if (IS_ERR(led->regmap)) {
@@ -1272,7 +1272,9 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to request irq\n");
 
-	devm_mutex_init(dev, &f->lock);
+	ret = devm_mutex_init(dev, &f->lock);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to initialize lock\n");
 
 	ctlr->bus_num = -1;
 	ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
@@ -13,13 +13,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)		__local_lock(lock)
+#define local_lock(lock)		__local_lock(this_cpu_ptr(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)		__local_lock_irq(lock)
+#define local_lock_irq(lock)		__local_lock_irq(this_cpu_ptr(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +28,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags)				\
-	__local_lock_irqsave(lock, flags)
+	__local_lock_irqsave(this_cpu_ptr(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)		__local_unlock(lock)
+#define local_unlock(lock)		__local_unlock(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)		__local_unlock_irq(lock)
+#define local_unlock_irq(lock)		__local_unlock_irq(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,7 +49,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags)			\
-	__local_unlock_irqrestore(lock, flags)
+	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
 
 /**
  * local_lock_init - Runtime initialize a lock instance
@@ -64,7 +64,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)		__local_trylock(lock)
+#define local_trylock(lock)		__local_trylock(this_cpu_ptr(lock))
 
 /**
  * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
@@ -77,7 +77,7 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags)			\
-	__local_trylock_irqsave(lock, flags)
+	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
 
 DEFINE_GUARD(local_lock, local_lock_t __percpu*,
 	     local_lock(_T),
@@ -91,10 +91,10 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock)				\
-	__local_lock_nested_bh(_lock)
+	__local_lock_nested_bh(this_cpu_ptr(_lock))
 
 #define local_unlock_nested_bh(_lock)				\
-	__local_unlock_nested_bh(_lock)
+	__local_unlock_nested_bh(this_cpu_ptr(_lock))
 
 DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
 	     local_lock_nested_bh(_T),
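After this change the public local_lock() family resolves the per-CPU pointer itself, so callers keep passing the address of the per-CPU lock exactly as before. A minimal illustrative sketch of the typical caller pattern (the struct and variable names below are hypothetical, not from this patch set):

/* Illustrative only: foo_pcpu and its fields are made-up names. */
struct foo_pcpu {
	local_lock_t	lock;
	unsigned long	count;
};

static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void foo_count_inc(void)
{
	/* local_lock() now applies this_cpu_ptr() internally. */
	local_lock(&foo_pcpu.lock);
	this_cpu_inc(foo_pcpu.count);
	local_unlock(&foo_pcpu.lock);
}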
@@ -99,14 +99,14 @@ do {								\
 	local_trylock_t	*tl;					\
 	local_lock_t	*l;					\
 								\
-	l = (local_lock_t *)this_cpu_ptr(lock);			\
+	l = (local_lock_t *)(lock);				\
 	tl = (local_trylock_t *)l;				\
 	_Generic((lock),					\
-		 __percpu local_trylock_t *: ({			\
+		 local_trylock_t *: ({				\
 			lockdep_assert(tl->acquired == 0);	\
 			WRITE_ONCE(tl->acquired, 1);		\
 		 }),						\
-		 __percpu local_lock_t *: (void)0);		\
+		 local_lock_t *: (void)0);			\
 	local_lock_acquire(l);					\
 } while (0)
 
@@ -133,7 +133,7 @@ do {								\
 	local_trylock_t	*tl;					\
 								\
 	preempt_disable();					\
-	tl = this_cpu_ptr(lock);				\
+	tl = (lock);						\
 	if (READ_ONCE(tl->acquired)) {				\
 		preempt_enable();				\
 		tl = NULL;					\
@@ -150,7 +150,7 @@ do {								\
 	local_trylock_t	*tl;					\
 								\
 	local_irq_save(flags);					\
-	tl = this_cpu_ptr(lock);				\
+	tl = (lock);						\
 	if (READ_ONCE(tl->acquired)) {				\
 		local_irq_restore(flags);			\
 		tl = NULL;					\
@@ -167,15 +167,15 @@ do {								\
 	local_trylock_t *tl;					\
 	local_lock_t *l;					\
 								\
-	l = (local_lock_t *)this_cpu_ptr(lock);			\
+	l = (local_lock_t *)(lock);				\
 	tl = (local_trylock_t *)l;				\
 	local_lock_release(l);					\
 	_Generic((lock),					\
-		 __percpu local_trylock_t *: ({			\
+		 local_trylock_t *: ({				\
 			lockdep_assert(tl->acquired == 1);	\
 			WRITE_ONCE(tl->acquired, 0);		\
 		 }),						\
-		 __percpu local_lock_t *: (void)0);		\
+		 local_lock_t *: (void)0);			\
 } while (0)
 
 #define __local_unlock(lock)					\
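The acquire/release helpers above rely on C11 _Generic() so that the 'acquired' bookkeeping is only emitted when the argument is a local_trylock_t; with the this_cpu_ptr() move, the selectors now match plain pointers instead of __percpu ones. A small standalone sketch of that dispatch mechanism (plain userspace C with made-up type names, purely to illustrate _Generic selection):

#include <stdio.h>

typedef struct { int dummy; } plain_lock_t;
typedef struct { int acquired; } trylock_t;

/* Picks an expression based on the static type of the pointer argument. */
#define lock_kind(p) _Generic((p),			\
	plain_lock_t *: "plain lock",			\
	trylock_t *:    "trylock-capable lock")

int main(void)
{
	plain_lock_t a;
	trylock_t b;

	printf("%s\n", lock_kind(&a));	/* prints "plain lock" */
	printf("%s\n", lock_kind(&b));	/* prints "trylock-capable lock" */
	return 0;
}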
@@ -199,11 +199,11 @@ do {								\
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq();			\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		local_lock_acquire((lock));			\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
-	local_lock_release(this_cpu_ptr(lock))
+	local_lock_release((lock))
 
 #else /* !CONFIG_PREEMPT_RT */
 
@@ -227,7 +227,7 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock(__lock)					\
 	do {							\
 		migrate_disable();				\
-		spin_lock(this_cpu_ptr((__lock)));		\
+		spin_lock((__lock));				\
 	} while (0)
 
 #define __local_lock_irq(lock)	__local_lock(lock)
@@ -241,7 +241,7 @@ typedef spinlock_t local_trylock_t;
 
 #define __local_unlock(__lock)					\
 	do {							\
-		spin_unlock(this_cpu_ptr((__lock)));		\
+		spin_unlock((__lock));				\
 		migrate_enable();				\
 	} while (0)
 
@@ -252,12 +252,12 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq_func();		\
-		spin_lock(this_cpu_ptr(lock));			\
+		spin_lock((lock));				\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
 	do {							\
-		spin_unlock(this_cpu_ptr((lock)));		\
+		spin_unlock((lock));				\
 	} while (0)
 
 #define __local_trylock(lock)					\
@@ -268,7 +268,7 @@ do {								\
 		__locked = 0;					\
 	} else {						\
 		migrate_disable();				\
-		__locked = spin_trylock(this_cpu_ptr((lock)));	\
+		__locked = spin_trylock((lock));		\
 		if (!__locked)					\
 			migrate_enable();			\
 	}							\
@@ -175,7 +175,7 @@ struct lock_class_stats {
 	unsigned long			bounces[nr_bounce_types];
 };
 
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
 void clear_lock_stats(struct lock_class *class);
 #endif
 
@@ -126,11 +126,11 @@ do {							\
 
 #ifdef CONFIG_DEBUG_MUTEXES
 
-int __devm_mutex_init(struct device *dev, struct mutex *lock);
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
 
 #else
 
-static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
 {
 	/*
 	 * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
@@ -141,14 +141,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 
 #endif
 
-#define devm_mutex_init(dev, mutex)				\
+#define __mutex_init_ret(mutex)					\
 ({								\
 	typeof(mutex) mutex_ = (mutex);				\
 								\
 	mutex_init(mutex_);					\
-	__devm_mutex_init(dev, mutex_);				\
+	mutex_;							\
 })
 
+#define devm_mutex_init(dev, mutex)				\
+	__devm_mutex_init(dev, __mutex_init_ret(mutex))
+
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/locking/mutex-design.rst.
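With __devm_mutex_init() marked __must_check and devm_mutex_init() now expanding to a direct call of it, ignoring the return value produces a compiler warning, so callers are expected to handle failure as the two driver fixes above do. A minimal illustrative probe sketch of the resulting pattern (foo_probe(), struct foo and the error message are made-up, not taken from this series):

/* Illustrative only: all "foo" names are hypothetical. */
struct foo {
	struct mutex lock;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* Must be consumed now that __devm_mutex_init() is __must_check. */
	ret = devm_mutex_init(&pdev->dev, &foo->lock);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Failed to initialize lock\n");

	platform_set_drvdata(pdev, foo);
	return 0;
}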
@@ -297,33 +297,30 @@ static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 	dst->nr += src->nr;
 }
 
-struct lock_class_stats lock_stats(struct lock_class *class)
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats)
 {
-	struct lock_class_stats stats;
 	int cpu, i;
 
-	memset(&stats, 0, sizeof(struct lock_class_stats));
+	memset(stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
-			stats.contention_point[i] += pcs->contention_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++)
+			stats->contention_point[i] += pcs->contention_point[i];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
-			stats.contending_point[i] += pcs->contending_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++)
+			stats->contending_point[i] += pcs->contending_point[i];
 
-		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
-		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+		lock_time_add(&pcs->read_waittime, &stats->read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats->write_waittime);
 
-		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
-		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+		lock_time_add(&pcs->read_holdtime, &stats->read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats->write_holdtime);
 
-		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
-			stats.bounces[i] += pcs->bounces[i];
+		for (i = 0; i < ARRAY_SIZE(stats->bounces); i++)
+			stats->bounces[i] += pcs->bounces[i];
 	}
-
-	return stats;
 }
 
 void clear_lock_stats(struct lock_class *class)
@@ -6619,8 +6616,16 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (need_callback)
 		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
-	synchronize_rcu();
+	/*
+	 * Wait until is_dynamic_key() has finished accessing k->hash_entry.
+	 *
+	 * Some operations like __qdisc_destroy() will call this in a debug
+	 * kernel, and the network traffic is disabled while waiting, hence
+	 * the delay of the wait matters in debugging cases. Currently use a
+	 * synchronize_rcu_expedited() to speed up the wait at the cost of
+	 * system IPIs. TODO: Replace RCU with hazptr for this.
+	 */
+	synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
 
@@ -47,29 +47,31 @@ enum {
 	__LOCKF(USED_READ)
 };
 
+enum {
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
-static const unsigned long LOCKF_ENABLED_IRQ =
+	LOCKF_ENABLED_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
-static const unsigned long LOCKF_USED_IN_IRQ =
+	LOCKF_USED_IN_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
-static const unsigned long LOCKF_ENABLED_IRQ_READ =
+	LOCKF_ENABLED_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
-static const unsigned long LOCKF_USED_IN_IRQ_READ =
+	LOCKF_USED_IN_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
+};
 
 #define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
 #define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
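For orientation: lockdep_states.h lists the lockdep IRQ states (HARDIRQ and SOFTIRQ), so each x-macro block above now expands to an ordinary enumerator rather than a 'static const' object. Written out by hand under that assumption, one block ends up roughly like this sketch:

/* Approximate preprocessed form of one of the blocks above. */
enum {
	LOCKF_ENABLED_IRQ =
		LOCKF_ENABLED_HARDIRQ |
		LOCKF_ENABLED_SOFTIRQ |
		0,
};

One practical benefit is that enumerators are plain integer constant expressions and do not define const objects in every translation unit that includes the header.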
@@ -657,7 +657,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
 			if (!test_bit(idx, lock_classes_in_use))
 				continue;
 			iter->class = class;
-			iter->stats = lock_stats(class);
+			lock_stats(class, &iter->stats);
 			iter++;
 		}
 
@@ -191,9 +191,7 @@ static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
-#endif
 	debug_mutex_add_waiter(lock, waiter, current);
 
 	list_add_tail(&waiter->list, list);
@@ -209,9 +207,7 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 	__mutex_clear_flag(lock, MUTEX_FLAGS);
 
 	debug_mutex_remove_waiter(lock, waiter, current);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_clear_blocker();
-#endif
 }
 
 /*
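Dropping the #ifdef guards at the call sites only works if hung_task_set_blocker()/hung_task_clear_blocker() compile to no-ops when CONFIG_DETECT_HUNG_TASK_BLOCKER is disabled. That is the usual kernel header pattern, sketched below; the exact prototypes here are an assumption, not taken from this diff:

/* Sketch of the usual config-stub pattern; prototypes are assumed. */
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
void hung_task_set_blocker(void *lock, unsigned long type);
void hung_task_clear_blocker(void);
#else
static inline void hung_task_set_blocker(void *lock, unsigned long type) { }
static inline void hung_task_clear_blocker(void) { }
#endif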
@@ -727,8 +727,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	return ret;
 }
 
-#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
-
 static inline enum owner_state
 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
 {
@@ -835,7 +833,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		enum owner_state owner_state;
 
 		owner_state = rwsem_spin_on_owner(sem);
-		if (!(owner_state & OWNER_SPINNABLE))
+		if (owner_state == OWNER_NONSPINNABLE)
 			break;
 
 		/*
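The two conditions are equivalent because rwsem_spin_on_owner() returns exactly one owner_state value, so "not spinnable" can only mean OWNER_NONSPINNABLE; OWNER_SPINNABLE, being the union of the other three states, carried no extra information and could be dropped. For reference, the owner_state bits in rwsem.c look roughly like the sketch below (reproduced from memory, so treat the exact values as an assumption):

/* Sketch of enum owner_state; one bit per state, only one is returned. */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};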
@@ -175,6 +175,8 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
     /// Tries to acquire the lock.
     ///
     /// Returns a guard that can be used to access the data protected by the lock if successful.
+    // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+    #[must_use = "if unused, the lock will be immediately unlocked"]
     pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
         // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
         // that `init` was called.