timers/migration: Use scoped_guard on available flag set/clear

Clean up tmigr_clear_cpu_available() and tmigr_set_cpu_available() to
prepare for easier checks on the available flag.

Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251120145653.296659-4-gmonaco@redhat.com
This commit is contained in:
Gabriele Monaco
2025-11-20 15:56:49 +01:00
committed by Thomas Gleixner
parent a048ca5f00
commit 4c2374ed86

View File

@@ -1440,17 +1440,17 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
u64 firstexp;
cpumask_clear_cpu(cpu, tmigr_available_cpumask);
raw_spin_lock_irq(&tmc->lock);
tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
tmc->available = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
/*
* CPU has to handle the local events on his own, when on the way to
* offline; Therefore nextevt value is set to KTIME_MAX
*/
firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
trace_tmigr_cpu_unavailable(tmc);
raw_spin_unlock_irq(&tmc->lock);
/*
* CPU has to handle the local events on his own, when on the way to
* offline; Therefore nextevt value is set to KTIME_MAX
*/
firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
trace_tmigr_cpu_unavailable(tmc);
}
if (firstexp != KTIME_MAX) {
migrator = cpumask_any(tmigr_available_cpumask);
@@ -1469,13 +1469,13 @@ static int tmigr_set_cpu_available(unsigned int cpu)
return -EINVAL;
cpumask_set_cpu(cpu, tmigr_available_cpumask);
raw_spin_lock_irq(&tmc->lock);
trace_tmigr_cpu_available(tmc);
tmc->idle = timer_base_is_idle();
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
tmc->available = true;
raw_spin_unlock_irq(&tmc->lock);
scoped_guard(raw_spinlock_irq, &tmc->lock) {
trace_tmigr_cpu_available(tmc);
tmc->idle = timer_base_is_idle();
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
tmc->available = true;
}
return 0;
}