Merge tag 'timers-core-2025-09-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner:

 - Address the inconsistent shutdown sequence of per-CPU clockevents on
   CPU hotplug, which only removed the device from the core but failed
   to invoke the actual device driver shutdown callback. This kept the
   timer active, which prevented power savings and caused pointless
   noise in virtualization.

 - Encapsulate the open-coded access to the hrtimer clock base, which
   is a private implementation detail, so that the implementation can
   be changed without breaking a lot of usage sites.

 - Enhance the debug output of the clocksource watchdog to provide
   better information for analysis.

 - The usual set of cleanups and enhancements all over the place.

* tag 'timers-core-2025-09-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Fix spelling mistakes in comments
  clocksource: Print durations for sync check unconditionally
  LoongArch: Remove clockevents shutdown call on offlining
  tick: Do not set device to detached state in tick_shutdown()
  hrtimer: Reorder branches in hrtimer_clockid_to_base()
  hrtimer: Remove hrtimer_clock_base::get_time
  hrtimer: Use hrtimer_cb_get_time() helper
  media: pwm-ir-tx: Avoid direct access to hrtimer clockbase
  ALSA: hrtimer: Avoid direct access to hrtimer clockbase
  lib: test_objpool: Avoid direct access to hrtimer clockbase
  sched/core: Avoid direct access to hrtimer clockbase
  timers/itimer: Avoid direct access to hrtimer clockbase
  posix-timers: Avoid direct access to hrtimer clockbase
  jiffies: Remove obsolete SHIFTED_HZ comment
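The recurring change in the diff below replaces direct reads of the private clock base pointer, timer->base->get_time(), with the hrtimer_cb_get_time() helper, or with hrtimer_forward_now() where the read only fed a rearm. A minimal sketch of the resulting callback pattern; the callback name and the 100 ms period are illustrative, not taken from this series:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* Read the timer's clock through the public helper instead of
	 * dereferencing the clock base, which is now a private detail. */
	ktime_t now = hrtimer_cb_get_time(timer);

	/* Rearm one period ahead; hrtimer_forward_now(timer, period)
	 * would fold the clock read into the rearm. */
	hrtimer_forward(timer, now, ms_to_ktime(100));
	return HRTIMER_RESTART;
}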
@@ -112,8 +112,6 @@ static int arch_timer_starting(unsigned int cpu)
 
 static int arch_timer_dying(unsigned int cpu)
 {
-	constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device));
-
 	/* Clear Timer Interrupt */
 	write_csr_tintclear(CSR_TINTCLR_TI);
 
@@ -117,7 +117,6 @@ static int pwm_ir_tx_atomic(struct rc_dev *dev, unsigned int *txbuf,
 static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
 {
 	struct pwm_ir *pwm_ir = container_of(timer, struct pwm_ir, timer);
-	ktime_t now;
 
 	/*
	 * If we happen to hit an odd latency spike, loop through the
@@ -139,9 +138,7 @@ static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
 			hrtimer_add_expires_ns(timer, ns);
 
 			pwm_ir->txbuf_index++;
-
-			now = timer->base->get_time();
-		} while (hrtimer_get_expires_tv64(timer) < now);
+		} while (hrtimer_expires_remaining(timer) > 0);
 
 	return HRTIMER_RESTART;
 }
@@ -154,14 +154,11 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
 	return ktime_to_ns(timer->node.expires);
 }
 
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
+
 static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
 {
-	return ktime_sub(timer->node.expires, timer->base->get_time());
-}
-
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
-	return timer->base->get_time();
+	return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
 }
 
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
@@ -200,8 +197,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
 static inline ktime_t
 hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
 {
-	return __hrtimer_expires_remaining_adjusted(timer,
-						    timer->base->get_time());
+	return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
 }
 
 #ifdef CONFIG_TIMERFD
@@ -363,7 +359,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
 {
-	return hrtimer_forward(timer, timer->base->get_time(), interval);
+	return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
 }
 
 /* Precise sleep: */
@@ -41,7 +41,6 @@
  * @seq:	seqcount around __run_hrtimer
  * @running:	pointer to the currently running hrtimer
  * @active:	red black tree root node for the active timers
- * @get_time:	function to retrieve the current time of the clock
  * @offset:	offset of this clock to the monotonic base
  */
 struct hrtimer_clock_base {
@@ -51,7 +50,6 @@ struct hrtimer_clock_base {
 	seqcount_raw_spinlock_t	seq;
 	struct hrtimer		*running;
 	struct timerqueue_head	active;
-	ktime_t			(*get_time)(void);
 	ktime_t			offset;
 } __hrtimer_clock_base_align;
 
@@ -61,7 +61,7 @@
 
 extern void register_refined_jiffies(long clock_tick_rate);
 
-/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+/* TICK_USEC is the time between ticks in usec */
 #define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
 
 /* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
@@ -5,7 +5,7 @@
 #include <asm/param.h>	/* for HZ */
 #include <vdso/time64.h>
 
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+/* TICK_NSEC is the time between ticks in nsec */
 #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
 
 #endif /* __VDSO_JIFFIES_H */
@@ -919,7 +919,7 @@ void hrtick_start(struct rq *rq, u64 delay)
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
-	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
+	rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta);
 
	if (rq == this_rq())
		__hrtick_restart(rq);
@@ -35,7 +35,7 @@
 
 /**
  * struct alarm_base - Alarm timer bases
- * @lock:		Lock for syncrhonized access to the base
+ * @lock:		Lock for synchronized access to the base
  * @timerqueue:		Timerqueue head managing the list of events
  * @get_ktime:		Function to read the time correlating to the base
  * @get_timespec:	Function to read the namespace time correlating to the base
@@ -633,7 +633,7 @@ void tick_offline_cpu(unsigned int cpu)
 	raw_spin_lock(&clockevents_lock);
 
 	tick_broadcast_offline(cpu);
-	tick_shutdown(cpu);
+	tick_shutdown();
 
 	/*
	 * Unregister the clock event devices which were
@@ -144,7 +144,7 @@ static u64 suspend_start;
  * Default for maximum permissible skew when cs->uncertainty_margin is
  * not specified, and the lower bound even when cs->uncertainty_margin
  * is specified. This is also the default that is used when registering
- * clocks with unspecifed cs->uncertainty_margin, so this macro is used
+ * clocks with unspecified cs->uncertainty_margin, so this macro is used
  * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
  */
 #define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
@@ -407,9 +407,8 @@ void clocksource_verify_percpu(struct clocksource *cs)
 	if (!cpumask_empty(&cpus_behind))
 		pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
 			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
-	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
-		pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
-			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
+	pr_info(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
+		testcpu, cs_nsec_min, cs_nsec_max, cs->name);
 }
 EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
 
@@ -59,6 +59,7 @@
 #define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
 
 static void retrigger_next_event(void *arg);
+static ktime_t __hrtimer_cb_get_time(clockid_t clock_id);
 
 /*
  * The timer bases:
@@ -76,42 +77,34 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
-			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
-			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
-			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
-			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
-			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
-			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
-			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
-			.get_time = &ktime_get_clocktai,
		},
	},
	.csd = CSD_INIT(retrigger_next_event, NULL)
@@ -208,7 +201,7 @@ static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_
	/*
	 * The offline local CPU can't be the default target if the
	 * next remote target event is after this timer. Keep the
-	 * elected new base. An IPI will we issued to reprogram
+	 * elected new base. An IPI will be issued to reprogram
	 * it as a last resort.
	 */
	if (!hrtimer_base_is_online(this_cpu_base))
@@ -1253,7 +1246,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	remove_hrtimer(timer, base, true, force_local);
 
 	if (mode & HRTIMER_MODE_REL)
-		tim = ktime_add_safe(tim, base->get_time());
+		tim = ktime_add_safe(tim, __hrtimer_cb_get_time(base->clockid));
 
 	tim = hrtimer_update_lowres(timer, tim, mode);
 
@@ -1574,10 +1567,10 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 {
 	switch (clock_id) {
-	case CLOCK_REALTIME:
-		return HRTIMER_BASE_REALTIME;
 	case CLOCK_MONOTONIC:
 		return HRTIMER_BASE_MONOTONIC;
+	case CLOCK_REALTIME:
+		return HRTIMER_BASE_REALTIME;
 	case CLOCK_BOOTTIME:
 		return HRTIMER_BASE_BOOTTIME;
 	case CLOCK_TAI:
@@ -1588,6 +1581,29 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 	}
 }
 
+static ktime_t __hrtimer_cb_get_time(clockid_t clock_id)
+{
+	switch (clock_id) {
+	case CLOCK_MONOTONIC:
+		return ktime_get();
+	case CLOCK_REALTIME:
+		return ktime_get_real();
+	case CLOCK_BOOTTIME:
+		return ktime_get_boottime();
+	case CLOCK_TAI:
+		return ktime_get_clocktai();
+	default:
+		WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
+		return ktime_get();
+	}
+}
+
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer)
+{
+	return __hrtimer_cb_get_time(timer->base->clockid);
+}
+EXPORT_SYMBOL_GPL(hrtimer_cb_get_time);
+
 static void __hrtimer_setup(struct hrtimer *timer,
			    enum hrtimer_restart (*function)(struct hrtimer *),
			    clockid_t clock_id, enum hrtimer_mode mode)
@@ -163,8 +163,7 @@ void posixtimer_rearm_itimer(struct task_struct *tsk)
 	struct hrtimer *tmr = &tsk->signal->real_timer;
 
 	if (!hrtimer_is_queued(tmr) && tsk->signal->it_real_incr != 0) {
-		hrtimer_forward(tmr, tmr->base->get_time(),
-				tsk->signal->it_real_incr);
+		hrtimer_forward_now(tmr, tsk->signal->it_real_incr);
 		hrtimer_restart(tmr);
 	}
 }
@@ -299,8 +299,7 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
-					    timr->it_interval);
+	timr->it_overrun += hrtimer_forward_now(timer, timr->it_interval);
 	hrtimer_restart(timer);
 }
 
@@ -535,7 +534,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
 		goto out;
 	}
 	/*
-	 * After succesful copy out, the timer ID is visible to user space
+	 * After successful copy out, the timer ID is visible to user space
	 * now but not yet valid because new_timer::signal low order bit is 1.
	 *
	 * Complete the initialization with the clock specific create
@@ -825,7 +824,7 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
 	hrtimer_setup(&timr->it.real.timer, posix_timer_fn, timr->it_clock, mode);
 
 	if (!absolute)
-		expires = ktime_add_safe(expires, timer->base->get_time());
+		expires = ktime_add_safe(expires, hrtimer_cb_get_time(timer));
 	hrtimer_set_expires(timer, expires);
 
 	if (!sigev_none)
@@ -411,24 +411,18 @@ int tick_cpu_dying(unsigned int dying_cpu)
 }
 
 /*
- * Shutdown an event device on a given cpu:
+ * Shutdown an event device on the outgoing CPU:
  *
- * This is called on a life CPU, when a CPU is dead. So we cannot
- * access the hardware device itself.
- * We just set the mode and remove it from the lists.
+ * Called by the dying CPU during teardown, with clockevents_lock held
+ * and interrupts disabled.
  */
-void tick_shutdown(unsigned int cpu)
+void tick_shutdown(void)
 {
-	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	struct clock_event_device *dev = td->evtdev;
 
 	td->mode = TICKDEV_MODE_PERIODIC;
 	if (dev) {
-		/*
-		 * Prevent that the clock events layer tries to call
-		 * the set mode function!
-		 */
-		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 		clockevents_exchange_device(dev, NULL);
 		dev->event_handler = clockevents_handle_noop;
 		td->evtdev = NULL;
@@ -26,7 +26,7 @@ extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 extern void tick_check_new_device(struct clock_event_device *dev);
 extern void tick_offline_cpu(unsigned int cpu);
-extern void tick_shutdown(unsigned int cpu);
+extern void tick_shutdown(void);
 extern void tick_suspend(void);
 extern void tick_resume(void);
 extern bool tick_check_replacement(struct clock_event_device *curdev,
@@ -102,8 +102,6 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 	SEQ_printf(m, " .index: %d\n", base->index);
 
 	SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution);
-
-	SEQ_printf(m, " .get_time: %ps\n", base->get_time);
 #ifdef CONFIG_HIGH_RES_TIMERS
 	SEQ_printf(m, " .offset: %Lu nsecs\n",
		   (unsigned long long) ktime_to_ns(base->offset));
@@ -164,7 +164,7 @@ static enum hrtimer_restart ot_hrtimer_handler(struct hrtimer *hrt)
 	/* do bulk-testings for objects pop/push */
 	item->worker(item, 1);
 
-	hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle);
+	hrtimer_forward_now(hrt, item->hrtcycle);
 	return HRTIMER_RESTART;
 }
 
@@ -56,8 +56,6 @@ def print_base(base):
     text += " .index: {}\n".format(base['index'])
 
     text += " .resolution: {} nsecs\n".format(constants.LX_hrtimer_resolution)
-
-    text += " .get_time: {}\n".format(base['get_time'])
     if constants.LX_CONFIG_HIGH_RES_TIMERS:
         text += " .offset: {} nsecs\n".format(base['offset'])
     text += "active timers:\n"
@@ -44,7 +44,7 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
 	}
 
 	/* calculate the drift */
-	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
+	delta = ktime_sub(hrtimer_cb_get_time(hrt), hrtimer_get_expires(hrt));
 	if (delta > 0)
 		ticks += ktime_divns(delta, ticks * resolution);
 