Lines Matching +full:next +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * High-resolution kernel timers
9 * In contrast to the low-resolution timeout API, aka timer wheel,
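To make the contrast with the timer wheel concrete, here is a minimal usage sketch of the API implemented below. The callback, interval and slack values are illustrative only; hrtimer_setup() and hrtimer_start_range_ns() are the real entry points that appear later in this listing.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

/* Runs in hardirq context unless the timer was set up with HRTIMER_MODE_SOFT. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	pr_info("hrtimer fired\n");
	return HRTIMER_NORESTART;	/* one-shot: do not requeue */
}

static void demo_timer_arm(void)
{
	hrtimer_setup(&demo_timer, demo_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	/* Expire 500us from now; a 0ns slack range requests exact expiry. */
	hrtimer_start_range_ns(&demo_timer, ns_to_ktime(500 * NSEC_PER_USEC),
			       0, HRTIMER_MODE_REL);
}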
50 #include "tick-internal.h"
54 * cpu_base->active
57 #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
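Only HRTIMER_ACTIVE_HARD matched the search; its sibling definitions in the same file (quoted from the kernel source, stable across recent versions) show the split: the low MASK_SHIFT bits track the hard-interrupt clock bases, the next MASK_SHIFT bits the softirq ones.

#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)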
122 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
135 return likely(base->online); in hrtimer_base_is_online()
147 * timer->base->cpu_base
161 * means that all timers which are tied to this base via timer->base are
168 * possible to set timer->base = &migration_base and drop the lock: the timer
174 __acquires(&timer->base->lock) in lock_hrtimer_base()
179 base = READ_ONCE(timer->base); in lock_hrtimer_base()
181 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
182 if (likely(base == timer->base)) in lock_hrtimer_base()
185 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
192 * Check if the elected target is suitable considering its next
195 * If the elected target is remote and its next event is after the timer
203 * Called with cpu_base->lock of target cpu held.
220 * next remote target event is after this timer. Keep the in hrtimer_suitable_target()
227 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); in hrtimer_suitable_target()
229 return expires >= new_base->cpu_base->expires_next; in hrtimer_suitable_target()
248 * We switch the timer base to a power-optimized selected CPU target,
250 * - NO_HZ_COMMON is enabled
251 * - timer migration is enabled
252 * - the timer callback is not running
253 * - the timer is not the first expiring timer on the new target
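The CPU election itself happens in a small helper; a sketch of it under CONFIG_SMP && CONFIG_NO_HZ_COMMON, reconstructed from the kernel source (details may differ by version):

static inline struct hrtimer_cpu_base *
get_target_base(struct hrtimer_cpu_base *base, int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	/* Ask the NOHZ code for a power-optimized target unless pinned. */
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;	/* fall back to the local CPU base */
}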
265 int basenum = base->index; in switch_hrtimer_base()
270 new_base = &new_cpu_base->clock_base[basenum]; in switch_hrtimer_base()
286 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
287 raw_spin_unlock(&base->cpu_base->lock); in switch_hrtimer_base()
288 raw_spin_lock(&new_base->cpu_base->lock); in switch_hrtimer_base()
292 raw_spin_unlock(&new_base->cpu_base->lock); in switch_hrtimer_base()
293 raw_spin_lock(&base->cpu_base->lock); in switch_hrtimer_base()
295 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
298 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
312 __acquires(&timer->base->cpu_base->lock) in lock_hrtimer_base()
314 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base()
316 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
340 tmp = dclc < 0 ? -dclc : dclc; in __ktime_divns()
349 return dclc < 0 ? -tmp : tmp; in __ktime_divns()
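These two matched lines are the sign handling around an unsigned division: on 32-bit, do_div() only divides an unsigned 64-bit value by a 32-bit divisor, so the helper works on the magnitude and restores the sign at the end. The full 32-bit branch, reconstructed from the kernel source (version details may vary):

#if BITS_PER_LONG < 64
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;		/* work on the magnitude */

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;				/* scale dividend to match */
	do_div(tmp, (u32) div);
	return dclc < 0 ? -tmp : tmp;		/* restore the sign */
}
#endif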
379 return ((struct hrtimer *) addr)->function; in hrtimer_debug_hint()
384 * - an active object is initialized
402 * - an active object is activated
403 * - an unknown non-static object is activated
418 * - an active object is freed
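Each of these cases maps to a debugobjects fixup callback. A representative sketch, modelled on the kernel's hrtimer_fixup_init() (exact bodies differ between versions): when an already-active timer is re-initialized, cancel it first, then reset its debug state.

static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);			/* stop the live timer */
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;				/* fixup performed */
	default:
		return false;
	}
}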
453 enum hrtimer_mode mode) in debug_hrtimer_activate() argument
474 enum hrtimer_mode mode) { } in debug_hrtimer_activate() argument
480 enum hrtimer_mode mode) in debug_init() argument
483 trace_hrtimer_init(timer, clockid, mode); in debug_init()
487 enum hrtimer_mode mode) in debug_init_on_stack() argument
490 trace_hrtimer_init(timer, clockid, mode); in debug_init_on_stack()
494 enum hrtimer_mode mode) in debug_activate() argument
496 debug_hrtimer_activate(timer, mode); in debug_activate()
497 trace_hrtimer_start(timer, mode); in debug_activate()
517 return &cpu_base->clock_base[idx]; in __next_base()
532 struct timerqueue_node *next; in __hrtimer_next_event_base() local
535 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
536 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
538 /* Get to the next timer in the queue. */ in __hrtimer_next_event_base()
539 next = timerqueue_iterate_next(next); in __hrtimer_next_event_base()
540 if (!next) in __hrtimer_next_event_base()
543 timer = container_of(next, struct hrtimer, node); in __hrtimer_next_event_base()
545 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
553 if (timer->is_soft) in __hrtimer_next_event_base()
554 cpu_base->softirq_next_timer = timer; in __hrtimer_next_event_base()
556 cpu_base->next_timer = timer; in __hrtimer_next_event_base()
560 * clock_was_set() might have changed base->offset of any of in __hrtimer_next_event_base()
578 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
585 * - HRTIMER_ACTIVE_ALL,
586 * - HRTIMER_ACTIVE_SOFT, or
587 * - HRTIMER_ACTIVE_HARD.
596 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) { in __hrtimer_get_next_event()
597 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in __hrtimer_get_next_event()
598 cpu_base->softirq_next_timer = NULL; in __hrtimer_get_next_event()
602 next_timer = cpu_base->softirq_next_timer; in __hrtimer_get_next_event()
606 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in __hrtimer_get_next_event()
607 cpu_base->next_timer = next_timer; in __hrtimer_get_next_event()
624 if (!cpu_base->softirq_activated) { in hrtimer_update_next_event()
630 cpu_base->softirq_expires_next = soft; in hrtimer_update_next_event()
635 * If a softirq timer is expiring first, update cpu_base->next_timer in hrtimer_update_next_event()
639 cpu_base->next_timer = cpu_base->softirq_next_timer; in hrtimer_update_next_event()
648 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; in hrtimer_update_base()
649 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; in hrtimer_update_base()
650 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; in hrtimer_update_base()
652 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, in hrtimer_update_base()
655 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; in hrtimer_update_base()
656 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; in hrtimer_update_base()
657 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; in hrtimer_update_base()
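All clock bases queue on the CLOCK_MONOTONIC timeline; the per-base offset translates an expiry from the timer's own clock domain, which is why the soft bases above simply copy the offsets of their hard siblings. The conversion pattern used throughout this file (variable names illustrative):

/* Absolute expiry in the timer's clock, rebased to CLOCK_MONOTONIC: */
ktime_t expires_mono = ktime_sub(hrtimer_get_expires(timer), base->offset);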
663 * Is the high resolution mode active?
668 cpu_base->hres_active : 0; in hrtimer_hres_active()
675 cpu_base->expires_next = expires_next; in __hrtimer_reprogram()
694 if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) in __hrtimer_reprogram()
702 * next event
703 * Called with interrupts disabled and base->lock held
712 if (skip_equal && expires_next == cpu_base->expires_next) in hrtimer_force_reprogram()
715 __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next); in hrtimer_force_reprogram()
729 * Enable / Disable high resolution mode
739 * hrtimer_high_res_enabled - query whether highres mode is enabled
747 * Switch to high resolution mode
754 pr_warn("Could not switch to high resolution mode on CPU %u\n", in hrtimer_switch_to_hres()
755 base->cpu); in hrtimer_switch_to_hres()
758 base->hres_active = 1; in hrtimer_switch_to_hres()
773 * Retrigger next event is called after clock was set with interrupts
778 * - CONFIG_HIGH_RES_TIMERS is enabled.
779 * - CONFIG_NO_HZ_COMMON is enabled
790 * When high resolution mode or nohz is active, then the offsets of in retrigger_next_event()
792 * next tick will take care of that. in retrigger_next_event()
794 * If high resolution mode is active then the next expiring timer in retrigger_next_event()
799 * of the next expiring timer is enough. The return from the SMP in retrigger_next_event()
806 raw_spin_lock(&base->lock); in retrigger_next_event()
812 raw_spin_unlock(&base->lock); in retrigger_next_event()
820 * Called with interrupts disabled and base->cpu_base.lock held
825 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram()
826 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
832 * expiry time which is less than base->offset. Set it to 0. in hrtimer_reprogram()
837 if (timer->is_soft) { in hrtimer_reprogram()
842 * first hard hrtimer on the remote CPU - in hrtimer_reprogram()
845 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; in hrtimer_reprogram()
847 if (timer_cpu_base->softirq_activated) in hrtimer_reprogram()
850 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next)) in hrtimer_reprogram()
853 timer_cpu_base->softirq_next_timer = timer; in hrtimer_reprogram()
854 timer_cpu_base->softirq_expires_next = expires; in hrtimer_reprogram()
856 if (!ktime_before(expires, timer_cpu_base->expires_next) || in hrtimer_reprogram()
865 if (base->cpu_base != cpu_base) in hrtimer_reprogram()
868 if (expires >= cpu_base->expires_next) in hrtimer_reprogram()
875 if (cpu_base->in_hrtirq) in hrtimer_reprogram()
878 cpu_base->next_timer = timer; in hrtimer_reprogram()
898 * the next expiring timer. in update_needs_ipi()
900 seq = cpu_base->clock_was_set_seq; in update_needs_ipi()
907 if (seq == cpu_base->clock_was_set_seq) in update_needs_ipi()
915 if (cpu_base->in_hrtirq) in update_needs_ipi()
924 active &= cpu_base->active_bases; in update_needs_ipi()
927 struct timerqueue_node *next; in update_needs_ipi() local
929 next = timerqueue_getnext(&base->active); in update_needs_ipi()
930 expires = ktime_sub(next->expires, base->offset); in update_needs_ipi()
931 if (expires < cpu_base->expires_next) in update_needs_ipi()
935 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) in update_needs_ipi()
937 if (cpu_base->softirq_activated) in update_needs_ipi()
939 if (expires < cpu_base->softirq_expires_next) in update_needs_ipi()
955 * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
979 raw_spin_lock_irqsave(&cpu_base->lock, flags); in clock_was_set()
984 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in clock_was_set()
1030 __releases(&timer->base->cpu_base->lock) in unlock_hrtimer_base()
1032 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
1036 * hrtimer_forward() - forward the timer expiry
1064 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) in hrtimer_forward()
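The WARN above rejects forwarding a timer that is still enqueued; the intended use is from inside the expiry callback to build periodic timers. A sketch (the 10ms period is illustrative):

static enum hrtimer_restart periodic_fn(struct hrtimer *t)
{
	/* Push the expiry forward by whole periods past "now". */
	hrtimer_forward_now(t, ms_to_ktime(10));
	return HRTIMER_RESTART;		/* requeue with the new expiry */
}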
1090 * enqueue_hrtimer - internal function to (re)start a timer
1098 enum hrtimer_mode mode) in enqueue_hrtimer() argument
1100 debug_activate(timer, mode); in enqueue_hrtimer()
1101 WARN_ON_ONCE(!base->cpu_base->online); in enqueue_hrtimer()
1103 base->cpu_base->active_bases |= 1 << base->index; in enqueue_hrtimer()
1106 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); in enqueue_hrtimer()
1108 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
1112 * __remove_hrtimer - internal function to remove a timer
1116 * High resolution timer mode reprograms the clock event device when the
1117 * timer is the one which expires next. The caller can disable this by setting
1125 struct hrtimer_cpu_base *cpu_base = base->cpu_base; in __remove_hrtimer()
1126 u8 state = timer->state; in __remove_hrtimer()
1129 WRITE_ONCE(timer->state, newstate); in __remove_hrtimer()
1133 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1134 cpu_base->active_bases &= ~(1 << base->index); in __remove_hrtimer()
1138 * cpu_base->next_timer. This happens when we remove the first in __remove_hrtimer()
1140 * cpu_base->next_timer. So the worst thing what can happen is in __remove_hrtimer()
1144 if (reprogram && timer == cpu_base->next_timer) in __remove_hrtimer()
1155 u8 state = timer->state; in remove_hrtimer()
1162 * resolution mode is active and the timer is on the current in remove_hrtimer()
1169 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in remove_hrtimer()
1189 const enum hrtimer_mode mode) in hrtimer_update_lowres() argument
1197 timer->is_rel = mode & HRTIMER_MODE_REL; in hrtimer_update_lowres()
1198 if (timer->is_rel) in hrtimer_update_lowres()
1210 * Find the next SOFT expiration. in hrtimer_update_softirq_timer()
1215 * reprogramming needs to be triggered, even if the next soft in hrtimer_update_softirq_timer()
1216 * hrtimer expires at the same time as the next hard in hrtimer_update_softirq_timer()
1217 * hrtimer. cpu_base->softirq_expires_next needs to be updated! in hrtimer_update_softirq_timer()
1223 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event() in hrtimer_update_softirq_timer()
1224 * cpu_base->*expires_next is only set by hrtimer_reprogram() in hrtimer_update_softirq_timer()
1226 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram); in hrtimer_update_softirq_timer()
1230 u64 delta_ns, const enum hrtimer_mode mode, in __hrtimer_start_range_ns() argument
1245 force_local = base->cpu_base == this_cpu_base; in __hrtimer_start_range_ns()
1246 force_local &= base->cpu_base->next_timer == timer; in __hrtimer_start_range_ns()
1252 force_local &= this_cpu_base->online; in __hrtimer_start_range_ns()
1267 if (mode & HRTIMER_MODE_REL) in __hrtimer_start_range_ns()
1268 tim = ktime_add_safe(tim, base->get_time()); in __hrtimer_start_range_ns()
1270 tim = hrtimer_update_lowres(timer, tim, mode); in __hrtimer_start_range_ns()
1277 mode & HRTIMER_MODE_PINNED); in __hrtimer_start_range_ns()
1282 first = enqueue_hrtimer(timer, new_base, mode); in __hrtimer_start_range_ns()
1298 struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base; in __hrtimer_start_range_ns()
1300 smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd); in __hrtimer_start_range_ns()
1310 hrtimer_force_reprogram(new_base->cpu_base, 1); in __hrtimer_start_range_ns()
1315 * hrtimer_start_range_ns - (re)start an hrtimer
1319 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1321 * softirq based mode is considered for debug purposes only!
1324 u64 delta_ns, const enum hrtimer_mode mode) in hrtimer_start_range_ns() argument
1329 if (WARN_ON_ONCE(!timer->function)) in hrtimer_start_range_ns()
1334 * expiry mode because unmarked timers are moved to softirq expiry. in hrtimer_start_range_ns()
1337 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); in hrtimer_start_range_ns()
1339 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard); in hrtimer_start_range_ns()
1343 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) in hrtimer_start_range_ns()
1351 * hrtimer_try_to_cancel - try to deactivate a timer
1358 * * -1 when the timer is currently executing the callback function and
1365 int ret = -1; in hrtimer_try_to_cancel()
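hrtimer_cancel() is essentially a retry loop around this function: a return of -1 means the callback is running, so it waits for it and tries again. A sketch matching the upstream structure (exact details may vary by version):

int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);

		if (ret < 0)	/* callback running: wait, then retry */
			hrtimer_cancel_wait_running(timer);
	} while (ret < 0);
	return ret;
}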
1391 spin_lock_init(&base->softirq_expiry_lock); in hrtimer_cpu_base_init_expiry_lock()
1395 __acquires(&base->softirq_expiry_lock) in hrtimer_cpu_base_lock_expiry()
1397 spin_lock(&base->softirq_expiry_lock); in hrtimer_cpu_base_lock_expiry()
1401 __releases(&base->softirq_expiry_lock) in hrtimer_cpu_base_unlock_expiry()
1403 spin_unlock(&base->softirq_expiry_lock); in hrtimer_cpu_base_unlock_expiry()
1409 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1416 if (atomic_read(&cpu_base->timer_waiters)) { in hrtimer_sync_wait_running()
1417 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_sync_wait_running()
1418 spin_unlock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1419 spin_lock(&cpu_base->softirq_expiry_lock); in hrtimer_sync_wait_running()
1420 raw_spin_lock_irq(&cpu_base->lock); in hrtimer_sync_wait_running()
1445 * - If the caller is on a remote CPU then it has to spin wait for the timer
1448 * - If the caller originates from the task which preempted the timer
1455 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running()
1461 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1469 * immediately so the softirq can expire the next timer. In theory in hrtimer_cancel_wait_running()
1473 atomic_inc(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1474 spin_lock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1475 atomic_dec(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1476 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1490 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1512 * __hrtimer_get_remaining - get remaining time for the timer
1534 * hrtimer_get_next_event - get the time until next expiry event
1536 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1544 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_get_next_event()
1549 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_get_next_event()
1555 * hrtimer_next_event_without - time until next expiry event w/o one timer
1558 * Returns the next expiry time over all timers except for the @exclude one or
1567 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_next_event_without()
1572 if (!cpu_base->softirq_activated) { in hrtimer_next_event_without()
1573 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; in hrtimer_next_event_without()
1577 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; in hrtimer_next_event_without()
1582 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_next_event_without()
1606 enum hrtimer_mode mode) in __hrtimer_init() argument
1608 bool softtimer = !!(mode & HRTIMER_MODE_SOFT); in __hrtimer_init()
1614 * marked for hard interrupt expiry mode are moved into soft in __hrtimer_init()
1618 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD)) in __hrtimer_init()
1630 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL) in __hrtimer_init()
1635 timer->is_soft = softtimer; in __hrtimer_init()
1636 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); in __hrtimer_init()
1637 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
1638 timerqueue_init(&timer->node); in __hrtimer_init()
1643 clockid_t clock_id, enum hrtimer_mode mode) in __hrtimer_setup() argument
1645 __hrtimer_init(timer, clock_id, mode); in __hrtimer_setup()
1648 timer->function = hrtimer_dummy_timeout; in __hrtimer_setup()
1650 timer->function = function; in __hrtimer_setup()
1654 * hrtimer_init - initialize a timer to the given clock
1657 * @mode: The modes which are relevant for initialization:
1666 enum hrtimer_mode mode) in hrtimer_init() argument
1668 debug_init(timer, clock_id, mode); in hrtimer_init()
1669 __hrtimer_init(timer, clock_id, mode); in hrtimer_init()
1674 * hrtimer_setup - initialize a timer to the given clock
1678 * @mode: The modes which are relevant for initialization:
1687 clockid_t clock_id, enum hrtimer_mode mode) in hrtimer_setup() argument
1689 debug_init(timer, clock_id, mode); in hrtimer_setup()
1690 __hrtimer_setup(timer, function, clock_id, mode); in hrtimer_setup()
1695 * hrtimer_setup_on_stack - initialize a timer on stack memory
1699 * @mode: The timer mode
1706 clockid_t clock_id, enum hrtimer_mode mode) in hrtimer_setup_on_stack() argument
1708 debug_init_on_stack(timer, clock_id, mode); in hrtimer_setup_on_stack()
1709 __hrtimer_setup(timer, function, clock_id, mode); in hrtimer_setup_on_stack()
1726 base = READ_ONCE(timer->base); in hrtimer_active()
1727 seq = raw_read_seqcount_begin(&base->seq); in hrtimer_active()
1729 if (timer->state != HRTIMER_STATE_INACTIVE || in hrtimer_active()
1730 base->running == timer) in hrtimer_active()
1733 } while (read_seqcount_retry(&base->seq, seq) || in hrtimer_active()
1734 base != READ_ONCE(timer->base)); in hrtimer_active()
1744 * - queued: the timer is queued
1745 * - callback: the timer is being run
1746 * - post: the timer is inactive or (re)queued
1748 * On the read side we ensure we observe timer->state and cpu_base->running
1750 * This includes timer->base changing because sequence numbers alone are
1761 unsigned long flags) __must_hold(&cpu_base->lock) in __run_hrtimer()
1767 lockdep_assert_held(&cpu_base->lock); in __run_hrtimer()
1770 base->running = timer; in __run_hrtimer()
1773 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1776 * hrtimer_active() cannot observe base->running == NULL && in __run_hrtimer()
1777 * timer->state == INACTIVE. in __run_hrtimer()
1779 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1782 fn = timer->function; in __run_hrtimer()
1790 timer->is_rel = false; in __run_hrtimer()
1797 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in __run_hrtimer()
1805 raw_spin_lock_irq(&cpu_base->lock); in __run_hrtimer()
1812 * Note: Because we dropped the cpu_base->lock above, in __run_hrtimer()
1817 !(timer->state & HRTIMER_STATE_ENQUEUED)) in __run_hrtimer()
1821 * Separate the ->running assignment from the ->state assignment. in __run_hrtimer()
1824 * hrtimer_active() cannot observe base->running.timer == NULL && in __run_hrtimer()
1825 * timer->state == INACTIVE. in __run_hrtimer()
1827 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1829 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1830 base->running = NULL; in __run_hrtimer()
1837 unsigned int active = cpu_base->active_bases & active_mask; in __hrtimer_run_queues()
1843 basenow = ktime_add(now, base->offset); in __hrtimer_run_queues()
1845 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1859 * are right-of a not yet expired timer, because that in __hrtimer_run_queues()
1879 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_softirq()
1884 cpu_base->softirq_activated = 0; in hrtimer_run_softirq()
1887 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_softirq()
1904 BUG_ON(!cpu_base->hres_active); in hrtimer_interrupt()
1905 cpu_base->nr_events++; in hrtimer_interrupt()
1906 dev->next_event = KTIME_MAX; in hrtimer_interrupt()
1908 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1911 cpu_base->in_hrtirq = 1; in hrtimer_interrupt()
1913 * We set expires_next to KTIME_MAX here with cpu_base->lock in hrtimer_interrupt()
1919 cpu_base->expires_next = KTIME_MAX; in hrtimer_interrupt()
1921 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_interrupt()
1922 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_interrupt()
1923 cpu_base->softirq_activated = 1; in hrtimer_interrupt()
1929 /* Reevaluate the clock bases for the [soft] next expiry */ in hrtimer_interrupt()
1935 cpu_base->expires_next = expires_next; in hrtimer_interrupt()
1936 cpu_base->in_hrtirq = 0; in hrtimer_interrupt()
1937 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1941 cpu_base->hang_detected = 0; in hrtimer_interrupt()
1946 * The next timer was already expired due to: in hrtimer_interrupt()
1947 * - tracing in hrtimer_interrupt()
1948 * - long lasting callbacks in hrtimer_interrupt()
1949 * - being scheduled away when running in a VM in hrtimer_interrupt()
1958 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_interrupt()
1960 cpu_base->nr_retries++; in hrtimer_interrupt()
1966 * we spent here. We schedule the next event this amount of in hrtimer_interrupt()
1969 cpu_base->nr_hangs++; in hrtimer_interrupt()
1970 cpu_base->hang_detected = 1; in hrtimer_interrupt()
1971 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_interrupt()
1974 if ((unsigned int)delta > cpu_base->max_hang_time) in hrtimer_interrupt()
1975 cpu_base->max_hang_time = (unsigned int) delta; in hrtimer_interrupt()
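After a detected hang the interrupt does not retry immediately: it forwards the event by roughly the time already spent, clamped to a sane maximum. The tail of the function, reconstructed from the kernel source (the 100ms clamp matches upstream; comment wording differs):

	/* Give the CPU at least as long as we just spent, capped at 100ms. */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);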
2003 * can switch to highres and / or nohz mode. The clocksource in hrtimer_run_queues()
2013 raw_spin_lock_irqsave(&cpu_base->lock, flags); in hrtimer_run_queues()
2016 if (!ktime_before(now, cpu_base->softirq_expires_next)) { in hrtimer_run_queues()
2017 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimer_run_queues()
2018 cpu_base->softirq_activated = 1; in hrtimer_run_queues()
2023 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); in hrtimer_run_queues()
2033 struct task_struct *task = t->task; in hrtimer_wakeup()
2035 t->task = NULL; in hrtimer_wakeup()
2043 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
2045 * @mode: timer mode abs/rel
2048 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
2051 enum hrtimer_mode mode) in hrtimer_sleeper_start_expires() argument
2054 * Make the enqueue delivery mode check work on RT. If the sleeper in hrtimer_sleeper_start_expires()
2055 * was initialized for hard interrupt delivery, force the mode bit. in hrtimer_sleeper_start_expires()
2057 * __hrtimer_init_sleeper() determines the delivery mode on RT so the in hrtimer_sleeper_start_expires()
2060 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) in hrtimer_sleeper_start_expires()
2061 mode |= HRTIMER_MODE_HARD; in hrtimer_sleeper_start_expires()
2063 hrtimer_start_expires(&sl->timer, mode); in hrtimer_sleeper_start_expires()
2068 clockid_t clock_id, enum hrtimer_mode mode) in __hrtimer_init_sleeper() argument
2072 * marked for hard interrupt expiry mode are moved into soft in __hrtimer_init_sleeper()
2084 * OTOH, privileged real-time user space applications rely on the in __hrtimer_init_sleeper()
2086 * a real-time scheduling class, mark the mode for hard interrupt in __hrtimer_init_sleeper()
2090 if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT)) in __hrtimer_init_sleeper()
2091 mode |= HRTIMER_MODE_HARD; in __hrtimer_init_sleeper()
2094 __hrtimer_init(&sl->timer, clock_id, mode); in __hrtimer_init_sleeper()
2095 sl->timer.function = hrtimer_wakeup; in __hrtimer_init_sleeper()
2096 sl->task = current; in __hrtimer_init_sleeper()
2100 * hrtimer_setup_sleeper_on_stack - initialize a sleeper in stack memory
2103 * @mode: timer mode abs/rel
2106 clockid_t clock_id, enum hrtimer_mode mode) in hrtimer_setup_sleeper_on_stack() argument
2108 debug_init_on_stack(&sl->timer, clock_id, mode); in hrtimer_setup_sleeper_on_stack()
2109 __hrtimer_init_sleeper(sl, clock_id, mode); in hrtimer_setup_sleeper_on_stack()
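The nanosleep paths below use the sleeper in exactly this shape; a condensed sketch of the do_nanosleep() pattern (the helper name, deadline handling and return values are illustrative):

static int sleep_until(ktime_t deadline)
{
	struct hrtimer_sleeper t;

	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, deadline);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
	if (t.task)			/* not yet woken by the timer */
		schedule();
	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);

	destroy_hrtimer_on_stack(&t.timer);
	return t.task ? -EINTR : 0;	/* hrtimer_wakeup() NULLs t.task */
}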
2115 switch(restart->nanosleep.type) { in nanosleep_copyout()
2118 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) in nanosleep_copyout()
2119 return -EFAULT; in nanosleep_copyout()
2123 if (put_timespec64(ts, restart->nanosleep.rmtp)) in nanosleep_copyout()
2124 return -EFAULT; in nanosleep_copyout()
2129 return -ERESTART_RESTARTBLOCK; in nanosleep_copyout()
2132 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) in do_nanosleep() argument
2138 hrtimer_sleeper_start_expires(t, mode); in do_nanosleep()
2140 if (likely(t->task)) in do_nanosleep()
2143 hrtimer_cancel(&t->timer); in do_nanosleep()
2144 mode = HRTIMER_MODE_ABS; in do_nanosleep()
2146 } while (t->task && !signal_pending(current)); in do_nanosleep()
2150 if (!t->task) in do_nanosleep()
2153 restart = &current->restart_block; in do_nanosleep()
2154 if (restart->nanosleep.type != TT_NONE) { in do_nanosleep()
2155 ktime_t rem = hrtimer_expires_remaining(&t->timer); in do_nanosleep()
2164 return -ERESTART_RESTARTBLOCK; in do_nanosleep()
2172 hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS); in hrtimer_nanosleep_restart()
2173 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); in hrtimer_nanosleep_restart()
2179 long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, in hrtimer_nanosleep() argument
2186 hrtimer_setup_sleeper_on_stack(&t, clockid, mode); in hrtimer_nanosleep()
2187 hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns); in hrtimer_nanosleep()
2188 ret = do_nanosleep(&t, mode); in hrtimer_nanosleep()
2189 if (ret != -ERESTART_RESTARTBLOCK) in hrtimer_nanosleep()
2193 if (mode == HRTIMER_MODE_ABS) { in hrtimer_nanosleep()
2194 ret = -ERESTARTNOHAND; in hrtimer_nanosleep()
2198 restart = &current->restart_block; in hrtimer_nanosleep()
2199 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
2200 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); in hrtimer_nanosleep()
2215 return -EFAULT; in SYSCALL_DEFINE2()
2218 return -EINVAL; in SYSCALL_DEFINE2()
2220 current->restart_block.fn = do_no_restart_syscall; in SYSCALL_DEFINE2()
2221 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; in SYSCALL_DEFINE2()
2222 current->restart_block.nanosleep.rmtp = rmtp; in SYSCALL_DEFINE2()
2237 return -EFAULT; in SYSCALL_DEFINE2()
2240 return -EINVAL; in SYSCALL_DEFINE2()
2242 current->restart_block.fn = do_no_restart_syscall; in SYSCALL_DEFINE2()
2243 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; in SYSCALL_DEFINE2()
2244 current->restart_block.nanosleep.compat_rmtp = rmtp; in SYSCALL_DEFINE2()
2251 * Functions related to boot-time initialization:
2259 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; in hrtimers_prepare_cpu()
2261 clock_b->cpu_base = cpu_base; in hrtimers_prepare_cpu()
2262 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); in hrtimers_prepare_cpu()
2263 timerqueue_init_head(&clock_b->active); in hrtimers_prepare_cpu()
2266 cpu_base->cpu = cpu; in hrtimers_prepare_cpu()
2276 cpu_base->active_bases = 0; in hrtimers_cpu_starting()
2277 cpu_base->hres_active = 0; in hrtimers_cpu_starting()
2278 cpu_base->hang_detected = 0; in hrtimers_cpu_starting()
2279 cpu_base->next_timer = NULL; in hrtimers_cpu_starting()
2280 cpu_base->softirq_next_timer = NULL; in hrtimers_cpu_starting()
2281 cpu_base->expires_next = KTIME_MAX; in hrtimers_cpu_starting()
2282 cpu_base->softirq_expires_next = KTIME_MAX; in hrtimers_cpu_starting()
2283 cpu_base->online = 1; in hrtimers_cpu_starting()
2295 while ((node = timerqueue_getnext(&old_base->active))) { in migrate_hrtimer_list()
2306 timer->base = new_base; in migrate_hrtimer_list()
2331 raw_spin_lock(&old_base->lock); in hrtimers_cpu_dying()
2332 raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); in hrtimers_cpu_dying()
2335 migrate_hrtimer_list(&old_base->clock_base[i], in hrtimers_cpu_dying()
2336 &new_base->clock_base[i]); in hrtimers_cpu_dying()
2344 /* Tell the other CPU to retrigger the next event */ in hrtimers_cpu_dying()
2347 raw_spin_unlock(&new_base->lock); in hrtimers_cpu_dying()
2348 old_base->online = 0; in hrtimers_cpu_dying()
2349 raw_spin_unlock(&old_base->lock); in hrtimers_cpu_dying()