/*
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief  Kernel timer subsystem
 * @defgroup timer Timers
 *
 * The timer subsystem allows functions to be scheduled for later
 * execution.  Each timer object is used to cause one function to
 * be executed at a later time.
 *
 * Timer callback functions are called in interrupt context.
 *
 * @{
 */
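
/*
 * Typical lifecycle, as a sketch (the 1 ms delay and the callback name
 * my_cb are illustrative, not part of this file):
 *
 *   static timer_t t;
 *
 *   timer_initialize(&t);
 *   timer_set_oneshot_ns(&t, 1000000, my_cb, NULL); // fire once in ~1 ms
 *   timer_cancel_etc(&t, true);                     // wait for my_cb to finish
 */
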
#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <list.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/debug.h>
#include <kernel/spinlock.h>
#include <platform/timer.h>
#include <platform.h>

#define LOCAL_TRACE 0

spin_lock_t timer_lock;

struct timer_state {
    struct list_node timer_queue;
} __CPU_ALIGN;

static struct timer_state timers[SMP_MAX_CPUS];

static enum handler_return timer_tick(void *arg, lk_time_ns_t now);

/**
 * @brief  Initialize a timer object
 */
void timer_initialize(timer_t *timer)
{
    *timer = (timer_t)TIMER_INITIAL_VALUE(*timer);
}

static void insert_timer_in_queue(uint cpu, timer_t *timer)
{
    timer_t *entry;

    DEBUG_ASSERT(arch_ints_disabled());

    LTRACEF("timer %p, cpu %u, scheduled %llu, periodic %llu\n", timer, cpu,
            timer->scheduled_time, timer->periodic_time);

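    /*
     * The queue is kept sorted by scheduled_time; insert in front of the
     * first entry that expires after this one.
     */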
    list_for_every_entry(&timers[cpu].timer_queue, entry, timer_t, node) {
        if (time_gt(entry->scheduled_time, timer->scheduled_time)) {
            list_add_before(&entry->node, &timer->node);
            return;
        }
    }

    /* walked off the end of the list */
    list_add_tail(&timers[cpu].timer_queue, &timer->node);
}

static void timer_set(timer_t *timer, lk_time_ns_t delay, lk_time_ns_t period,
                      timer_callback callback, void *arg)
{
    lk_time_ns_t now;

    LTRACEF("timer %p, delay %llu, period %llu, callback %p, arg %p\n", timer,
            delay, period, callback, arg);

    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&timer_lock, state);

    if (list_in_list(&timer->node)) {
        panic("timer %p already in list\n", timer);
    }

    now = current_time_ns();
    timer->scheduled_time = now + delay;
    timer->periodic_time = period;
    timer->callback = callback;
    timer->arg = arg;

    LTRACEF("scheduled time %llu\n", timer->scheduled_time);

    uint cpu = arch_curr_cpu_num();

    /*
     * It is not safe to move the timer to a new cpu while the callback is
     * running.
     */
    DEBUG_ASSERT(!timer->running || timer->cpu == cpu);
    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

    timer->cpu = cpu;

    insert_timer_in_queue(cpu, timer);

#if PLATFORM_HAS_DYNAMIC_TIMER
    if (list_peek_head_type(&timers[cpu].timer_queue, timer_t, node) == timer) {
        /* we just modified the head of the timer queue */
        LTRACEF("setting new timer for %llu nanosecs\n", delay);
        platform_set_oneshot_timer(timer_tick, timer->scheduled_time);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, state);
}

/**
 * @brief  Set up a timer that executes once
 *
 * This function schedules a callback function to be called once after a
 * specified delay.
 *
 * @param  timer The timer to use
 * @param  delay The delay, in ns, before the callback is executed
 * @param  callback  The function to call when the timer expires
 * @param  arg  The argument to pass to the callback
 *
 * The timer callback is declared as:
 *   enum handler_return callback(struct timer *, lk_time_ns_t now, void *arg) { ... }
 */
void timer_set_oneshot_ns(timer_t *timer, lk_time_ns_t delay,
                          timer_callback callback, void *arg)
{
    if (delay == 0)
        delay = 1;
    timer_set(timer, delay, 0, callback, arg);
}
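
/*
 * Example (sketch only; the callback name, timer object, and 10 ms delay
 * are illustrative, not part of this file):
 *
 *   static enum handler_return timeout_cb(struct timer *t, lk_time_ns_t now,
 *                                         void *arg)
 *   {
 *       // runs in interrupt context: keep it short and do not block
 *       return INT_NO_RESCHEDULE;
 *   }
 *
 *   timer_set_oneshot_ns(&my_timer, 10ULL * 1000 * 1000, timeout_cb, NULL);
 */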

/**
 * @brief  Set up a timer that executes repeatedly
 *
 * This function schedules a callback function to be called repeatedly at a
 * specified period; the first call happens one period after the timer is set.
 *
 * @param  timer The timer to use
 * @param  period The period, in ns, between executions of the callback
 * @param  callback  The function to call when the timer expires
 * @param  arg  The argument to pass to the callback
 *
 * The timer callback is declared as:
 *   enum handler_return callback(struct timer *, lk_time_ns_t now, void *arg) { ... }
 */
void timer_set_periodic_ns(timer_t *timer, lk_time_ns_t period,
                           timer_callback callback, void *arg)
{
    if (period == 0)
        period = 1;
    timer_set(timer, period, period, callback, arg);
}
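
/*
 * Sketch of periodic use (the 1 s period and names are illustrative):
 *
 *   timer_set_periodic_ns(&heartbeat, 1000000000ULL, heartbeat_cb, NULL);
 *
 * After each callback returns, timer_tick() requeues the timer one period
 * past the tick that fired it, unless the callback canceled or rearmed it.
 */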

/**
 * @brief  Cancel a pending timer
 *
 * If @a wait is true, do not return until any in-flight callback has
 * finished running. Waiting is required before freeing the timer.
 */
void timer_cancel_etc(timer_t *timer, bool wait)
{
    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    /* Interrupts must be enabled when waiting */
    DEBUG_ASSERT(!wait || !arch_ints_disabled());

    /*
     * If interrupts are enabled, we should wait for the callback to finish.
     * This is not a strict requirement, but helps find call sites that should
     * be updated. We must wait before freeing the timer.
     */
    DEBUG_ASSERT(arch_ints_disabled() || wait);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&timer_lock, state);

    /*
     * It is safe to cancel the timer without waiting on the same cpu that the
     * callback runs on.
     */
    DEBUG_ASSERT(wait || arch_curr_cpu_num() == timer->cpu);

    while (wait && timer->running) {
        spin_unlock_irqrestore(&timer_lock, state);
        thread_yield();
        spin_lock_irqsave(&timer_lock, state);
    }

#if PLATFORM_HAS_DYNAMIC_TIMER
    uint cpu = arch_curr_cpu_num(); /* cpu could have changed in thread_yield */
    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

    timer_t *oldhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
#endif

    if (list_in_list(&timer->node))
        list_delete(&timer->node);

    /* clear the period to keep the timer from being reinserted into the
     * queue if we were called from its periodic timer callback
     */
    timer->periodic_time = 0;

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* see if we've just modified the head of the current cpu timer queue */
    timer_t *newhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (newhead == NULL) {
        LTRACEF("clearing old hw timer, nothing in the queue\n");
        platform_stop_timer();
    } else if (newhead != oldhead) {
        lk_time_ns_t wakeup_time;
        lk_time_ns_t now = current_time_ns();

        if (time_lt(newhead->scheduled_time, now))
            wakeup_time = now;
        else
            wakeup_time = newhead->scheduled_time;

        LTRACEF("setting new timer to %llu\n", wakeup_time);
        platform_set_oneshot_timer(timer_tick, wakeup_time);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, state);
}
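
/*
 * A timer embedded in dynamically allocated memory must be canceled with
 * wait = true before that memory is freed, so a concurrently running
 * callback cannot touch freed memory. Sketch (obj and its type are
 * illustrative):
 *
 *   timer_cancel_etc(&obj->timer, true); // returns after any callback ends
 *   free(obj);
 */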

/* called at interrupt time to process any pending timers */
static enum handler_return timer_tick(void *arg, lk_time_ns_t now)
{
    timer_t *timer;
    enum handler_return ret = INT_NO_RESCHEDULE;

    DEBUG_ASSERT(arch_ints_disabled());

    THREAD_STATS_INC(timer_ints);
//  KEVLOG_TIMER_TICK(); // enable only if necessary

    uint cpu = arch_curr_cpu_num();
    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);

    LTRACEF("cpu %u now %llu, fp %p\n", cpu, now, __GET_FRAME());

    spin_lock(&timer_lock);

    for (;;) {
        /* see if there's an event to process */
        timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
        if (likely(timer == 0))
            break;
        LTRACEF("next item on timer queue %p at %llu now %llu (%p, arg %p)\n",
                timer, timer->scheduled_time, now, timer->callback, timer->arg);
        if (likely(time_lt(now, timer->scheduled_time)))
            break;

        /* process it */
        LTRACEF("timer %p\n", timer);
        DEBUG_ASSERT(timer && timer->magic == TIMER_MAGIC);
        list_delete(&timer->node);
        timer->running = true;

        /* we pulled it off the list, release the list lock to handle it */
        spin_unlock(&timer_lock);

        LTRACEF("dequeued timer %p, scheduled %llu periodic %llu\n", timer,
                timer->scheduled_time, timer->periodic_time);

        THREAD_STATS_INC(timers);

        bool periodic = timer->periodic_time > 0;

        LTRACEF("timer %p firing callback %p, arg %p\n", timer, timer->callback, timer->arg);
        KEVLOG_TIMER_CALL(timer->callback, timer->arg);
        if (timer->callback(timer, now, timer->arg) == INT_RESCHEDULE)
            ret = INT_RESCHEDULE;

        /* it may have been requeued or periodic, grab the lock so we can safely inspect it */
        spin_lock(&timer_lock);

        /*
         * Check that timer did not get freed and overwritten while the callback
         * was running.
         */
        DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

        timer->running = false;

        /* if it was a periodic timer and it hasn't been requeued
         * by the callback put it back in the list
         */
        if (periodic && !list_in_list(&timer->node) && timer->periodic_time > 0) {
            LTRACEF("periodic timer, period %llu\n", timer->periodic_time);
            timer->scheduled_time = now + timer->periodic_time;
            insert_timer_in_queue(cpu, timer);
        }
    }

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* reset the timer to the next event */
    timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (timer) {
        /* has to be the case or it would have fired already */
        DEBUG_ASSERT(time_gt(timer->scheduled_time, now));

        LTRACEF("setting new timer for %llu nanosecs from now (%llu) for event %p\n",
                timer->scheduled_time - now, timer->scheduled_time, timer);
        platform_set_oneshot_timer(timer_tick, timer->scheduled_time);
    }
#endif

    /* we're done manipulating the timer queue; the lock was taken
     * unconditionally above, so release it unconditionally here
     */
    spin_unlock(&timer_lock);

    return ret;
}

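/**
 * @brief  Initialize the timer subsystem
 *
 * Sets up the per-cpu timer queues; on platforms without a dynamic timer it
 * also registers a 10 ms periodic tick.
 */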
void timer_init(void)
{
    timer_lock = SPIN_LOCK_INITIAL_VALUE;
    for (uint i = 0; i < SMP_MAX_CPUS; i++) {
        list_initialize(&timers[i].timer_queue);
    }
#if !PLATFORM_HAS_DYNAMIC_TIMER
    /* register for a periodic timer tick */
    platform_set_periodic_timer(timer_tick, NULL, 10); /* 10ms */
#endif
}