/******************************************************************************
 *
 * Copyright 2014 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_osi_alarm"

#include "osi/include/alarm.h"

#include <android_bluetooth_sysprop.h>
#include <base/cancelable_callback.h>
#include <bluetooth/log.h>
#include <fcntl.h>
#include <hardware/bluetooth.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include <mutex>

#include "osi/include/allocator.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/list.h"
#include "osi/include/thread.h"
#include "osi/include/wakelock.h"
#include "osi/semaphore.h"
#include "stack/include/main_thread.h"

using base::Bind;
using base::CancelableClosure;
using namespace bluetooth;

// Callback and timer threads should run at RT priority in order to ensure they
// meet audio deadlines. Use this priority for all audio/timer related threads.
static const int THREAD_RT_PRIORITY = 1;

typedef struct {
  size_t count;
  uint64_t total_ms;
  uint64_t max_ms;
} stat_t;

// Alarm-related information and statistics
typedef struct {
  const char* name;
  size_t scheduled_count;
  size_t canceled_count;
  size_t rescheduled_count;
  size_t total_updates;
  uint64_t last_update_ms;
  stat_t overdue_scheduling;
  stat_t premature_scheduling;
} alarm_stats_t;

/* Wrapper around CancelableClosure that lets it be embedded in structs
 * without needing to define a copy operator. */
struct CancelableClosureInStruct {
  base::CancelableClosure i;

  CancelableClosureInStruct& operator=(const CancelableClosureInStruct& in) {
    if (!in.i.callback().is_null()) {
      i.Reset(in.i.callback());
    }
    return *this;
  }
};

struct alarm_t {
  // The mutex is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // mutex to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  std::shared_ptr<std::recursive_mutex> callback_mutex;
  uint64_t creation_time_ms;
  uint64_t period_ms;
  uint64_t deadline_ms;
  uint64_t prev_deadline_ms;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t* queue;  // The processing queue to add this alarm to
  alarm_callback_t callback;
  void* data;
  alarm_stats_t stats;

  bool for_msg_loop;  // True, if the alarm should be processed on message loop
  CancelableClosureInStruct closure;  // posted to message loop for processing
};

// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
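// For example, with the default value of 3000 ms below, an alarm due in
// 1 second keeps a wakelock held until it fires, while an alarm due in
// 10 seconds arms the wake alarm instead so the system may suspend.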
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static std::mutex alarms_mutex;
static list_t* alarms;
static timer_t timer;
static timer_t wakeup_timer;
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t* dispatcher_thread;
static bool dispatcher_thread_active;
static semaphore_t* alarm_expired;

// Default alarm callback thread and queue
static thread_t* default_callback_thread;
static fixed_queue_t* default_callback_queue;

static alarm_t* alarm_new_internal(const char* name, bool is_periodic);
static bool lazy_initialize(void);
static uint64_t now_ms(void);
static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms, alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop);
static void alarm_cancel_internal(alarm_t* alarm);
static void remove_pending_alarm(alarm_t* alarm);
static void schedule_next_instance(alarm_t* alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t* queue, void* context);
static void timer_callback(void* data);
static void callback_dispatch(void* context);
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer);
static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms, uint64_t deadline_ms);
// Registers |queue| for processing alarm callbacks on |thread|.
// |queue| may not be NULL. |thread| may not be NULL.
static void alarm_register_processing_queue(fixed_queue_t* queue, thread_t* thread);

static void update_stat(stat_t* stat, uint64_t delta_ms) {
  if (stat->max_ms < delta_ms) {
    stat->max_ms = delta_ms;
  }
  stat->total_ms += delta_ms;
  stat->count++;
}

alarm_t* alarm_new(const char* name) { return alarm_new_internal(name, false); }

alarm_t* alarm_new_periodic(const char* name) { return alarm_new_internal(name, true); }

static alarm_t* alarm_new_internal(const char* name, bool is_periodic) {
  // Make sure we have a list we can insert alarms into.
  if (!alarms && !lazy_initialize()) {
    log::fatal("initialization failed");  // if initialization failed, we
                                          // should not continue
    return NULL;
  }

  alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));

  std::shared_ptr<std::recursive_mutex> ptr(new std::recursive_mutex());
  ret->callback_mutex = ptr;
  ret->is_periodic = is_periodic;
  ret->stats.name = osi_strdup(name);

  ret->for_msg_loop = false;
  // placement new
  new (&ret->closure) CancelableClosureInStruct();

  // NOTE: The stats were reset by osi_calloc() above

  return ret;
}

void alarm_free(alarm_t* alarm) {
  if (!alarm) {
    return;
  }

  alarm_cancel(alarm);

  osi_free((void*)alarm->stats.name);
  alarm->closure.~CancelableClosureInStruct();
  alarm->callback_mutex.reset();
  osi_free(alarm);
}

uint64_t alarm_get_remaining_ms(const alarm_t* alarm) {
  log::assert_that(alarm != NULL, "assert failed: alarm != NULL");
  uint64_t remaining_ms = 0;
  uint64_t just_now_ms = now_ms();

  std::lock_guard<std::mutex> lock(alarms_mutex);
  if (alarm->deadline_ms > just_now_ms) {
    remaining_ms = alarm->deadline_ms - just_now_ms;
  }

  return remaining_ms;
}

void alarm_set(alarm_t* alarm, uint64_t interval_ms, alarm_callback_t cb, void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, default_callback_queue, false);
}

void alarm_set_on_mloop(alarm_t* alarm, uint64_t interval_ms, alarm_callback_t cb, void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, NULL, true);
}
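
// Illustrative usage sketch (hypothetical caller, not part of this module):
// a client allocates an alarm once, arms it with a callback and interval of
// its choosing, and may cancel or free it later. The callback name, interval,
// and context below are made up for illustration.
//
//   static void my_timeout_cb(void* data) {
//     // Runs on the default alarm callback thread.
//   }
//
//   alarm_t* t = alarm_new("my_module_timer");
//   alarm_set(t, 2000 /* ms */, my_timeout_cb, my_context);
//   ...
//   alarm_cancel(t);  // Returns only after any in-flight callback finishes.
//   alarm_free(t);    // Cancels first if the alarm is still scheduled.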

// Runs in exclusion with alarm_cancel and timer_callback.
static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms, alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop) {
  log::assert_that(alarms != NULL, "assert failed: alarms != NULL");
  log::assert_that(alarm != NULL, "assert failed: alarm != NULL");
  log::assert_that(cb != NULL, "assert failed: cb != NULL");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarm->creation_time_ms = now_ms();
  alarm->period_ms = period_ms;
  alarm->queue = queue;
  alarm->callback = cb;
  alarm->data = data;
  alarm->for_msg_loop = for_msg_loop;

  schedule_next_instance(alarm);
  alarm->stats.scheduled_count++;
}

void alarm_cancel(alarm_t* alarm) {
  log::assert_that(alarms != NULL, "assert failed: alarms != NULL");
  if (!alarm) {
    return;
  }

  std::shared_ptr<std::recursive_mutex> local_mutex_ref;
  {
    std::lock_guard<std::mutex> lock(alarms_mutex);
    local_mutex_ref = alarm->callback_mutex;
    alarm_cancel_internal(alarm);
  }

  // If the callback for |alarm| is in progress, wait here until it completes.
  std::lock_guard<std::recursive_mutex> lock(*local_mutex_ref);
}

// Internal implementation of canceling an alarm.
// The caller must hold the |alarms_mutex|
static void alarm_cancel_internal(alarm_t* alarm) {
  bool needs_reschedule = (!list_is_empty(alarms) && list_front(alarms) == alarm);

  remove_pending_alarm(alarm);

  alarm->deadline_ms = 0;
  alarm->prev_deadline_ms = 0;
  alarm->callback = NULL;
  alarm->data = NULL;
  alarm->stats.canceled_count++;
  alarm->queue = NULL;

  if (needs_reschedule) {
    reschedule_root_alarm();
  }
}

bool alarm_is_scheduled(const alarm_t* alarm) {
  if ((alarms == NULL) || (alarm == NULL)) {
    return false;
  }
  return alarm->callback != NULL;
}

void alarm_cleanup(void) {
  // If lazy_initialize never ran, there is nothing else to do.
  if (!alarms) {
    return;
  }

  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;
}

static bool lazy_initialize(void) {
  log::assert_that(alarms == NULL, "assert failed: alarms == NULL");

  // timer_t doesn't have an invalid value, so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  // Some platforms are not wired up to be woken up by the controller.
  // On those platforms, if we go to sleep with a timer armed, it will
  // continue counting during sleep. To prevent unwanted timer fires on
  // those platforms, use CLOCK_MONOTONIC, which does not count up during
  // sleep.
  bool wakeup_supported = android::sysprop::bluetooth::hardware::wakeup_supported().value_or(true);
  clockid_t alarm_clockid = wakeup_supported ? CLOCK_BOOTTIME_ALARM : CLOCK_MONOTONIC;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarms = list_new(NULL);
  if (!alarms) {
    log::error("unable to allocate alarm list.");
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer)) {
    goto error;
  }
  timer_initialized = true;

  if (!timer_create_internal(alarm_clockid, &wakeup_timer)) {
    if (!timer_create_internal(CLOCK_BOOTTIME, &wakeup_timer)) {
      goto error;
    }
  }
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    log::error("unable to create alarm expired semaphore");
    goto error;
  }

  default_callback_thread = thread_new_sized("alarm_default_callbacks", SIZE_MAX);
  if (default_callback_thread == NULL) {
    log::error("unable to create default alarm callbacks thread.");
    goto error;
  }
  thread_set_rt_priority(default_callback_thread, THREAD_RT_PRIORITY);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    log::error("unable to create default alarm callbacks queue.");
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue, default_callback_thread);

  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    log::error("unable to create alarm callback thread.");
    goto error;
  }
  thread_set_rt_priority(dispatcher_thread, THREAD_RT_PRIORITY);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized) {
    timer_delete(wakeup_timer);
  }

  if (timer_initialized) {
    timer_delete(timer);
  }

  list_free(alarms);
  alarms = NULL;

  return false;
}

static uint64_t now_ms(void) {
  log::assert_that(alarms != NULL, "assert failed: alarms != NULL");

  struct timespec ts;
  if (clock_gettime(CLOCK_ID, &ts) == -1) {
    log::error("unable to get current time: {}", strerror(errno));
    return 0;
  }

  return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}

// Remove alarm from internal alarm list and the processing queue
// The caller must hold the |alarms_mutex|
static void remove_pending_alarm(alarm_t* alarm) {
  list_remove(alarms, alarm);

  if (alarm->for_msg_loop) {
    alarm->closure.i.Cancel();
  } else {
    while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
      // Remove all repeated alarm instances from the queue.
      // NOTE: We are defensive here - we shouldn't have repeated alarm
      // instances
    }
  }
}

// Must be called with |alarms_mutex| held
static void schedule_next_instance(alarm_t* alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule = (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback) {
    remove_pending_alarm(alarm);
  }

  // Calculate the next deadline for this alarm
  uint64_t just_now_ms = now_ms();
  uint64_t ms_into_period = 0;
  if ((alarm->is_periodic) && (alarm->period_ms != 0)) {
    ms_into_period = ((just_now_ms - alarm->creation_time_ms) % alarm->period_ms);
  }
  alarm->deadline_ms = just_now_ms + (alarm->period_ms - ms_into_period);
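  // Worked example: a periodic alarm created at t = 0 ms with period_ms = 100
  // that is re-armed here at just_now_ms = 430 gives ms_into_period = 30, so
  // the next deadline is 430 + (100 - 30) = 500 ms. Periodic instances thus
  // stay phase-aligned to the creation time instead of drifting by callback
  // latency. (Non-periodic alarms keep ms_into_period == 0, so the deadline
  // is simply just_now_ms + period_ms.)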

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) || ((alarm_t*)list_front(alarms))->deadline_ms > alarm->deadline_ms) {
    list_prepend(alarms, alarm);
  } else {
    for (list_node_t* node = list_begin(alarms); node != list_end(alarms); node = list_next(node)) {
      list_node_t* next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t*)list_node(next))->deadline_ms > alarm->deadline_ms) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule || (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}

// NOTE: must be called with |alarms_mutex| held
static void reschedule_root_alarm(void) {
  log::assert_that(alarms != NULL, "assert failed: alarms != NULL");

  const bool timer_was_set = timer_set;
  alarm_t* next;
  int64_t next_expiration;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms)) {
    goto done;
  }

  next = static_cast<alarm_t*>(list_front(alarms));
  next_expiration = next->deadline_ms - now_ms();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    if (!timer_set) {
      if (!wakelock_acquire()) {
        log::error("unable to acquire wake lock");
      }
    }

    timer_time.it_value.tv_sec = (next->deadline_ms / 1000);
    timer_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
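    // With a 64-bit time_t this is 2^62 seconds (roughly 1.5 * 10^11 years
    // past the epoch), i.e. effectively "never".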
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline_ms / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1) {
      log::error("unable to set wakeup timer: {}", strerror(errno));
    }
  }

done:
  timer_set = timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1) {
    log::error("unable to set timer: {}", strerror(errno));
  }

  // If the next expiration was in the past (e.g. a short timer that got
  // context switched), the timer might have disarmed itself. Detect this case
  // and work around it by manually signalling the |alarm_expired| semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 && time_to_expire.it_value.tv_nsec == 0) {
      log::info("alarm expiration too close for posix timers, switching to guns");
      semaphore_post(alarm_expired);
    }
  }
}

static void alarm_register_processing_queue(fixed_queue_t* queue, thread_t* thread) {
  log::assert_that(queue != NULL, "assert failed: queue != NULL");
  log::assert_that(thread != NULL, "assert failed: thread != NULL");

  fixed_queue_register_dequeue(queue, thread_get_reactor(thread), alarm_queue_ready, NULL);
}

static void alarm_ready_generic(alarm_t* alarm, std::unique_lock<std::mutex>& lock) {
  if (alarm == NULL) {
    return;  // The alarm was probably canceled
  }

  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  if (!alarm->callback) {
    log::fatal("timer callback is NULL! Name={}", alarm->stats.name);
  }
  alarm_callback_t callback = alarm->callback;
  void* data = alarm->data;
  uint64_t deadline_ms = alarm->deadline_ms;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline_ms = alarm->prev_deadline_ms;
  } else {
    alarm->deadline_ms = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
    alarm->queue = NULL;
  }

  // Increment the reference count of the mutex so it doesn't get freed
  // before the callback gets finished executing.
  std::shared_ptr<std::recursive_mutex> local_mutex_ref = alarm->callback_mutex;
  std::lock_guard<std::recursive_mutex> cb_lock(*local_mutex_ref);
  lock.unlock();

  // Update the statistics
  update_scheduling_stats(&alarm->stats, now_ms(), deadline_ms);

  // NOTE: Do NOT access "alarm" after the callback, as a safety precaution
  // in case the callback itself deleted the alarm.
  callback(data);
}

static void alarm_ready_mloop(alarm_t* alarm) {
  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_ready_generic(alarm, lock);
}

static void alarm_queue_ready(fixed_queue_t* queue, void* /* context */) {
  log::assert_that(queue != NULL, "assert failed: queue != NULL");

  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
  alarm_ready_generic(alarm, lock);
}

// Callback function for wake alarms and our posix timer
static void timer_callback(void* /* ptr */) { semaphore_post(alarm_expired); }

// Function running on |dispatcher_thread| that performs the following:
// (1) Receives a signal using |alarm_expired| that the alarm has expired
// (2) Dispatches the alarm callback for processing by the corresponding
//     thread for that alarm.
static void callback_dispatch(void* /* context */) {
  while (true) {
    semaphore_wait(alarm_expired);
    if (!dispatcher_thread_active) {
      break;
    }

    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_t* alarm;

    // Take into account that the alarm may get cancelled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Exit right away since there's nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline_ms > now_ms()) {
      reschedule_root_alarm();
      continue;
    }

    list_remove(alarms, alarm);

    if (alarm->is_periodic) {
      alarm->prev_deadline_ms = alarm->deadline_ms;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    if (alarm->for_msg_loop) {
      if (!get_main_thread()) {
        log::error("message loop already NULL. Alarm: {}", alarm->stats.name);
        continue;
      }

      alarm->closure.i.Reset(Bind(alarm_ready_mloop, alarm));
      get_main_thread()->DoInThread(FROM_HERE, alarm->closure.i.callback());
    } else {
      fixed_queue_enqueue(alarm->queue, alarm);
    }
  }

  log::info("Callback thread exited");
}

static bool timer_create_internal(const clockid_t clock_id, timer_t* timer) {
  log::assert_that(timer != NULL, "assert failed: timer != NULL");

  struct sigevent sigevent;
  // create timer with RT priority thread
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setschedpolicy(&thread_attr, SCHED_FIFO);
  struct sched_param param;
  param.sched_priority = THREAD_RT_PRIORITY;
  pthread_attr_setschedparam(&thread_attr, &param);

  memset(&sigevent, 0, sizeof(sigevent));
  sigevent.sigev_notify = SIGEV_THREAD;
  sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
  sigevent.sigev_notify_attributes = &thread_attr;
  if (timer_create(clock_id, &sigevent, timer) == -1) {
    log::error("unable to create timer with clock {}: {}", clock_id, strerror(errno));
    if (clock_id == CLOCK_BOOTTIME_ALARM) {
      log::error(
              "The kernel might not have support for "
              "timer_create(CLOCK_BOOTTIME_ALARM): "
              "https://lwn.net/Articles/429925/");
      log::error(
              "See following patches: "
              "https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/log/"
              "?qt=grep&q=CLOCK_BOOTTIME_ALARM");
    }
    return false;
  }

  return true;
}

static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms, uint64_t deadline_ms) {
  stats->total_updates++;
  stats->last_update_ms = now_ms;

  if (deadline_ms < now_ms) {
    // Overdue scheduling
    uint64_t delta_ms = now_ms - deadline_ms;
    update_stat(&stats->overdue_scheduling, delta_ms);
  } else if (deadline_ms > now_ms) {
    // Premature scheduling
    uint64_t delta_ms = deadline_ms - now_ms;
    update_stat(&stats->premature_scheduling, delta_ms);
  }
}

static void dump_stat(int fd, stat_t* stat, const char* description) {
  uint64_t average_time_ms = 0;
  if (stat->count != 0) {
    average_time_ms = stat->total_ms / stat->count;
  }

  dprintf(fd, "%-51s: %llu / %llu / %llu\n", description, (unsigned long long)stat->total_ms,
          (unsigned long long)stat->max_ms, (unsigned long long)average_time_ms);
}

void alarm_debug_dump(int fd) {
  dprintf(fd, "\nBluetooth Alarms Statistics:\n");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  if (alarms == NULL) {
    dprintf(fd, " None\n");
    return;
  }

  uint64_t just_now_ms = now_ms();

  dprintf(fd, " Total Alarms: %zu\n\n", list_length(alarms));

  // Dump info for each alarm
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms); node = list_next(node)) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    alarm_stats_t* stats = &alarm->stats;

    dprintf(fd, " Alarm : %s (%s)\n", stats->name, (alarm->is_periodic) ? "PERIODIC" : "SINGLE");

    dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n", " Action counts (sched/resched/exec/cancel)",
            stats->scheduled_count, stats->rescheduled_count, stats->total_updates,
            stats->canceled_count);

    dprintf(fd, "%-51s: %zu / %zu\n", " Deviation counts (overdue/premature)",
            stats->overdue_scheduling.count, stats->premature_scheduling.count);

    dprintf(fd, "%-51s: %llu / %llu / %lld\n", " Time in ms (since creation/interval/remaining)",
            (unsigned long long)(just_now_ms - alarm->creation_time_ms),
            (unsigned long long)alarm->period_ms, (long long)(alarm->deadline_ms - just_now_ms));

    dump_stat(fd, &stats->overdue_scheduling, " Overdue scheduling time in ms (total/max/avg)");

    dump_stat(fd, &stats->premature_scheduling,
              " Premature scheduling time in ms (total/max/avg)");

    dprintf(fd, "\n");
  }
}