1 /******************************************************************************
2 *
3 * Copyright 2014 Google, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 ******************************************************************************/
18
19 #include "internal_include/bt_target.h"
20
21 #define LOG_TAG "bt_osi_alarm"
22
23 #include "osi/include/alarm.h"
24
25 #include <base/cancelable_callback.h>
26 #include <base/logging.h>
27 #include <base/message_loop/message_loop.h>
28 #include <errno.h>
29 #include <fcntl.h>
30 #include <inttypes.h>
31 #include <malloc.h>
32 #include <pthread.h>
33 #include <signal.h>
34 #include <string.h>
35 #include <time.h>
36
37 #include <hardware/bluetooth.h>
38
#include <memory>
#include <mutex>
#include <new>
40
41 #include "osi/include/allocator.h"
42 #include "osi/include/fixed_queue.h"
43 #include "osi/include/list.h"
44 #include "osi/include/log.h"
45 #include "osi/include/osi.h"
46 #include "osi/include/semaphore.h"
47 #include "osi/include/thread.h"
48 #include "osi/include/wakelock.h"
49 #include "stack/include/btu.h"
50
51 using base::Bind;
52 using base::CancelableClosure;
53 using base::MessageLoop;
54
// Callback and timer threads should run at RT priority in order to ensure they
// meet audio deadlines. Use this priority for all audio/timer related thread.
static const int THREAD_RT_PRIORITY = 1;

// Running statistics for one category of scheduling deviation: number of
// samples recorded, their sum, and the worst-case value, all in milliseconds.
// Updated by update_stat() and printed by dump_stat().
typedef struct {
  size_t count;       // Number of samples folded in
  uint64_t total_ms;  // Sum of all samples, for computing the average
  uint64_t max_ms;    // Largest single sample seen
} stat_t;
64
// Alarm-related information and statistics
typedef struct {
  const char* name;            // Alarm name (heap copy owned by the alarm)
  size_t scheduled_count;      // Times the alarm was set via alarm_set*()
  size_t canceled_count;       // Times the alarm was canceled
  size_t rescheduled_count;    // Periodic re-arms done by the dispatcher
  size_t total_updates;        // Callback executions recorded
  uint64_t last_update_ms;     // Timestamp of the most recent callback
  stat_t overdue_scheduling;   // Callbacks dispatched after their deadline
  stat_t premature_scheduling; // Callbacks dispatched before their deadline
} alarm_stats_t;
76
/* Wrapper around CancellableClosure that let it be embedded in structs, without
 * need to define copy operator. */
struct CancelableClosureInStruct {
  base::CancelableClosure i;

  // base::CancelableClosure is not copyable, so "copying" is emulated by
  // re-binding |i| to the same underlying callback held by |in| (if any).
  CancelableClosureInStruct& operator=(const CancelableClosureInStruct& in) {
    if (!in.i.callback().is_null()) i.Reset(in.i.callback());
    return *this;
  }
};
87
struct alarm_t {
  // The mutex is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // mutex to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  std::shared_ptr<std::recursive_mutex> callback_mutex;
  uint64_t creation_time_ms;  // When the alarm was (last) set; periodic
                              // deadlines are anchored to this time
  uint64_t period_ms;         // Interval for periodic, delay for one-shot
  uint64_t deadline_ms;       // Absolute CLOCK_BOOTTIME expiry time; 0 when
                              // the alarm is not scheduled
  uint64_t prev_deadline_ms;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t* queue;  // The processing queue to add this alarm to
  alarm_callback_t callback;  // User callback; NULL when not scheduled
  void* data;                 // Opaque argument passed to |callback|
  alarm_stats_t stats;        // Per-alarm statistics for debug dumps

  bool for_msg_loop;  // True, if the alarm should be processed on message loop
  CancelableClosureInStruct closure;  // posted to message loop for processing
};
109
// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static std::mutex alarms_mutex;
// Pending alarms, sorted by deadline (earliest first). NULL until
// lazy_initialize() runs.
static list_t* alarms;
// POSIX timer on CLOCK_BOOTTIME used for near-term expirations.
static timer_t timer;
// POSIX timer on CLOCK_BOOTTIME_ALARM (falls back to CLOCK_BOOTTIME) used to
// wake the system from suspend for far-out deadlines.
static timer_t wakeup_timer;
// True while |timer| is armed; also implies a wakelock is held.
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t* dispatcher_thread;
static bool dispatcher_thread_active;
// Signaled by timer_callback() each time a timer fires; waited on by
// callback_dispatch().
static semaphore_t* alarm_expired;

// Default alarm callback thread and queue
static thread_t* default_callback_thread;
static fixed_queue_t* default_callback_queue;

static alarm_t* alarm_new_internal(const char* name, bool is_periodic);
static bool lazy_initialize(void);
static uint64_t now_ms(void);
static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop);
static void alarm_cancel_internal(alarm_t* alarm);
static void remove_pending_alarm(alarm_t* alarm);
static void schedule_next_instance(alarm_t* alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t* queue, void* context);
static void timer_callback(void* data);
static void callback_dispatch(void* context);
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer);
static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms,
                                    uint64_t deadline_ms);
// Registers |queue| for processing alarm callbacks on |thread|.
// |queue| may not be NULL. |thread| may not be NULL.
static void alarm_register_processing_queue(fixed_queue_t* queue,
                                            thread_t* thread);
155
update_stat(stat_t * stat,uint64_t delta_ms)156 static void update_stat(stat_t* stat, uint64_t delta_ms) {
157 if (stat->max_ms < delta_ms) stat->max_ms = delta_ms;
158 stat->total_ms += delta_ms;
159 stat->count++;
160 }
161
alarm_new(const char * name)162 alarm_t* alarm_new(const char* name) { return alarm_new_internal(name, false); }
163
alarm_new_periodic(const char * name)164 alarm_t* alarm_new_periodic(const char* name) {
165 return alarm_new_internal(name, true);
166 }
167
alarm_new_internal(const char * name,bool is_periodic)168 static alarm_t* alarm_new_internal(const char* name, bool is_periodic) {
169 // Make sure we have a list we can insert alarms into.
170 if (!alarms && !lazy_initialize()) {
171 CHECK(false); // if initialization failed, we should not continue
172 return NULL;
173 }
174
175 alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));
176
177 std::shared_ptr<std::recursive_mutex> ptr(new std::recursive_mutex());
178 ret->callback_mutex = ptr;
179 ret->is_periodic = is_periodic;
180 ret->stats.name = osi_strdup(name);
181
182 ret->for_msg_loop = false;
183 // placement new
184 new (&ret->closure) CancelableClosureInStruct();
185
186 // NOTE: The stats were reset by osi_calloc() above
187
188 return ret;
189 }
190
alarm_free(alarm_t * alarm)191 void alarm_free(alarm_t* alarm) {
192 if (!alarm) return;
193
194 alarm_cancel(alarm);
195
196 osi_free((void*)alarm->stats.name);
197 alarm->closure.~CancelableClosureInStruct();
198 osi_free(alarm);
199 }
200
alarm_get_remaining_ms(const alarm_t * alarm)201 uint64_t alarm_get_remaining_ms(const alarm_t* alarm) {
202 CHECK(alarm != NULL);
203 uint64_t remaining_ms = 0;
204 uint64_t just_now_ms = now_ms();
205
206 std::lock_guard<std::mutex> lock(alarms_mutex);
207 if (alarm->deadline_ms > just_now_ms)
208 remaining_ms = alarm->deadline_ms - just_now_ms;
209
210 return remaining_ms;
211 }
212
alarm_set(alarm_t * alarm,uint64_t interval_ms,alarm_callback_t cb,void * data)213 void alarm_set(alarm_t* alarm, uint64_t interval_ms, alarm_callback_t cb,
214 void* data) {
215 alarm_set_internal(alarm, interval_ms, cb, data, default_callback_queue,
216 false);
217 }
218
alarm_set_on_mloop(alarm_t * alarm,uint64_t interval_ms,alarm_callback_t cb,void * data)219 void alarm_set_on_mloop(alarm_t* alarm, uint64_t interval_ms,
220 alarm_callback_t cb, void* data) {
221 alarm_set_internal(alarm, interval_ms, cb, data, NULL, true);
222 }
223
224 // Runs in exclusion with alarm_cancel and timer_callback.
alarm_set_internal(alarm_t * alarm,uint64_t period_ms,alarm_callback_t cb,void * data,fixed_queue_t * queue,bool for_msg_loop)225 static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms,
226 alarm_callback_t cb, void* data,
227 fixed_queue_t* queue, bool for_msg_loop) {
228 CHECK(alarms != NULL);
229 CHECK(alarm != NULL);
230 CHECK(cb != NULL);
231
232 std::lock_guard<std::mutex> lock(alarms_mutex);
233
234 alarm->creation_time_ms = now_ms();
235 alarm->period_ms = period_ms;
236 alarm->queue = queue;
237 alarm->callback = cb;
238 alarm->data = data;
239 alarm->for_msg_loop = for_msg_loop;
240
241 schedule_next_instance(alarm);
242 alarm->stats.scheduled_count++;
243 }
244
// Cancels |alarm| and guarantees that, on return, its callback is not
// executing and will not execute (until the alarm is set again).
// NULL is a harmless no-op. Must NOT be called from the alarm's own callback
// on the same thread path that holds |alarms_mutex|.
void alarm_cancel(alarm_t* alarm) {
  CHECK(alarms != NULL);
  if (!alarm) return;

  // Keep a local reference to the per-alarm callback mutex so it stays alive
  // even if the alarm is freed after we drop |alarms_mutex| below.
  std::shared_ptr<std::recursive_mutex> local_mutex_ref;
  {
    std::lock_guard<std::mutex> lock(alarms_mutex);
    local_mutex_ref = alarm->callback_mutex;
    alarm_cancel_internal(alarm);
  }

  // If the callback for |alarm| is in progress, wait here until it completes.
  std::lock_guard<std::recursive_mutex> lock(*local_mutex_ref);
}
259
260 // Internal implementation of canceling an alarm.
261 // The caller must hold the |alarms_mutex|
alarm_cancel_internal(alarm_t * alarm)262 static void alarm_cancel_internal(alarm_t* alarm) {
263 bool needs_reschedule =
264 (!list_is_empty(alarms) && list_front(alarms) == alarm);
265
266 remove_pending_alarm(alarm);
267
268 alarm->deadline_ms = 0;
269 alarm->prev_deadline_ms = 0;
270 alarm->callback = NULL;
271 alarm->data = NULL;
272 alarm->stats.canceled_count++;
273 alarm->queue = NULL;
274
275 if (needs_reschedule) reschedule_root_alarm();
276 }
277
alarm_is_scheduled(const alarm_t * alarm)278 bool alarm_is_scheduled(const alarm_t* alarm) {
279 if ((alarms == NULL) || (alarm == NULL)) return false;
280 return (alarm->callback != NULL);
281 }
282
// Tears down the alarm subsystem in reverse order of lazy_initialize():
// dispatcher thread first, then the default callback thread/queue, the POSIX
// timers, the semaphore, and finally the alarm list. Safe to call when
// lazy_initialize() never ran.
void alarm_cleanup(void) {
  // If lazy_initialize never ran there is nothing else to do
  if (!alarms) return;

  // Ask the dispatcher loop to exit, wake it up, then free its thread
  // (presumably joining it — confirm in thread.h). This is done before
  // taking |alarms_mutex| because the dispatcher acquires that mutex on
  // every loop iteration.
  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;
}
307
// One-time setup of the alarm subsystem: allocates the |alarms| list, the
// two POSIX timers, the |alarm_expired| semaphore, the default callback
// thread/queue, and the dispatcher thread. Returns true on success. On any
// failure, everything allocated so far is released and false is returned.
static bool lazy_initialize(void) {
  CHECK(alarms == NULL);

  // timer_t doesn't have an invalid value so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarms = list_new(NULL);
  if (!alarms) {
    LOG_ERROR("%s unable to allocate alarm list.", __func__);
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer)) goto error;
  timer_initialized = true;

  // Prefer a wakeup timer that can bring the system out of suspend
  // (CLOCK_BOOTTIME_ALARM); fall back to plain CLOCK_BOOTTIME on kernels
  // without alarm-timer support.
  if (!timer_create_internal(CLOCK_BOOTTIME_ALARM, &wakeup_timer)) {
    if (!timer_create_internal(CLOCK_BOOTTIME, &wakeup_timer)) {
      goto error;
    }
  }
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    LOG_ERROR("%s unable to create alarm expired semaphore", __func__);
    goto error;
  }

  default_callback_thread =
      thread_new_sized("alarm_default_callbacks", SIZE_MAX);
  if (default_callback_thread == NULL) {
    LOG_ERROR("%s unable to create default alarm callbacks thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(default_callback_thread, THREAD_RT_PRIORITY);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    LOG_ERROR("%s unable to create default alarm callbacks queue.", __func__);
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue,
                                  default_callback_thread);

  // Mark the dispatcher active before starting it so the loop in
  // callback_dispatch() does not exit immediately.
  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    LOG_ERROR("%s unable to create alarm callback thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(dispatcher_thread, THREAD_RT_PRIORITY);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  // Unwind everything in reverse; freeing NULL handles is assumed safe here
  // since the same calls are made unconditionally.
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized) timer_delete(wakeup_timer);

  if (timer_initialized) timer_delete(timer);

  list_free(alarms);
  alarms = NULL;

  return false;
}
388
now_ms(void)389 static uint64_t now_ms(void) {
390 CHECK(alarms != NULL);
391
392 struct timespec ts;
393 if (clock_gettime(CLOCK_ID, &ts) == -1) {
394 LOG_ERROR("%s unable to get current time: %s", __func__, strerror(errno));
395 return 0;
396 }
397
398 return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
399 }
400
// Remove alarm from internal alarm list and the processing queue
// The caller must hold the |alarms_mutex|
static void remove_pending_alarm(alarm_t* alarm) {
  list_remove(alarms, alarm);

  if (alarm->for_msg_loop) {
    // Message-loop alarms are pending as a posted closure, not a queue entry.
    alarm->closure.i.Cancel();
  } else {
    while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
      // Remove all repeated alarm instances from the queue.
      // NOTE: We are defensive here - we shouldn't have repeated alarm
      // instances
    }
  }
}
416
// Computes the next deadline for |alarm| and inserts it into the pending
// list (sorted earliest-deadline-first), re-arming the hardware timer if the
// head of the list changed.
// Must be called with |alarms_mutex| held
static void schedule_next_instance(alarm_t* alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback) remove_pending_alarm(alarm);

  // Calculate the next deadline for this alarm
  uint64_t just_now_ms = now_ms();
  uint64_t ms_into_period = 0;
  // Periodic deadlines are anchored to |creation_time_ms| so the period does
  // not drift by callback-dispatch latency.
  if ((alarm->is_periodic) && (alarm->period_ms != 0))
    ms_into_period =
        ((just_now_ms - alarm->creation_time_ms) % alarm->period_ms);
  alarm->deadline_ms = just_now_ms + (alarm->period_ms - ms_into_period);

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) ||
      ((alarm_t*)list_front(alarms))->deadline_ms > alarm->deadline_ms) {
    list_prepend(alarms, alarm);
  } else {
    // Linear scan for the first node whose successor expires strictly later;
    // inserting after it keeps equal-deadline alarms in FIFO order.
    for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
         node = list_next(node)) {
      list_node_t* next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t*)list_node(next))->deadline_ms > alarm->deadline_ms) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule ||
      (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}
456
// Re-arms the hardware timers for the alarm at the head of |alarms| (or
// disarms them if the list is empty), managing the wakelock that keeps the
// system awake for near-term deadlines.
// NOTE: must be called with |alarms_mutex| held
static void reschedule_root_alarm(void) {
  CHECK(alarms != NULL);

  const bool timer_was_set = timer_set;
  alarm_t* next;
  int64_t next_expiration;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms)) goto done;

  next = static_cast<alarm_t*>(list_front(alarms));
  next_expiration = next->deadline_ms - now_ms();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    // Near-term deadline: hold a wakelock and use the ordinary |timer|
    // rather than bouncing in and out of suspend.
    if (!timer_set) {
      if (!wakelock_acquire()) {
        LOG_ERROR("%s unable to acquire wake lock", __func__);
        goto done;
      }
    }

    timer_time.it_value.tv_sec = (next->deadline_ms / 1000);
    timer_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // Far-out deadline: arm the suspend-capable |wakeup_timer| instead of
    // holding a wakelock the whole time.
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline_ms / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1)
      LOG_ERROR("%s unable to set wakeup timer: %s", __func__, strerror(errno));
  }

done:
  // |timer_set| doubles as "wakelock held": release it when transitioning
  // from armed to disarmed.
  timer_set =
      timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1)
    LOG_ERROR("%s unable to set timer: %s", __func__, strerror(errno));

  // If next expiration was in the past (e.g. short timer that got context
  // switched) then the timer might have disarmed itself. Detect this case and
  // work around it by manually signalling the |alarm_expired| semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 &&
        time_to_expire.it_value.tv_nsec == 0) {
      LOG_DEBUG(
          "%s alarm expiration too close for posix timers, switching to guns",
          __func__);
      semaphore_post(alarm_expired);
    }
  }
}
546
alarm_register_processing_queue(fixed_queue_t * queue,thread_t * thread)547 static void alarm_register_processing_queue(fixed_queue_t* queue,
548 thread_t* thread) {
549 CHECK(queue != NULL);
550 CHECK(thread != NULL);
551
552 fixed_queue_register_dequeue(queue, thread_get_reactor(thread),
553 alarm_queue_ready, NULL);
554 }
555
// Services one expired alarm: snapshots its callback state, takes the
// per-alarm callback mutex, releases |lock| (which must hold |alarms_mutex|),
// and invokes the callback. |alarm| may be NULL if it was canceled between
// dispatch and servicing.
static void alarm_ready_generic(alarm_t* alarm,
                                std::unique_lock<std::mutex>& lock) {
  if (alarm == NULL) {
    return;  // The alarm was probably canceled
  }

  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  if (!alarm->callback) {
    LOG(FATAL) << __func__
               << ": timer callback is NULL! Name=" << alarm->stats.name;
  }
  // Snapshot callback state while still holding |alarms_mutex| so a
  // concurrent cancel cannot clear it underneath us.
  alarm_callback_t callback = alarm->callback;
  void* data = alarm->data;
  uint64_t deadline_ms = alarm->deadline_ms;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline_ms = alarm->prev_deadline_ms;
  } else {
    alarm->deadline_ms = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
    alarm->queue = NULL;
  }

  // Increment the reference count of the mutex so it doesn't get freed
  // before the callback gets finished executing.
  std::shared_ptr<std::recursive_mutex> local_mutex_ref = alarm->callback_mutex;
  // Acquire the callback mutex BEFORE releasing |alarms_mutex| so that
  // alarm_cancel() blocks on it until the callback below completes.
  std::lock_guard<std::recursive_mutex> cb_lock(*local_mutex_ref);
  lock.unlock();

  // Update the statistics
  update_scheduling_stats(&alarm->stats, now_ms(), deadline_ms);

  // NOTE: Do NOT access "alarm" after the callback, as a safety precaution
  // in case the callback itself deleted the alarm.
  callback(data);
}
598
alarm_ready_mloop(alarm_t * alarm)599 static void alarm_ready_mloop(alarm_t* alarm) {
600 std::unique_lock<std::mutex> lock(alarms_mutex);
601 alarm_ready_generic(alarm, lock);
602 }
603
alarm_queue_ready(fixed_queue_t * queue,UNUSED_ATTR void * context)604 static void alarm_queue_ready(fixed_queue_t* queue, UNUSED_ATTR void* context) {
605 CHECK(queue != NULL);
606
607 std::unique_lock<std::mutex> lock(alarms_mutex);
608 alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
609 alarm_ready_generic(alarm, lock);
610 }
611
// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void* ptr) {
  // Just wake the dispatcher thread; all real work happens in
  // callback_dispatch().
  semaphore_post(alarm_expired);
}
616
// Function running on |dispatcher_thread| that performs the following:
// (1) Receives a signal using |alarm_expired| that the alarm has expired
// (2) Dispatches the alarm callback for processing by the corresponding
// thread for that alarm.
static void callback_dispatch(UNUSED_ATTR void* context) {
  while (true) {
    semaphore_wait(alarm_expired);
    // |dispatcher_thread_active| is cleared (and the semaphore posted) by
    // alarm_cleanup() to shut this loop down.
    if (!dispatcher_thread_active) break;

    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_t* alarm;

    // Take into account that the alarm may get cancelled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Exit right away since there's nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline_ms >
            now_ms()) {
      reschedule_root_alarm();
      continue;
    }

    list_remove(alarms, alarm);

    // Re-arm periodic alarms before dispatching so the next instance is
    // already scheduled while the callback runs.
    if (alarm->is_periodic) {
      alarm->prev_deadline_ms = alarm->deadline_ms;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    if (alarm->for_msg_loop) {
      if (!get_main_message_loop()) {
        LOG_ERROR("%s: message loop already NULL. Alarm: %s", __func__,
                  alarm->stats.name);
        continue;
      }

      alarm->closure.i.Reset(Bind(alarm_ready_mloop, alarm));
      get_main_message_loop()->task_runner()->PostTask(
          FROM_HERE, alarm->closure.i.callback());
    } else {
      fixed_queue_enqueue(alarm->queue, alarm);
    }
  }

  LOG_DEBUG("%s Callback thread exited", __func__);
}
666
timer_create_internal(const clockid_t clock_id,timer_t * timer)667 static bool timer_create_internal(const clockid_t clock_id, timer_t* timer) {
668 CHECK(timer != NULL);
669
670 struct sigevent sigevent;
671 // create timer with RT priority thread
672 pthread_attr_t thread_attr;
673 pthread_attr_init(&thread_attr);
674 pthread_attr_setschedpolicy(&thread_attr, SCHED_FIFO);
675 struct sched_param param;
676 param.sched_priority = THREAD_RT_PRIORITY;
677 pthread_attr_setschedparam(&thread_attr, ¶m);
678
679 memset(&sigevent, 0, sizeof(sigevent));
680 sigevent.sigev_notify = SIGEV_THREAD;
681 sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
682 sigevent.sigev_notify_attributes = &thread_attr;
683 if (timer_create(clock_id, &sigevent, timer) == -1) {
684 LOG_ERROR("%s unable to create timer with clock %d: %s", __func__, clock_id,
685 strerror(errno));
686 if (clock_id == CLOCK_BOOTTIME_ALARM) {
687 LOG_ERROR(
688 "The kernel might not have support for "
689 "timer_create(CLOCK_BOOTTIME_ALARM): "
690 "https://lwn.net/Articles/429925/");
691 LOG_ERROR(
692 "See following patches: "
693 "https://git.kernel.org/cgit/linux/kernel/git/torvalds/"
694 "linux.git/log/?qt=grep&q=CLOCK_BOOTTIME_ALARM");
695 }
696 return false;
697 }
698
699 return true;
700 }
701
update_scheduling_stats(alarm_stats_t * stats,uint64_t now_ms,uint64_t deadline_ms)702 static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms,
703 uint64_t deadline_ms) {
704 stats->total_updates++;
705 stats->last_update_ms = now_ms;
706
707 if (deadline_ms < now_ms) {
708 // Overdue scheduling
709 uint64_t delta_ms = now_ms - deadline_ms;
710 update_stat(&stats->overdue_scheduling, delta_ms);
711 } else if (deadline_ms > now_ms) {
712 // Premature scheduling
713 uint64_t delta_ms = deadline_ms - now_ms;
714 update_stat(&stats->premature_scheduling, delta_ms);
715 }
716 }
717
dump_stat(int fd,stat_t * stat,const char * description)718 static void dump_stat(int fd, stat_t* stat, const char* description) {
719 uint64_t average_time_ms = 0;
720 if (stat->count != 0) average_time_ms = stat->total_ms / stat->count;
721
722 dprintf(fd, "%-51s: %llu / %llu / %llu\n", description,
723 (unsigned long long)stat->total_ms, (unsigned long long)stat->max_ms,
724 (unsigned long long)average_time_ms);
725 }
726
// Writes human-readable statistics for every known alarm to |fd|
// (used by the stack's debug-dump facility).
void alarm_debug_dump(int fd) {
  dprintf(fd, "\nBluetooth Alarms Statistics:\n");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  if (alarms == NULL) {
    dprintf(fd, "  None\n");
    return;
  }

  uint64_t just_now_ms = now_ms();

  dprintf(fd, "  Total Alarms: %zu\n\n", list_length(alarms));

  // Dump info for each alarm
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
       node = list_next(node)) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    alarm_stats_t* stats = &alarm->stats;

    dprintf(fd, "  Alarm : %s (%s)\n", stats->name,
            (alarm->is_periodic) ? "PERIODIC" : "SINGLE");

    dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n",
            "    Action counts (sched/resched/exec/cancel)",
            stats->scheduled_count, stats->rescheduled_count,
            stats->total_updates, stats->canceled_count);

    dprintf(fd, "%-51s: %zu / %zu\n",
            "    Deviation counts (overdue/premature)",
            stats->overdue_scheduling.count, stats->premature_scheduling.count);

    // |remaining| is printed signed: negative means the deadline has passed.
    dprintf(fd, "%-51s: %llu / %llu / %lld\n",
            "    Time in ms (since creation/interval/remaining)",
            (unsigned long long)(just_now_ms - alarm->creation_time_ms),
            (unsigned long long)alarm->period_ms,
            (long long)(alarm->deadline_ms - just_now_ms));

    dump_stat(fd, &stats->overdue_scheduling,
              "    Overdue scheduling time in ms (total/max/avg)");

    dump_stat(fd, &stats->premature_scheduling,
              "    Premature scheduling time in ms (total/max/avg)");

    dprintf(fd, "\n");
  }
}
774