// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"

#include <algorithm>
#include <atomic>
#include <optional>
#include <utility>

#include "base/auto_reset.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/stack_allocated.h"
#include "base/message_loop/message_pump.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros.h"
#include "base/task/sequence_manager/tasks.h"
#include "base/task/task_features.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_IOS)
#include "base/message_loop/message_pump_apple.h"
#elif BUILDFLAG(IS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif

namespace base {
namespace sequence_manager {
namespace internal {
namespace {

// Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to
// mitigate https://crbug.com/850450 where some platforms are unhappy with
// delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no
// sleep > 1 hour ever completes (always interrupted by an earlier MessageLoop
// event) and 99% of completed sleeps are the ones scheduled for <= 1 second.
// Details @ https://crrev.com/c/1142589.
TimeTicks CapAtOneDay(TimeTicks next_run_time, LazyNow* lazy_now) {
  return std::min(next_run_time, lazy_now->Now() + Days(1));
}

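// When enabled, skips redundant ScheduleWork() calls while the pump is already
// processing a batch of native events ahead of the next DoWork() (see
// BeginNativeWorkBeforeDoWork()).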
BASE_FEATURE(kAvoidScheduleWorkDuringNativeEventProcessing,
             "AvoidScheduleWorkDuringNativeEventProcessing",
             base::FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(IS_WIN)
// If enabled, deactivate the high resolution timer immediately in DoWork(),
// instead of waiting for the next DoIdleWork().
BASE_FEATURE(kUseLessHighResTimers,
             "UseLessHighResTimers",
             base::FEATURE_ENABLED_BY_DEFAULT);
std::atomic_bool g_use_less_high_res_timers = true;
#endif

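// Cached feature states, initialized in InitializeFeatures() once the
// FeatureList is available.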
std::atomic_bool g_run_tasks_by_batches = false;
std::atomic_bool g_avoid_schedule_calls_during_native_event_processing = false;

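// Returns the leeway for |wake_up|, or zero if there is no wake-up or its
// delay policy is kPrecise.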
base::TimeDelta GetLeewayForWakeUp(std::optional<WakeUp> wake_up) {
  if (!wake_up || wake_up->delay_policy == subtle::DelayPolicy::kPrecise) {
    return TimeDelta();
  }
  return wake_up->leeway;
}

}  // namespace

// static
void ThreadControllerWithMessagePumpImpl::InitializeFeatures() {
  g_run_tasks_by_batches.store(FeatureList::IsEnabled(base::kRunTasksByBatches),
                               std::memory_order_relaxed);
  g_avoid_schedule_calls_during_native_event_processing.store(
      FeatureList::IsEnabled(kAvoidScheduleWorkDuringNativeEventProcessing),
      std::memory_order_relaxed);
#if BUILDFLAG(IS_WIN)
  g_use_less_high_res_timers.store(
      FeatureList::IsEnabled(kUseLessHighResTimers), std::memory_order_relaxed);
#endif
}

// static
void ThreadControllerWithMessagePumpImpl::ResetFeatures() {
  g_run_tasks_by_batches.store(
      base::kRunTasksByBatches.default_state == FEATURE_ENABLED_BY_DEFAULT,
      std::memory_order_relaxed);
}

ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
    const SequenceManager::Settings& settings)
    : ThreadController(settings.clock),
      work_deduplicator_(associated_thread_),
      can_run_tasks_by_batches_(settings.can_run_tasks_by_batches) {}

ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
    std::unique_ptr<MessagePump> message_pump,
    const SequenceManager::Settings& settings)
    : ThreadControllerWithMessagePumpImpl(settings) {
  BindToCurrentThread(std::move(message_pump));
}

ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
  // Destructors of MessagePump::Delegate and
  // SingleThreadTaskRunner::CurrentDefaultHandle will do all the clean-up.
  // ScopedSetSequenceLocalStorageMapForCurrentThread destructor will
  // de-register the current thread as a sequence.

#if BUILDFLAG(IS_WIN)
  if (main_thread_only().in_high_res_mode) {
    main_thread_only().in_high_res_mode = false;
    Time::ActivateHighResolutionTimer(false);
  }
#endif
}

// static
std::unique_ptr<ThreadControllerWithMessagePumpImpl>
ThreadControllerWithMessagePumpImpl::CreateUnbound(
    const SequenceManager::Settings& settings) {
  return base::WrapUnique(new ThreadControllerWithMessagePumpImpl(settings));
}

ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;

ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
    default;

void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
    SequencedTaskSource* task_source) {
  DCHECK(task_source);
  DCHECK(!main_thread_only().task_source);
  main_thread_only().task_source = task_source;
}

void ThreadControllerWithMessagePumpImpl::BindToCurrentThread(
    std::unique_ptr<MessagePump> message_pump) {
  associated_thread_->BindToCurrentThread();
  pump_ = std::move(message_pump);
  work_id_provider_ = WorkIdProvider::GetForCurrentThread();
  RunLoop::RegisterDelegateForCurrentThread(this);
  scoped_set_sequence_local_storage_map_for_current_thread_ = std::make_unique<
      base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
      &sequence_local_storage_map_);
  {
    base::internal::CheckedAutoLock task_runner_lock(task_runner_lock_);
    if (task_runner_)
      InitializeSingleThreadTaskRunnerCurrentDefaultHandle();
  }
  if (work_deduplicator_.BindToCurrentThread() ==
      ShouldScheduleWork::kScheduleImmediate) {
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
    int work_batch_size) {
  DCHECK_GE(work_batch_size, 1);
  CHECK(main_thread_only().can_change_batch_size);
  main_thread_only().work_batch_size = work_batch_size;
}

void ThreadControllerWithMessagePumpImpl::WillQueueTask(
    PendingTask* pending_task) {
  task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task);
}

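// ScheduleWork() may be called from any thread. The WorkDeduplicator filters
// out redundant requests so the pump is only woken up when necessary.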
void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
  base::internal::CheckedLock::AssertNoLockHeldOnCurrentThread();
  if (work_deduplicator_.OnWorkRequested() ==
      ShouldScheduleWork::kScheduleImmediate) {
    if (!associated_thread_->IsBoundToCurrentThread()) {
      run_level_tracker_.RecordScheduleWork();
    } else {
      TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWorkToSelf");
    }
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::BeginNativeWorkBeforeDoWork() {
  if (!g_avoid_schedule_calls_during_native_event_processing.load(
          std::memory_order_relaxed)) {
    return;
  }
  in_native_work_batch_ = true;

  // Reuse the deduplicator facility to indicate that there is no need for
  // ScheduleWork() until the next time we look for work.
  work_deduplicator_.OnWorkStarted();
}

void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
    LazyNow* lazy_now,
    std::optional<WakeUp> wake_up) {
  DCHECK(!wake_up || !wake_up->is_immediate());
  // It's very rare for PostDelayedTask to be called outside of a DoWork in
  // production, so most of the time this does nothing.
  if (work_deduplicator_.OnDelayedWorkRequested() !=
      ShouldScheduleWork::kScheduleImmediate) {
    return;
  }
  TimeTicks run_time =
      wake_up.has_value()
          ? pump_->AdjustDelayedRunTime(wake_up->earliest_time(), wake_up->time,
                                        wake_up->latest_time())
          : TimeTicks::Max();
  DCHECK_LT(lazy_now->Now(), run_time);

  if (!run_time.is_max()) {
    run_time = CapAtOneDay(run_time, lazy_now);
  }
  // |pump_| can't be null as all posted tasks are cross-thread before binding,
  // and delayed cross-thread posted tasks do the thread hop through an
  // immediate task.
  pump_->ScheduleDelayedWork(
      {run_time, GetLeewayForWakeUp(wake_up), lazy_now->Now()});
}

bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
  return associated_thread_->IsBoundToCurrentThread();
}

void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  base::internal::CheckedAutoLock lock(task_runner_lock_);
  task_runner_ = task_runner;
  if (associated_thread_->IsBound()) {
    DCHECK(associated_thread_->IsBoundToCurrentThread());
    // Thread task runner handle will be created in BindToCurrentThread().
    InitializeSingleThreadTaskRunnerCurrentDefaultHandle();
  }
}

void ThreadControllerWithMessagePumpImpl::
    InitializeSingleThreadTaskRunnerCurrentDefaultHandle() {
  // Only one SingleThreadTaskRunner::CurrentDefaultHandle can exist at any
  // time, so reset the old one.
  main_thread_only().thread_task_runner_handle.reset();
  main_thread_only().thread_task_runner_handle =
      std::make_unique<SingleThreadTaskRunner::CurrentDefaultHandle>(
          task_runner_);
  // When the task runner is known, bind the power manager. Power notifications
  // are received through that sequence.
  power_monitor_.BindToCurrentThread();
}

scoped_refptr<SingleThreadTaskRunner>
ThreadControllerWithMessagePumpImpl::GetDefaultTaskRunner() {
  base::internal::CheckedAutoLock lock(task_runner_lock_);
  return task_runner_;
}

void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
  // There is no default task runner (as opposed to ThreadControllerImpl).
}

void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
    RunLoop::NestingObserver* observer) {
  DCHECK(!main_thread_only().nesting_observer);
  DCHECK(observer);
  main_thread_only().nesting_observer = observer;
  RunLoop::AddNestingObserverOnCurrentThread(this);
}

void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
    RunLoop::NestingObserver* observer) {
  DCHECK_EQ(main_thread_only().nesting_observer, observer);
  main_thread_only().nesting_observer = nullptr;
  RunLoop::RemoveNestingObserverOnCurrentThread(this);
}

void ThreadControllerWithMessagePumpImpl::OnBeginWorkItem() {
  LazyNow lazy_now(time_source_);
  OnBeginWorkItemImpl(lazy_now);
}

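// Common implementation for OnBeginWorkItem(): opens a hang watch scope, bumps
// the work id and notifies the run-level tracker and the task source that a
// work item has started.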
void ThreadControllerWithMessagePumpImpl::OnBeginWorkItemImpl(
    LazyNow& lazy_now) {
  hang_watch_scope_.emplace();
  work_id_provider_->IncrementWorkId();
  run_level_tracker_.OnWorkStarted(lazy_now);
  main_thread_only().task_source->OnBeginWork();
}

void ThreadControllerWithMessagePumpImpl::OnEndWorkItem(int run_level_depth) {
  LazyNow lazy_now(time_source_);
  OnEndWorkItemImpl(lazy_now, run_level_depth);
}

void ThreadControllerWithMessagePumpImpl::OnEndWorkItemImpl(
    LazyNow& lazy_now,
    int run_level_depth) {
  // Work completed, begin a new hang watch until the next task (watching the
  // pump's overhead).
  hang_watch_scope_.emplace();
  work_id_provider_->IncrementWorkId();
  run_level_tracker_.OnWorkEnded(lazy_now, run_level_depth);
}

void ThreadControllerWithMessagePumpImpl::BeforeWait() {
  // DoWork is guaranteed to be called after native work batches and before
  // wait.
  CHECK(!in_native_work_batch_);

  // In most cases, DoIdleWork() will already have cleared the
  // `hang_watch_scope_` but in some cases where the native side of the
  // MessagePump impl is instrumented, it's possible to get a BeforeWait()
  // outside of a DoWork cycle (e.g. message_pump_win.cc :
  // MessagePumpForUI::HandleWorkMessage).
  hang_watch_scope_.reset();

  work_id_provider_->IncrementWorkId();
  LazyNow lazy_now(time_source_);
  run_level_tracker_.OnIdle(lazy_now);
}

MessagePump::Delegate::NextWorkInfo
ThreadControllerWithMessagePumpImpl::DoWork() {
  in_native_work_batch_ = false;

#if BUILDFLAG(IS_WIN)
  // We're already handling a wakeup here, so deactivate the OS high resolution
  // timer immediately instead of waiting for the next DoIdleWork().
  if (g_use_less_high_res_timers.load(std::memory_order_relaxed) &&
      main_thread_only().in_high_res_mode) {
    main_thread_only().in_high_res_mode = false;
    Time::ActivateHighResolutionTimer(false);
  }
#endif
  MessagePump::Delegate::NextWorkInfo next_work_info{};

  work_deduplicator_.OnWorkStarted();
  LazyNow continuation_lazy_now(time_source_);
  std::optional<WakeUp> next_wake_up = DoWorkImpl(&continuation_lazy_now);

  // If we are yielding after DoWorkImpl() (a work batch), set
  // |yield_to_native|. This informs the MessagePump to schedule a new
  // continuation based on the information below but, even if that continuation
  // is immediate, lets the native sequence have a chance to run first. When
  // |g_run_tasks_by_batches| is active, always set the flag so that this
  // experiment behaves the same on Android as on the desktop platforms.
  if (RunsTasksByBatches() ||
      (!main_thread_only().yield_to_native_after_batch.is_null() &&
       continuation_lazy_now.Now() <
           main_thread_only().yield_to_native_after_batch)) {
    next_work_info.yield_to_native = true;
  }
  // Schedule a continuation.
  WorkDeduplicator::NextTask next_task =
      (next_wake_up && next_wake_up->is_immediate())
          ? WorkDeduplicator::NextTask::kIsImmediate
          : WorkDeduplicator::NextTask::kIsDelayed;
  if (work_deduplicator_.DidCheckForMoreWork(next_task) ==
      ShouldScheduleWork::kScheduleImmediate) {
    // Need to run new work immediately, but due to the contract of DoWork
    // we only need to return a null TimeTicks to ensure that happens.
    return next_work_info;
  }

  // Special-casing here avoids unnecessarily sampling Now() when out of work.
  if (!next_wake_up) {
    next_work_info.delayed_run_time = TimeTicks::Max();
    return next_work_info;
  }

  // The MessagePump will schedule the wake up on our behalf, so we need to
  // update |next_work_info.delayed_run_time|.
  TimeTicks next_delayed_do_work = pump_->AdjustDelayedRunTime(
      next_wake_up->earliest_time(), next_wake_up->time,
      next_wake_up->latest_time());

  // Don't request a run time past |main_thread_only().quit_runloop_after|.
  if (next_delayed_do_work > main_thread_only().quit_runloop_after) {
    next_delayed_do_work = main_thread_only().quit_runloop_after;
    // If we've passed |quit_runloop_after| there's no more work to do.
    if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after) {
      next_work_info.delayed_run_time = TimeTicks::Max();
      return next_work_info;
    }
  }

  next_work_info.delayed_run_time =
      CapAtOneDay(next_delayed_do_work, &continuation_lazy_now);
  next_work_info.leeway = GetLeewayForWakeUp(next_wake_up);
  next_work_info.recent_now = continuation_lazy_now.Now();
  return next_work_info;
}

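// Runs a batch of application tasks and returns the next pending wake-up, if
// any, so that DoWork() can schedule a continuation.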
std::optional<WakeUp> ThreadControllerWithMessagePumpImpl::DoWorkImpl(
    LazyNow* continuation_lazy_now) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "ThreadControllerImpl::DoWork");

  if (!main_thread_only().task_execution_allowed) {
    // Broadcast in a trace event that application tasks were disallowed. This
    // helps spot nested loops that intentionally starve application tasks.
    TRACE_EVENT0("base", "ThreadController: application tasks disallowed");
    if (main_thread_only().quit_runloop_after == TimeTicks::Max())
      return std::nullopt;
    return WakeUp{main_thread_only().quit_runloop_after};
  }

  DCHECK(main_thread_only().task_source);

  // Keep running tasks for up to 8ms before yielding to the pump when tasks are
  // run by batches.
  const base::TimeDelta batch_duration =
      RunsTasksByBatches() ? base::Milliseconds(8) : base::Milliseconds(0);

  const std::optional<base::TimeTicks> start_time =
      batch_duration.is_zero()
          ? std::nullopt
          : std::optional<base::TimeTicks>(time_source_->NowTicks());
  std::optional<base::TimeTicks> recent_time = start_time;

  // Loops for |batch_duration|, or |work_batch_size| times if |batch_duration|
  // is zero.
  for (int num_tasks_executed = 0;
       (!batch_duration.is_zero() &&
        (recent_time.value() - start_time.value()) < batch_duration) ||
       (batch_duration.is_zero() &&
        num_tasks_executed < main_thread_only().work_batch_size);
       ++num_tasks_executed) {
    LazyNow lazy_now_select_task(recent_time, time_source_);
    // Include SelectNextTask() in the scope of the work item. This ensures
    // it's covered in tracing and hang reports. This is particularly
    // important when SelectNextTask() finds no work immediately after a
    // wakeup, otherwise the power-inefficient wakeup is invisible in
    // tracing. OnApplicationTaskSelected() assumes this ordering as well.
    OnBeginWorkItemImpl(lazy_now_select_task);
    int run_depth = static_cast<int>(run_level_tracker_.num_run_levels());

    const SequencedTaskSource::SelectTaskOption select_task_option =
        power_monitor_.IsProcessInPowerSuspendState()
            ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
            : SequencedTaskSource::SelectTaskOption::kDefault;
    std::optional<SequencedTaskSource::SelectedTask> selected_task =
        main_thread_only().task_source->SelectNextTask(lazy_now_select_task,
                                                       select_task_option);
    LazyNow lazy_now_task_selected(time_source_);
    run_level_tracker_.OnApplicationTaskSelected(
        (selected_task && selected_task->task.delayed_run_time.is_null())
            ? selected_task->task.queue_time
            : TimeTicks(),
        lazy_now_task_selected);
    if (!selected_task) {
      OnEndWorkItemImpl(lazy_now_task_selected, run_depth);
      break;
    }

    // Execute the task and assume the worst: it is probably not reentrant.
    AutoReset<bool> ban_nested_application_tasks(
        &main_thread_only().task_execution_allowed, false);

    // Trace-parsing tools (DevTools, Lighthouse, etc) consume this event to
    // determine long tasks.
    // See https://crbug.com/681863 and https://crbug.com/874982
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");

    {
      // Always track the start of the task, as this is low-overhead.
      TaskAnnotator::LongTaskTracker long_task_tracker(
          time_source_, selected_task->task, &task_annotator_);

      // Note: all arguments after the task are just passed to a TRACE_EVENT
      // for logging, so lambda captures are safe as the lambda is executed
      // inline.
      SequencedTaskSource* source = main_thread_only().task_source;
      task_annotator_.RunTask(
          "ThreadControllerImpl::RunTask", selected_task->task,
          [&selected_task, &source](perfetto::EventContext& ctx) {
            if (selected_task->task_execution_trace_logger) {
              selected_task->task_execution_trace_logger.Run(
                  ctx, selected_task->task);
            }
            source->MaybeEmitTaskDetails(ctx, selected_task.value());
          });
    }

    // Reset `selected_task` before the call to `DidRunTask()` below makes its
    // `PendingTask` reference dangling.
    selected_task.reset();

    LazyNow lazy_now_after_run_task(time_source_);
    main_thread_only().task_source->DidRunTask(lazy_now_after_run_task);
    // End the work item scope after DidRunTask() as it can process microtasks
    // (which are extensions of the RunTask).
    OnEndWorkItemImpl(lazy_now_after_run_task, run_depth);

    // If DidRunTask() read the clock (lazy_now_after_run_task.has_value()) or
    // if |batch_duration| > 0, store the clock value in `recent_time` so it can
    // be reused by SelectNextTask() at the next loop iteration.
    if (lazy_now_after_run_task.has_value() || !batch_duration.is_zero()) {
      recent_time = lazy_now_after_run_task.Now();
    } else {
      recent_time.reset();
    }

    // When Quit() is called we must stop running the batch because the
    // caller expects per-task granularity.
    if (main_thread_only().quit_pending)
      break;
  }

  if (main_thread_only().quit_pending)
    return std::nullopt;

  work_deduplicator_.WillCheckForMoreWork();

  // Re-check the state of the power after running tasks. An executed task may
  // have been a power change notification.
  const SequencedTaskSource::SelectTaskOption select_task_option =
      power_monitor_.IsProcessInPowerSuspendState()
          ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
          : SequencedTaskSource::SelectTaskOption::kDefault;
  return main_thread_only().task_source->GetPendingWakeUp(continuation_lazy_now,
                                                          select_task_option);
}

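// Batching is enabled only when both the per-SequenceManager setting and the
// global kRunTasksByBatches feature allow it.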
bool ThreadControllerWithMessagePumpImpl::RunsTasksByBatches() const {
  return can_run_tasks_by_batches_ &&
         g_run_tasks_by_batches.load(std::memory_order_relaxed);
}

bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
  struct OnIdle {
    STACK_ALLOCATED();

   public:
    OnIdle(const TickClock* time_source, RunLevelTracker& run_level_tracker_ref)
        : lazy_now(time_source), run_level_tracker(run_level_tracker_ref) {}

    // Very last step before going idle, must be fast as this is hidden from the
    // DoIdleWork trace event below.
    ~OnIdle() { run_level_tracker.OnIdle(lazy_now); }

    LazyNow lazy_now;

   private:
    RunLevelTracker& run_level_tracker;
  };
  std::optional<OnIdle> on_idle;

  // Must be after `on_idle` as this trace event's scope must end before the END
  // of the "ThreadController active" trace event emitted from
  // `run_level_tracker_.OnIdle()`.
  TRACE_EVENT0("sequence_manager", "SequenceManager::DoIdleWork");

#if BUILDFLAG(IS_WIN)
  if (!power_monitor_.IsProcessInPowerSuspendState()) {
    // Avoid calling Time::ActivateHighResolutionTimer() between
    // suspend/resume as the system hangs if we do (crbug.com/1074028).
    // OnResume() will generate a task on this thread per the
    // ThreadControllerPowerMonitor observer and DoIdleWork() will thus get
    // another chance to set the right high-resolution-timer-state before
    // going to sleep after resume.

    const bool need_high_res_mode =
        main_thread_only().task_source->HasPendingHighResolutionTasks();
    if (main_thread_only().in_high_res_mode != need_high_res_mode) {
      // On Windows we activate the high resolution timer so that the wait
      // _if_ triggered by the timer happens with good resolution. If we don't
      // do this the default resolution is 15ms which might not be acceptable
      // for some tasks.
      main_thread_only().in_high_res_mode = need_high_res_mode;
      Time::ActivateHighResolutionTimer(need_high_res_mode);
    }
  }
#endif  // BUILDFLAG(IS_WIN)

  if (main_thread_only().task_source->OnIdle()) {
    // The OnIdle() callback resulted in more immediate work, so schedule a
    // DoWork callback. For some message pumps, returning true from here is
    // sufficient to do that, but not on Mac.
    pump_->ScheduleWork();
    return false;
  }

  // This is mostly redundant with the identical call in BeforeWait (upcoming)
  // but some uninstrumented MessagePump impls don't call BeforeWait so it must
  // also be done here.
  hang_watch_scope_.reset();

  // All return paths below are truly idle.
  on_idle.emplace(time_source_, run_level_tracker_);

  // Check if any runloop timeout has expired.
  if (main_thread_only().quit_runloop_after != TimeTicks::Max() &&
      main_thread_only().quit_runloop_after <= on_idle->lazy_now.Now()) {
    Quit();
    return false;
  }

  // RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
  if (ShouldQuitWhenIdle())
    Quit();

  return false;
}

int ThreadControllerWithMessagePumpImpl::RunDepth() {
  return static_cast<int>(run_level_tracker_.num_run_levels());
}

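// Runs the pump until Quit() is called or |timeout| elapses (enforced via
// |quit_runloop_after|, which is checked in DoWork() and DoIdleWork()).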
void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed,
                                              TimeDelta timeout) {
  DCHECK(RunsTasksInCurrentSequence());

  // Inside a `RunLoop`, all work that has mutual exclusion or ordering
  // expectations with the task source is tracked, so it's safe to allow running
  // tasks synchronously in `RunOrPostTask()`.
  main_thread_only().task_source->SetRunTaskSynchronouslyAllowed(true);

  LazyNow lazy_now_run_loop_start(time_source_);

  // RunLoops can be nested so we need to restore the previous value of
  // |quit_runloop_after| upon exit. NB: we could use saturated arithmetic here
  // but don't, because some tests assert the number of calls to Now().
  AutoReset<TimeTicks> quit_runloop_after(
      &main_thread_only().quit_runloop_after,
      (timeout == TimeDelta::Max()) ? TimeTicks::Max()
                                    : lazy_now_run_loop_start.Now() + timeout);

  run_level_tracker_.OnRunLoopStarted(RunLevelTracker::kInBetweenWorkItems,
                                      lazy_now_run_loop_start);

  // Quit may have been called outside of a Run(), so |quit_pending| might be
  // true here. We can't use InTopLevelDoWork() in Quit() as this call may be
  // outside top-level DoWork but still in Run().
  main_thread_only().quit_pending = false;
  hang_watch_scope_.emplace();
  if (application_tasks_allowed && !main_thread_only().task_execution_allowed) {
    // Allow nested task execution as explicitly requested.
    DCHECK(RunLoop::IsNestedOnCurrentThread());
    main_thread_only().task_execution_allowed = true;
    pump_->Run(this);
    main_thread_only().task_execution_allowed = false;
  } else {
    pump_->Run(this);
  }

  run_level_tracker_.OnRunLoopEnded();
  main_thread_only().quit_pending = false;

  // If this was a nested loop, hang watch the remainder of the task which
  // caused it. Otherwise, stop watching as we're no longer running.
  if (RunLoop::IsNestedOnCurrentThread()) {
    hang_watch_scope_.emplace();
  } else {
    hang_watch_scope_.reset();
  }
  work_id_provider_->IncrementWorkId();

  // Work outside of a `RunLoop` may have mutual exclusion or ordering
  // guarantees with the task source, so disallow running tasks synchronously in
  // `RunOrPostTask()`.
  if (run_level_tracker_.num_run_levels() == 0) {
    main_thread_only().task_source->SetRunTaskSynchronouslyAllowed(false);
  }
}

void ThreadControllerWithMessagePumpImpl::OnBeginNestedRunLoop() {
  // We don't need to ScheduleWork here! That's because the call to pump_->Run()
  // above, which is always called for RunLoop().Run(), guarantees a call to
  // DoWork on all platforms.
  if (main_thread_only().nesting_observer)
    main_thread_only().nesting_observer->OnBeginNestedRunLoop();
}

void ThreadControllerWithMessagePumpImpl::OnExitNestedRunLoop() {
  if (main_thread_only().nesting_observer)
    main_thread_only().nesting_observer->OnExitNestedRunLoop();
}

void ThreadControllerWithMessagePumpImpl::Quit() {
  DCHECK(RunsTasksInCurrentSequence());
  // Interrupt a batch of work.
  main_thread_only().quit_pending = true;

  // If we're in a nested RunLoop, continuation will be posted if necessary.
  pump_->Quit();
}

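// Schedules a DoWork if the WorkDeduplicator indicates one was requested but
// is not already pending.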
void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
  if (work_deduplicator_.OnWorkRequested() ==
      ShouldScheduleWork::kScheduleImmediate) {
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::
    SetTaskExecutionAllowedInNativeNestedLoop(bool allowed) {
  if (allowed) {
    // We need to schedule work unconditionally because we might be about to
    // enter an OS level nested message loop. Unlike a RunLoop().Run() we don't
    // get a call to DoWork on entering for free.
    work_deduplicator_.OnWorkRequested();  // Set the pending DoWork flag.
  } else {
    // We've (probably) just left an OS level nested message loop. Make sure a
    // subsequent PostTask within the same Task doesn't ScheduleWork with the
    // pump (this will be done anyway when the task exits).
    work_deduplicator_.OnWorkStarted();
  }
  if (!pump_->HandleNestedNativeLoopWithApplicationTasks(allowed)) {
    // The pump does not have its own support for native nested loops, so
    // ThreadController must handle scheduling for upcoming tasks.
    if (allowed) {
      pump_->ScheduleWork();
    }
  }
  main_thread_only().task_execution_allowed = allowed;
}

bool ThreadControllerWithMessagePumpImpl::IsTaskExecutionAllowed() const {
  return main_thread_only().task_execution_allowed;
}

MessagePump* ThreadControllerWithMessagePumpImpl::GetBoundMessagePump() const {
  return pump_.get();
}

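// Until |prioritize_until|, DoWork() sets |yield_to_native| on its
// NextWorkInfo so the pump lets native work run between application task
// batches.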
void ThreadControllerWithMessagePumpImpl::PrioritizeYieldingToNative(
    base::TimeTicks prioritize_until) {
  main_thread_only().yield_to_native_after_batch = prioritize_until;
}

#if BUILDFLAG(IS_IOS)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
  static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Attach(this);
}

void ThreadControllerWithMessagePumpImpl::DetachFromMessagePump() {
  static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Detach();
}
#elif BUILDFLAG(IS_ANDROID)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
  CHECK(main_thread_only().work_batch_size == 1);
  // Aborting the message pump currently relies on the batch size being 1.
  main_thread_only().can_change_batch_size = false;
  static_cast<MessagePumpForUI*>(pump_.get())->Attach(this);
}
#endif

bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() {
  if (run_level_tracker_.num_run_levels() == 0)
    return false;
  // It's only safe to call ShouldQuitWhenIdle() when in a RunLoop.
  return ShouldQuitWhenIdle();
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base