// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_H_

#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>

#include "base/base_export.h"
#include "base/dcheck_is_on.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/stack_allocated.h"
#include "base/task/common/checked_lock.h"
#include "base/task/thread_pool/priority_queue.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "base/task/thread_pool/worker_thread.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/container/inlined_vector.h"

#if BUILDFLAG(IS_WIN)
#include "base/win/scoped_windows_thread_environment.h"
#endif

namespace base {

class WorkerThreadObserver;

namespace internal {

class TaskTracker;

// Interface and base implementation for a thread group. A thread group is a
// subset of the threads in the thread pool (see GetThreadGroupForTraits() for
// thread group selection logic when posting tasks and creating task runners).
//
// This class is thread-safe.
class BASE_EXPORT ThreadGroup {
 public:
  // Delegate interface for ThreadGroup.
  class BASE_EXPORT Delegate {
   public:
    virtual ~Delegate() = default;

    // Invoked when a TaskSource with |traits| is non-empty after the
    // ThreadGroup has run a task from it. The implementation must return the
    // thread group in which the TaskSource should be reenqueued.
    virtual ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) = 0;
  };
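
  // Illustrative sketch (hypothetical embedder code, not part of this
  // header): a delegate might route BEST_EFFORT work to a background group
  // and everything else to a foreground group:
  //
  //   class PoolDelegate : public ThreadGroup::Delegate {
  //    public:
  //     ThreadGroup* GetThreadGroupForTraits(
  //         const TaskTraits& traits) override {
  //       return traits.priority() == TaskPriority::BEST_EFFORT
  //                  ? background_group_   // Hypothetical members.
  //                  : foreground_group_;
  //     }
  //   };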

  enum class WorkerEnvironment {
    // No special worker environment required.
    NONE,
#if BUILDFLAG(IS_WIN)
    // Initialize a COM MTA on the worker.
    COM_MTA,
#endif  // BUILDFLAG(IS_WIN)
  };

  ThreadGroup(const ThreadGroup&) = delete;
  ThreadGroup& operator=(const ThreadGroup&) = delete;
  virtual ~ThreadGroup();

  // Creates threads, allowing existing and future tasks to run. The thread
  // group runs at most `max_tasks` / `max_best_effort_tasks` unblocked tasks
  // of any / BEST_EFFORT priority concurrently. It reclaims unused threads
  // after `suggested_reclaim_time`. It uses `service_thread_task_runner` to
  // monitor for blocked tasks and to set up FileDescriptorWatcher on worker
  // threads; it must refer to a Thread with MessagePumpType::IO. If specified,
  // `worker_thread_observer` is notified when a worker enters and exits its
  // main function (the observer must not be destroyed before JoinForTesting()
  // has returned). `worker_environment` specifies the environment in which
  // tasks are executed. `may_block_threshold` is the timeout after which a
  // task in a MAY_BLOCK ScopedBlockingCall is considered blocked (the thread
  // group chooses an appropriate value if none is specified).
  // `synchronous_thread_start_for_testing` is true if this ThreadGroup should
  // synchronously wait for OnMainEntry() after starting each worker. Can only
  // be called once. CHECKs on failure.
  virtual void Start(
      size_t max_tasks,
      size_t max_best_effort_tasks,
      TimeDelta suggested_reclaim_time,
      scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner,
      WorkerThreadObserver* worker_thread_observer,
      WorkerEnvironment worker_environment,
      bool synchronous_thread_start_for_testing,
      std::optional<TimeDelta> may_block_threshold) = 0;
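
  // Example call (illustrative values only; `group` is assumed to point at a
  // concrete ThreadGroup implementation):
  //
  //   group->Start(/*max_tasks=*/4, /*max_best_effort_tasks=*/2,
  //                /*suggested_reclaim_time=*/Seconds(30),
  //                service_thread_task_runner,
  //                /*worker_thread_observer=*/nullptr,
  //                ThreadGroup::WorkerEnvironment::NONE,
  //                /*synchronous_thread_start_for_testing=*/false,
  //                /*may_block_threshold=*/std::nullopt);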

  // Registers the thread group in TLS.
  void BindToCurrentThread();

  // Resets the thread group in TLS.
  void UnbindFromCurrentThread();

  // Returns true if the thread group is registered in TLS.
  bool IsBoundToCurrentThread() const;

  // Removes |task_source| from |priority_queue_|. Returns a
  // RegisteredTaskSource that evaluates to true if successful, or false if
  // |task_source| is not currently in |priority_queue_|, such as when a worker
  // is running a task from it.
  RegisteredTaskSource RemoveTaskSource(const TaskSource& task_source);

  // Updates the position of the TaskSource in |transaction| in this
  // ThreadGroup's PriorityQueue based on the TaskSource's current traits.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke UpdateSortKeyImpl().
  virtual void UpdateSortKey(TaskSource::Transaction transaction) = 0;

  // Pushes the TaskSource in |transaction_with_task_source| into this
  // ThreadGroup's PriorityQueue and wakes up workers as appropriate.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke PushTaskSourceAndWakeUpWorkersImpl().
  virtual void PushTaskSourceAndWakeUpWorkers(
      RegisteredTaskSourceAndTransaction transaction_with_task_source) = 0;
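
  // Illustrative implementation pattern for the two methods above (sketch;
  // ScopedCommandsExecutorImpl is a hypothetical concrete executor type):
  //
  //   void MyThreadGroup::PushTaskSourceAndWakeUpWorkers(
  //       RegisteredTaskSourceAndTransaction transaction_with_task_source) {
  //     ScopedCommandsExecutorImpl executor(this);
  //     PushTaskSourceAndWakeUpWorkersImpl(
  //         &executor, std::move(transaction_with_task_source));
  //   }  // Scheduled commands (e.g. worker wake-ups) run when |executor|
  //      // goes out of scope, after all locks are released.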

  // Moves all task sources from this ThreadGroup's PriorityQueue to the
  // |destination_thread_group|'s.
  void HandoffAllTaskSourcesToOtherThreadGroup(
      ThreadGroup* destination_thread_group);
  // Moves all task sources except those with TaskPriority::USER_BLOCKING
  // from this ThreadGroup's PriorityQueue to the |destination_thread_group|'s.
  void HandoffNonUserBlockingTaskSourcesToOtherThreadGroup(
      ThreadGroup* destination_thread_group);

  // Returns true if a task with |sort_key| running in this thread group should
  // return ASAP, either because its priority is not allowed to run or because
  // work of higher priority is pending. Thread-safe but may return an outdated
  // result (if a task unnecessarily yields due to this, it will simply be
  // re-scheduled).
  bool ShouldYield(TaskSourceSortKey sort_key);
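
  // Illustrative use from within a long-running task (sketch;
  // HasMoreWork()/DoSomeWork() are hypothetical helpers):
  //
  //   while (HasMoreWork()) {
  //     DoSomeWork();
  //     if (thread_group->ShouldYield(sort_key))
  //       return;  // Yield; the task source will simply be re-scheduled.
  //   }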

  // Prevents new tasks from starting to run and waits for currently running
  // tasks to complete their execution. It is guaranteed that no thread will do
  // work on behalf of this ThreadGroup after this returns. It is
  // invalid to post a task once this is called. TaskTracker::Flush() can be
  // called before this to complete existing tasks, which might otherwise post
  // a task during JoinForTesting(). This can only be called once.
  virtual void JoinForTesting() = 0;

  // Returns the maximum number of non-blocked tasks that can run concurrently
  // in this ThreadGroup.
  //
  // TODO(fdoray): Remove this method. https://crbug.com/687264
  virtual size_t GetMaxConcurrentNonBlockedTasksDeprecated() const;

  // Wakes up workers as appropriate for the new CanRunPolicy. Must be called
  // after an update to CanRunPolicy in TaskTracker.
  virtual void DidUpdateCanRunPolicy() = 0;

  virtual void OnShutdownStarted() = 0;

  // Returns true if a thread group is registered in TLS. Used by diagnostic
  // code to check whether it's inside a ThreadPool task.
  static bool CurrentThreadHasGroup();

  // Returns |max_tasks_|/|max_best_effort_tasks_|.
  size_t GetMaxTasksForTesting() const;
  size_t GetMaxBestEffortTasksForTesting() const;

  // Waits until at least |n| workers are idle. Workers are disallowed from
  // cleaning up during this call, but tests using a custom
  // |suggested_reclaim_time_| must still invoke this promptly after unblocking
  // the waited-upon workers: if a worker has already detached by the time this
  // is invoked, it will never make it onto the idle set and this call will
  // hang.
  void WaitForWorkersIdleForTesting(size_t n);

  // Waits until at least |n| workers are idle.
  void WaitForWorkersIdleLockRequiredForTesting(size_t n)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Waits until all workers are idle.
  void WaitForAllWorkersIdleForTesting();

  // Waits until |n| workers have cleaned up (gone through
  // WorkerThread::Delegate::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called yet).
  void WaitForWorkersCleanedUpForTesting(size_t n);
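
  // Illustrative test usage (sketch; assumes a group started with a short
  // reclaim time, and UnblockWorkers() as a hypothetical test helper):
  //
  //   UnblockWorkers();
  //   group->WaitForWorkersCleanedUpForTesting(/*n=*/2);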

  // Returns the number of workers in this thread group.
  size_t NumberOfWorkersForTesting() const;
  // Returns the number of workers that are idle (i.e. not running tasks).
  size_t NumberOfIdleWorkersForTesting() const;
  // Returns the number of workers that are idle (i.e. not running tasks).
  virtual size_t NumberOfIdleWorkersLockRequiredForTesting() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;

  class ThreadGroupWorkerDelegate;

 protected:
  ThreadGroup(std::string_view histogram_label,
              std::string_view thread_group_label,
              ThreadType thread_type_hint,
              TrackedRef<TaskTracker> task_tracker,
              TrackedRef<Delegate> delegate);

  void StartImpl(
      size_t max_tasks,
      size_t max_best_effort_tasks,
      TimeDelta suggested_reclaim_time,
      scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner,
      WorkerThreadObserver* worker_thread_observer,
      WorkerEnvironment worker_environment,
      bool synchronous_thread_start_for_testing = false,
      std::optional<TimeDelta> may_block_threshold =
          std::optional<TimeDelta>());

  // Derived classes must implement a ScopedCommandsExecutor that derives from
  // this to perform operations at the end of a scope, when all locks have been
  // released.
  class BaseScopedCommandsExecutor {
   public:
    BaseScopedCommandsExecutor(const BaseScopedCommandsExecutor&) = delete;
    BaseScopedCommandsExecutor& operator=(const BaseScopedCommandsExecutor&) =
        delete;
    virtual ~BaseScopedCommandsExecutor();

    void ScheduleStart(scoped_refptr<WorkerThread> worker);
    void ScheduleAdjustMaxTasks();
    void ScheduleReleaseTaskSource(RegisteredTaskSource task_source);
    // Unlocks |held_lock| and flushes this executor.
    void FlushWorkerCreation(CheckedLock* held_lock);

   protected:
    explicit BaseScopedCommandsExecutor(ThreadGroup* outer);

    // RAW_PTR_EXCLUSION: Performance (visible in the sampling profiler);
    // stack-scoped, and a back-pointer to the owning object.
    RAW_PTR_EXCLUSION ThreadGroup* outer_ = nullptr;

    // Performs BaseScopedCommandsExecutor-related tasks; must be called in
    // this class' destructor.
    void Flush();

    std::vector<RegisteredTaskSource> task_sources_to_release_;
    absl::InlinedVector<scoped_refptr<WorkerThread>, 2> workers_to_start_;
    bool must_schedule_adjust_max_tasks_ = false;
  };
  virtual std::unique_ptr<BaseScopedCommandsExecutor> GetExecutor() = 0;
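
  // Typical usage pattern (illustrative sketch): commands that must run
  // outside the scope of |lock_| are recorded on the executor while the lock
  // is held, then performed when the executor is destroyed:
  //
  //   {
  //     auto executor = GetExecutor();
  //     CheckedAutoLock auto_lock(lock_);
  //     // ... mutate state, executor->ScheduleStart(worker), etc. ...
  //   }  // |auto_lock| releases |lock_|, then ~executor runs the commands.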

  // Allows a task source to be pushed to a ThreadGroup's PriorityQueue at the
  // end of a scope, when all locks have been released.
  class ScopedReenqueueExecutor {
    STACK_ALLOCATED();

   public:
    ScopedReenqueueExecutor();
    ScopedReenqueueExecutor(const ScopedReenqueueExecutor&) = delete;
    ScopedReenqueueExecutor& operator=(const ScopedReenqueueExecutor&) = delete;
    ~ScopedReenqueueExecutor();

    // Schedules |transaction_with_task_source| to be pushed to
    // |destination_thread_group|'s PriorityQueue at the end of the scope.
    void SchedulePushTaskSourceAndWakeUpWorkers(
        RegisteredTaskSourceAndTransaction transaction_with_task_source,
        ThreadGroup* destination_thread_group);

   private:
    // A RegisteredTaskSourceAndTransaction and the thread group in which it
    // should be enqueued.
    std::optional<RegisteredTaskSourceAndTransaction>
        transaction_with_task_source_;
    ThreadGroup* destination_thread_group_ = nullptr;
  };

  ThreadGroup(TrackedRef<TaskTracker> task_tracker,
              TrackedRef<Delegate> delegate);

#if BUILDFLAG(IS_WIN)
  static std::unique_ptr<win::ScopedWindowsThreadEnvironment>
  GetScopedWindowsThreadEnvironment(WorkerEnvironment environment);
#endif

  const TrackedRef<TaskTracker> task_tracker_;
  const TrackedRef<Delegate> delegate_;

  // Returns the number of additional workers required to run all queued
  // BEST_EFFORT task sources allowed to run by the current CanRunPolicy.
  size_t GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the number of additional workers required to run all queued
  // USER_VISIBLE/USER_BLOCKING task sources allowed to run by the current
  // CanRunPolicy.
  size_t GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Ensures that there are enough workers to run queued task sources.
  // |executor| is forwarded from the one received in
  // PushTaskSourceAndWakeUpWorkersImpl().
  virtual void EnsureEnoughWorkersLockRequired(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;

  // Reenqueues a |transaction_with_task_source| from which a Task just ran in
  // the current ThreadGroup into the appropriate ThreadGroup.
  void ReEnqueueTaskSourceLockRequired(
      BaseScopedCommandsExecutor* workers_executor,
      ScopedReenqueueExecutor* reenqueue_executor,
      RegisteredTaskSourceAndTransaction transaction_with_task_source)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the next task source from |priority_queue_| if permitted to run,
  // and pops |priority_queue_| if the returned task source no longer needs to
  // be queued (it reached its maximum concurrency). Otherwise, returns nullptr
  // and pops |priority_queue_| so this can be called again.
  RegisteredTaskSource TakeRegisteredTaskSource(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);
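
  // Illustrative use from a worker's work-acquisition path (sketch of
  // hypothetical derived-class code):
  //
  //   CheckedAutoLock auto_lock(lock_);
  //   while (!priority_queue_.IsEmpty()) {
  //     RegisteredTaskSource task_source = TakeRegisteredTaskSource(executor);
  //     if (task_source)
  //       return task_source;  // A task can be run from this source.
  //     // nullptr: the front task source couldn't run and was popped; retry.
  //   }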

  // Must be invoked by implementations of the corresponding non-Impl()
  // methods.
  void UpdateSortKeyImpl(BaseScopedCommandsExecutor* executor,
                         TaskSource::Transaction transaction);
  void PushTaskSourceAndWakeUpWorkersImpl(
      BaseScopedCommandsExecutor* executor,
      RegisteredTaskSourceAndTransaction transaction_with_task_source);
  void OnShutDownStartedImpl(BaseScopedCommandsExecutor* executor);

  virtual ThreadGroupWorkerDelegate* GetWorkerDelegate(
      WorkerThread* worker) = 0;

  // Returns the desired number of awake workers, given current workload and
  // concurrency limits.
  size_t GetDesiredNumAwakeWorkersLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Examines the list of WorkerThreads and increments |max_tasks_| for each
  // worker that has been within the scope of a MAY_BLOCK ScopedBlockingCall
  // for more than BlockedThreshold(). Reschedules a call if necessary.
  void AdjustMaxTasks();
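
  // Illustrative interplay with ScopedBlockingCall (sketch): a worker that
  // enters a MAY_BLOCK ScopedBlockingCall and remains blocked past
  // may_block_threshold causes |max_tasks_| to be incremented so another
  // worker can run a task meanwhile; the increment is undone when the
  // blocking call ends:
  //
  //   {
  //     ScopedBlockingCall scoped_blocking_call(FROM_HERE,
  //                                             BlockingType::MAY_BLOCK);
  //     ReadFromDisk();  // Hypothetical blocking work.
  //   }  // Max tasks is restored here if it was incremented.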

  // Schedules AdjustMaxTasks() if required.
  void MaybeScheduleAdjustMaxTasksLockRequired(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Enqueues all task sources from `new_priority_queue` into this thread
  // group.
  void EnqueueAllTaskSources(PriorityQueue* new_priority_queue);

  // Returns the threshold after which the max tasks is increased to compensate
  // for a worker that is within a MAY_BLOCK ScopedBlockingCall.
  TimeDelta may_block_threshold_for_testing() const {
    return after_start().may_block_threshold;
  }

  // Interval at which the service thread checks for workers in this thread
  // group that have been in a MAY_BLOCK ScopedBlockingCall for more than
  // may_block_threshold().
  TimeDelta blocked_workers_poll_period_for_testing() const {
    return after_start().blocked_workers_poll_period;
  }

  // Starts calling AdjustMaxTasks() periodically on
  // |service_thread_task_runner_|.
  void ScheduleAdjustMaxTasks();

  // Returns true if AdjustMaxTasks() should periodically be called on
  // |service_thread_task_runner_|.
  bool ShouldPeriodicallyAdjustMaxTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Updates the minimum priority allowed to run, below which tasks should
  // yield. This should be called whenever |num_running_tasks_| or |max_tasks|
  // changes, or when a new task is added to |priority_queue_|.
  void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of tasks of |priority| that are currently
  // running in this thread group. Must be invoked before/after running a task.
  void DecrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of [best effort] tasks that can run in
  // this thread group.
  void DecrementMaxTasksLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementMaxTasksLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DecrementMaxBestEffortTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementMaxBestEffortTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Values set at Start() and never modified afterwards.
  struct InitializedInStart {
    InitializedInStart();
    ~InitializedInStart();

#if DCHECK_IS_ON()
    // Set after all members of this struct are set.
    bool initialized = false;
#endif

    // Initial value of |max_tasks_|.
    size_t initial_max_tasks = 0;

    // Suggested reclaim time for workers.
    TimeDelta suggested_reclaim_time;
    // Whether worker reclaim is disabled.
    bool no_worker_reclaim = false;

    // Environment to be initialized per worker.
    WorkerEnvironment worker_environment = WorkerEnvironment::NONE;

    scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner;

    // Optional observer notified when a worker enters and exits its main.
    raw_ptr<WorkerThreadObserver> worker_thread_observer = nullptr;

    // Threshold after which the max tasks is increased to compensate for a
    // worker that is within a MAY_BLOCK ScopedBlockingCall.
    TimeDelta may_block_threshold;

    // The period between calls to AdjustMaxTasks() when the thread group is at
    // capacity.
    TimeDelta blocked_workers_poll_period;

    // The max number of workers that a ThreadGroupSemaphore will create in any
    // one EnsureEnoughWorkers() call.
    int max_num_workers_created = 2;
  } initialized_in_start_;

  InitializedInStart& in_start() {
#if DCHECK_IS_ON()
    DCHECK(!initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }
  const InitializedInStart& after_start() const {
#if DCHECK_IS_ON()
    DCHECK(initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }
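
  // Illustrative use (sketch): Start() populates members through in_start()
  // and then flips |initialized| exactly once; all reads afterwards go
  // through after_start():
  //
  //   in_start().initial_max_tasks = max_tasks;
  //   // ... set the remaining members ...
  //   #if DCHECK_IS_ON()
  //   in_start().initialized = true;
  //   #endif
  //   // From here on: after_start().initial_max_tasks, etc.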

  // Synchronizes accesses to all members of this class which are neither
  // const, atomic, nor immutable after start. Since this lock is a bottleneck
  // to post and schedule work, only simple data structure manipulations are
  // allowed within its scope (no thread creation or wake up).
  mutable CheckedLock lock_{};

  bool disable_fair_scheduling_ GUARDED_BY(lock_){false};

  // PriorityQueue from which all threads of this ThreadGroup get work.
  PriorityQueue priority_queue_ GUARDED_BY(lock_);

  struct YieldSortKey {
    TaskPriority priority;
    uint8_t worker_count;
  };
  // Sort key which compares greater than or equal to any other sort key.
  static constexpr YieldSortKey kMaxYieldSortKey = {TaskPriority::BEST_EFFORT,
                                                    0U};

  // When the thread group is at or above capacity and has pending work, this
  // is set to the priority and worker count of the next TaskSource to
  // schedule; otherwise it is kMaxYieldSortKey. This is used to decide whether
  // a TaskSource should yield. Once ShouldYield() returns true, it is reset to
  // kMaxYieldSortKey to prevent additional unnecessary yielding. This is
  // expected to be kept up-to-date by derived classes whenever |lock_| is
  // released. It is annotated as GUARDED_BY(lock_) because it is always
  // updated under the lock (to avoid races with other state during the update)
  // but it is nonetheless always safe to read it without the lock (since it's
  // atomic).
  std::atomic<YieldSortKey> max_allowed_sort_key_ GUARDED_BY(lock_){
      kMaxYieldSortKey};

  const std::string histogram_label_;
  const std::string thread_group_label_;
  const ThreadType thread_type_hint_;

  // Sequence number assigned to workers as they are created by this thread
  // group.
  size_t worker_sequence_num_ GUARDED_BY(lock_) = 0;

  bool shutdown_started_ GUARDED_BY(lock_) = false;

  // Maximum number of tasks of any priority / BEST_EFFORT priority that can
  // run concurrently in this thread group.
  size_t max_tasks_ GUARDED_BY(lock_) = 0;
  size_t max_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of tasks of any priority / BEST_EFFORT priority that are currently
  // running in this thread group.
  size_t num_running_tasks_ GUARDED_BY(lock_) = 0;
  size_t num_running_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of workers running a task of any priority / BEST_EFFORT priority
  // that are within the scope of a MAY_BLOCK ScopedBlockingCall but haven't
  // caused a max tasks increase yet.
  int num_unresolved_may_block_ GUARDED_BY(lock_) = 0;
  int num_unresolved_best_effort_may_block_ GUARDED_BY(lock_) = 0;

  // Signaled when a worker is added to the idle workers set.
  ConditionVariable idle_workers_set_cv_for_testing_ GUARDED_BY(lock_);

  // Whether an AdjustMaxTasks() task was posted to the service thread.
  bool adjust_max_tasks_posted_ GUARDED_BY(lock_) = false;

  // Indicates to the delegates that workers are not permitted to clean up.
  bool worker_cleanup_disallowed_for_testing_ GUARDED_BY(lock_) = false;

  // Counts the number of workers cleaned up (gone through
  // WorkerThreadDelegateImpl::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called
  // yet). |some_workers_cleaned_up_for_testing_| is true if this was ever
  // incremented. Tests with a custom |suggested_reclaim_time_| can wait on a
  // specific number of workers being cleaned up via
  // WaitForWorkersCleanedUpForTesting().
  size_t num_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = 0;
#if DCHECK_IS_ON()
  bool some_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = false;
#endif

  // Signaled, if non-null, when |num_workers_cleaned_up_for_testing_| is
  // incremented.
  std::optional<ConditionVariable> num_workers_cleaned_up_for_testing_cv_
      GUARDED_BY(lock_);

  // All workers owned by this thread group.
  std::vector<scoped_refptr<WorkerThread>> workers_ GUARDED_BY(lock_);

  // Nullopt unless |synchronous_thread_start_for_testing| was true when
  // Start() was called. In that case, it's signaled each time
  // WorkerThreadDelegateImpl::OnMainEntry() completes.
  std::optional<WaitableEvent> worker_started_for_testing_;

  // Set at the start of JoinForTesting().
  bool join_for_testing_started_ GUARDED_BY(lock_) = false;
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_H_