// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/launcher/test_launcher.h"

#include <stdio.h>

#include <algorithm>
#include <map>
#include <random>
#include <string_view>
#include <unordered_map>
#include <unordered_set>
#include <utility>

#include "base/at_exit.h"
#include "base/clang_profiling_buildflags.h"
#include "base/command_line.h"
#include "base/containers/adapters.h"
#include "base/containers/contains.h"
#include "base/environment.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/hash/hash.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ref.h"
#include "base/numerics/safe_conversions.h"
#include "base/process/kill.h"
#include "base/process/launch.h"
#include "base/ranges/algorithm.h"
#include "base/run_loop.h"
#include "base/strings/pattern.h"
#include "base/strings/strcat.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringize_macros.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/system/sys_info.h"
#include "base/task/post_job.h"
#include "base/task/single_thread_task_runner.h"
#include "base/task/thread_pool.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/test/gtest_util.h"
#include "base/test/gtest_xml_util.h"
#include "base/test/launcher/test_launcher_tracer.h"
#include "base/test/launcher/test_results_tracker.h"
#include "base/test/scoped_logging_settings.h"
#include "base/test/test_file_util.h"
#include "base/test/test_switches.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_POSIX)
#include <fcntl.h>

#include "base/files/file_descriptor_watcher_posix.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/apple/scoped_nsautorelease_pool.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/strings/string_util_win.h"

// To avoid conflicts with the macro from the Windows SDK...
#undef GetCommandLine
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/fdio/namespace.h>
#include <lib/zx/job.h>
#include <lib/zx/time.h>
#include "base/atomic_sequence_num.h"
#include "base/fuchsia/default_job.h"
#include "base/fuchsia/file_utils.h"
#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(IS_IOS)
#include "base/path_service.h"
#endif

namespace base {

// See
// https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ
using ::operator<<;

// The environment variable name for the total number of test shards.
const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
// The environment variable name for the test shard index.
const char kTestShardIndex[] = "GTEST_SHARD_INDEX";

// Prefix indicating the test has to run prior to another test.
const char kPreTestPrefix[] = "PRE_";

// Prefix indicating the test is disabled; it will not run unless
// explicitly specified.
const char kDisabledTestPrefix[] = "DISABLED_";

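// ResultWatcher (declared in the corresponding header) watches the result
// file produced by a batch of tests and decides whether the batch has timed
// out; the methods below implement that polling logic.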
ResultWatcher::ResultWatcher(FilePath result_file, size_t num_tests)
    : result_file_(std::move(result_file)), num_tests_(num_tests) {}

bool ResultWatcher::PollUntilDone(TimeDelta timeout_per_test) {
  CHECK(timeout_per_test.is_positive());
  TimeTicks batch_deadline = TimeTicks::Now() + num_tests_ * timeout_per_test;
  TimeDelta time_to_next_check = timeout_per_test;
  do {
    if (WaitWithTimeout(time_to_next_check)) {
      return true;
    }
    time_to_next_check = PollOnce(timeout_per_test);
  } while (TimeTicks::Now() < batch_deadline &&
           time_to_next_check.is_positive());
  // The process may have exited or is about to exit. Give the process a grace
  // period to exit on its own.
  return WaitWithTimeout(TestTimeouts::tiny_timeout());
}

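// Checks the result file once and computes how long to wait before the next
// check. A non-positive return value means that no test completed within its
// allotted time, so the caller should treat the batch as timed out.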
TimeDelta ResultWatcher::PollOnce(TimeDelta timeout_per_test) {
  std::vector<TestResult> test_results;
  // If the result watcher is unlucky enough to read the results while the
  // runner process is writing an update, it is possible to read an incomplete
  // XML entry, in which case `ProcessGTestOutput` will return false.
  if (!ProcessGTestOutput(result_file_, &test_results, nullptr)) {
    return TestTimeouts::tiny_timeout();
  }
  Time latest_completion = LatestCompletionTimestamp(test_results);
  // Didn't complete a single test before timeout, fail.
  if (latest_completion.is_null()) {
    return TimeDelta();
  }
  // The gtest result writer gets timestamps from `Time::Now`.
  TimeDelta time_since_latest_completion = Time::Now() - latest_completion;
  // This heuristic attempts to prevent unrelated clock changes between the
  // latest write and read from being falsely identified as a test timeout.
  // For example, daylight savings time starting or ending can add an
  // artificial delta of +1 or -1 hour to `time_since_latest_completion`.
  if (time_since_latest_completion.is_negative() ||
      time_since_latest_completion > kDaylightSavingsThreshold) {
    return timeout_per_test;
  }
  // Expect another test to complete no later than `timeout_per_test` after
  // the latest completion.
  return timeout_per_test - time_since_latest_completion;
}

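// Returns the completion time (start timestamp plus elapsed time) of the most
// recently completed test in `test_results`, or a null `Time` if no test in
// the batch has completed yet.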
Time ResultWatcher::LatestCompletionTimestamp(
    const std::vector<TestResult>& test_results) {
  CHECK_LE(test_results.size(), num_tests_);
  // Since the result file is append-only, timestamps should already be in
  // ascending order.
  for (const TestResult& result : Reversed(test_results)) {
    if (result.completed()) {
      Time test_start = result.timestamp.value_or(Time());
      return test_start + result.elapsed_time;
    }
  }
  return Time();
}

// Watch results generated by a child test process. Wait for the child process
// to exit between result checks.
class ProcessResultWatcher : public ResultWatcher {
 public:
  ProcessResultWatcher(FilePath result_file, size_t num_tests, Process& process)
      : ResultWatcher(result_file, num_tests), process_(process) {}

  // Get the exit code of the process, or -1 if the process has not exited yet.
  int GetExitCode();

  bool WaitWithTimeout(TimeDelta timeout) override;

 private:
  const raw_ref<Process> process_;
  int exit_code_ = -1;
};

int ProcessResultWatcher::GetExitCode() {
  return exit_code_;
}

bool ProcessResultWatcher::WaitWithTimeout(TimeDelta timeout) {
  return process_->WaitForExitWithTimeout(timeout, &exit_code_);
}

namespace {

// Global tag for test runs where the results are unreliable for any reason.
const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";

// Maximum time of no output after which we print the list of processes still
// running. This deliberately doesn't use TestTimeouts (which is otherwise
// a recommended solution), because they can be increased. This would defeat
// the purpose of this timeout, which is 1) to avoid the buildbot "no output
// for X seconds" timeout killing the process, 2) to help communicate the
// status of the test launcher to people looking at the output (no output for
// a long time is mysterious and gives no info about what is happening), and
// 3) to help debugging in case the process hangs anyway.
constexpr TimeDelta kOutputTimeout = Seconds(15);

// Limit on the number of output snippet lines when printing to stdout.
// Avoids flooding the logs with an amount of output that gums up
// the infrastructure.
const size_t kOutputSnippetLinesLimit = 5000;

// Limit of output snippet size. Exceeding this limit
// results in truncating the output and failing the test.
const size_t kOutputSnippetBytesLimit = 300 * 1024;

// Limit of seed values for gtest shuffling. Arbitrary, but based on
// gtest's similarly arbitrary choice.
const uint32_t kRandomSeedUpperBound = 100000;

// Set of live launch test processes with corresponding lock (it is allowed
// for callers to launch processes on different threads).
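// Note: the lock and the map below are deliberately leaked (allocated with
// `new` and never destroyed), the usual idiom for avoiding static
// destruction-order problems during process shutdown.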
Lock* GetLiveProcessesLock() {
  static auto* lock = new Lock;
  return lock;
}

std::map<ProcessHandle, CommandLine>* GetLiveProcesses() {
  static auto* map = new std::map<ProcessHandle, CommandLine>;
  return map;
}

// Performance trace generator.
TestLauncherTracer* GetTestLauncherTracer() {
  static auto* tracer = new TestLauncherTracer;
  return tracer;
}

#if BUILDFLAG(IS_FUCHSIA)
zx_status_t WaitForJobExit(const zx::job& job) {
  zx::time deadline =
      zx::deadline_after(zx::duration(kOutputTimeout.ToZxDuration()));
  zx_signals_t to_wait_for = ZX_JOB_NO_JOBS | ZX_JOB_NO_PROCESSES;
  while (to_wait_for) {
    zx_signals_t observed = 0;
    zx_status_t status = job.wait_one(to_wait_for, deadline, &observed);
    if (status != ZX_OK)
      return status;
    to_wait_for &= ~observed;
  }
  return ZX_OK;
}
#endif  // BUILDFLAG(IS_FUCHSIA)

#if BUILDFLAG(IS_POSIX)
// Self-pipe that makes it possible to do complex shutdown handling
// outside of the signal handler.
int g_shutdown_pipe[2] = { -1, -1 };

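// Signal handlers may only call async-signal-safe functions, so the handler
// below just writes one byte to the pipe; the read end is watched from
// TestLauncher::Run(), which performs the actual shutdown work on the main
// thread.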
void ShutdownPipeSignalHandler(int signal) {
  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
}

void KillSpawnedTestProcesses() {
  // Keep the lock until exiting the process to prevent further processes
  // from being spawned.
  AutoLock lock(*GetLiveProcessesLock());

  fprintf(stdout, "Sending SIGTERM to %zu child processes... ",
          GetLiveProcesses()->size());
  fflush(stdout);

  for (const auto& pair : *GetLiveProcesses()) {
    // Send the signal to entire process group.
    kill((-1) * (pair.first), SIGTERM);
  }

  fprintf(stdout,
          "done.\nGiving processes a chance to terminate cleanly... ");
  fflush(stdout);

  PlatformThread::Sleep(Milliseconds(500));

  fprintf(stdout, "done.\n");
  fflush(stdout);

  fprintf(stdout, "Sending SIGKILL to %zu child processes... ",
          GetLiveProcesses()->size());
  fflush(stdout);

  for (const auto& pair : *GetLiveProcesses()) {
    // Send the signal to entire process group.
    kill((-1) * (pair.first), SIGKILL);
  }

  fprintf(stdout, "done.\n");
  fflush(stdout);
}
#endif  // BUILDFLAG(IS_POSIX)

// Parses the environment variable |var| as an Int32. If it is unset, returns
// true. If it is set, unsets it and then converts it to Int32 before
// returning it in |result|. Returns true on success.
bool TakeInt32FromEnvironment(const char* const var, int32_t* result) {
  std::unique_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(var, &str_val))
    return true;

  if (!env->UnSetVar(var)) {
    LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
    return false;
  }

  if (!StringToInt(str_val, result)) {
    LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
    return false;
  }

  return true;
}

// Unsets the environment variable |name| and returns true on success.
// Also returns true if the variable just doesn't exist.
bool UnsetEnvironmentVariableIfExists(const std::string& name) {
  std::unique_ptr<Environment> env(Environment::Create());
  std::string str_val;
  if (!env->GetVar(name, &str_val))
    return true;
  return env->UnSetVar(name);
}

// Returns true if bot mode has been requested, i.e. defaults optimized
// for continuous integration bots. This way developers don't have to remember
// special command-line flags.
bool BotModeEnabled(const CommandLine* command_line) {
  std::unique_ptr<Environment> env(Environment::Create());
  return command_line->HasSwitch(switches::kTestLauncherBotMode) ||
         env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
}

// Returns the command line after gtest-specific processing
// and applying |wrapper|.
CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
                                       const std::string& wrapper,
                                       const size_t retries_left) {
  CommandLine new_command_line(command_line.GetProgram());
  CommandLine::SwitchMap switches = command_line.GetSwitches();

  // Handled by the launcher process.
  switches.erase(kGTestRepeatFlag);
  switches.erase(kIsolatedScriptTestRepeatFlag);

  // Don't try to write the final XML report in child processes.
  switches.erase(kGTestOutputFlag);

#if BUILDFLAG(IS_IOS)
  // We only need the xctest flag for the parent process. Passing it to
  // child processes will cause the tests not to run, so remove it.
  switches.erase(switches::kEnableRunIOSUnittestsWithXCTest);
#endif

  if (switches.find(switches::kTestLauncherRetriesLeft) == switches.end()) {
    switches[switches::kTestLauncherRetriesLeft] =
#if BUILDFLAG(IS_WIN)
        base::NumberToWString(
#else
        base::NumberToString(
#endif
            retries_left);
  }

  for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
       iter != switches.end(); ++iter) {
    new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
  }

  // Prepend the wrapper after the last CommandLine quasi-copy operation.
  // CommandLine does not really support removing switches well, and trying to
  // do that on a CommandLine with a wrapper is known to break.
  // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
#if BUILDFLAG(IS_WIN)
  new_command_line.PrependWrapper(UTF8ToWide(wrapper));
#else
  new_command_line.PrependWrapper(wrapper);
#endif

  return new_command_line;
}

// Launches a child process using |command_line|. If a test is still running
// after |timeout_per_test|, the child process is terminated and
// |*was_timeout| is set to true. Returns the exit code of the process.
int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
                                      const LaunchOptions& options,
                                      int flags,
                                      const FilePath& result_file,
                                      TimeDelta timeout_per_test,
                                      size_t num_tests,
                                      TestLauncherDelegate* delegate,
                                      bool* was_timeout) {
#if BUILDFLAG(IS_POSIX)
  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
  DCHECK(options.new_process_group);
#endif

  LaunchOptions new_options(options);

#if BUILDFLAG(IS_WIN)
  DCHECK(!new_options.job_handle);

  win::ScopedHandle job_handle;
  if (flags & TestLauncher::USE_JOB_OBJECTS) {
    job_handle.Set(CreateJobObject(NULL, NULL));
    if (!job_handle.is_valid()) {
      LOG(ERROR) << "Could not create JobObject.";
      return -1;
    }

    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;

    if (!SetJobObjectLimitFlags(job_handle.get(), job_flags)) {
      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
      return -1;
    }

    new_options.job_handle = job_handle.get();
  }
#elif BUILDFLAG(IS_FUCHSIA)
  DCHECK(!new_options.job_handle);

  // Set the clone policy, deliberately omitting FDIO_SPAWN_CLONE_NAMESPACE so
  // that we can install a different /data.
  new_options.spawn_flags = FDIO_SPAWN_CLONE_STDIO | FDIO_SPAWN_CLONE_JOB;

  const base::FilePath kDataPath(base::kPersistedDataDirectoryPath);
  const base::FilePath kCachePath(base::kPersistedCacheDirectoryPath);

  // Clone all namespace entries from the current process, except /data and
  // /cache, which are overridden below.
  fdio_flat_namespace_t* flat_namespace = nullptr;
  zx_status_t result = fdio_ns_export_root(&flat_namespace);
  ZX_CHECK(ZX_OK == result, result) << "fdio_ns_export_root";
  for (size_t i = 0; i < flat_namespace->count; ++i) {
    base::FilePath path(flat_namespace->path[i]);
    if (path == kDataPath || path == kCachePath) {
      result = zx_handle_close(flat_namespace->handle[i]);
      ZX_CHECK(ZX_OK == result, result) << "zx_handle_close";
    } else {
      new_options.paths_to_transfer.push_back(
          {path, flat_namespace->handle[i]});
    }
  }
  free(flat_namespace);

  zx::job job_handle;
  result = zx::job::create(*GetDefaultJob(), 0, &job_handle);
  ZX_CHECK(ZX_OK == result, result) << "zx_job_create";
  new_options.job_handle = job_handle.get();

  // Give this test its own isolated /data directory by creating a new
  // temporary subdirectory under /data (/data/test-$PID) and binding paths
  // under that to /data and /cache in the child process.
  // Persistent data storage is mapped to /cache rather than system-provided
  // cache storage, to avoid unexpected purges (see crbug.com/1242170).
  CHECK(base::PathExists(kDataPath));

  // Create the test subdirectory with a name that is unique to the child test
  // process (qualified by parent PID and an autoincrementing test process
  // index).
  static base::AtomicSequenceNumber child_launch_index;
  const base::FilePath child_data_path = kDataPath.AppendASCII(
      base::StringPrintf("test-%zu-%d", base::Process::Current().Pid(),
                         child_launch_index.GetNext()));
  CHECK(!base::DirectoryExists(child_data_path));
  CHECK(base::CreateDirectory(child_data_path));
  DCHECK(base::DirectoryExists(child_data_path));

  const base::FilePath test_data_dir(child_data_path.AppendASCII("data"));
  CHECK(base::CreateDirectory(test_data_dir));
  const base::FilePath test_cache_dir(child_data_path.AppendASCII("cache"));
  CHECK(base::CreateDirectory(test_cache_dir));

  // Transfer handles to the new directories as /data and /cache in the child
  // process' namespace.
  new_options.paths_to_transfer.push_back(
      {kDataPath,
       base::OpenDirectoryHandle(test_data_dir).TakeChannel().release()});
  new_options.paths_to_transfer.push_back(
      {kCachePath,
       base::OpenDirectoryHandle(test_cache_dir).TakeChannel().release()});
#endif  // BUILDFLAG(IS_FUCHSIA)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  // To prevent accidental privilege sharing to an untrusted child, processes
  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
  // new child will be privileged and trusted.
  new_options.allow_new_privs = true;
#endif

  Process process;

  {
    // Note how we grab the lock before the process possibly gets created.
    // This ensures that when the lock is held, ALL the processes are
    // registered in the set.
    AutoLock lock(*GetLiveProcessesLock());

#if BUILDFLAG(IS_WIN)
    // Allow the handle used to capture stdout and stderr to be inherited by
    // the child. Note that this is done under GetLiveProcessesLock() to
    // ensure that only the desired child receives the handle.
    if (new_options.stdout_handle) {
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT,
                             HANDLE_FLAG_INHERIT);
    }
#endif

    process = LaunchProcess(command_line, new_options);

#if BUILDFLAG(IS_WIN)
    // Revoke inheritance so that the handle isn't leaked into other children.
    // Note that this is done under GetLiveProcessesLock() to ensure that only
    // the desired child receives the handle.
    if (new_options.stdout_handle)
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT, 0);
#endif

    if (!process.IsValid())
      return -1;

    // TODO(rvargas) crbug.com/417532: Don't store process handles.
    GetLiveProcesses()->insert(std::make_pair(process.Handle(), command_line));
  }

  int exit_code = 0;
  bool did_exit = false;

  {
    base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
    if (num_tests == 1) {
      did_exit = process.WaitForExitWithTimeout(timeout_per_test, &exit_code);
    } else {
      ProcessResultWatcher result_watcher(result_file, num_tests, process);
      did_exit = result_watcher.PollUntilDone(timeout_per_test);
      exit_code = result_watcher.GetExitCode();
    }
  }

  if (!did_exit) {
    if (delegate)
      delegate->OnTestTimedOut(command_line);

    *was_timeout = true;
    exit_code = -1;  // Set a non-zero exit code to signal a failure.

    {
      base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
      // Ensure that the process terminates.
      process.Terminate(-1, true);
    }
  }

#if BUILDFLAG(IS_FUCHSIA)
  zx_status_t wait_status = WaitForJobExit(job_handle);
  if (wait_status != ZX_OK) {
    LOG(ERROR) << "Batch leaked jobs or processes.";
    exit_code = -1;
  }
#endif  // BUILDFLAG(IS_FUCHSIA)

  {
    // Note how we grab the lock before issuing a possibly broad process kill.
    // Other code parts that grab the lock kill processes, so avoid trying
    // to do that twice and trigger all kinds of log messages.
    AutoLock lock(*GetLiveProcessesLock());

#if BUILDFLAG(IS_FUCHSIA)
    zx_status_t status = job_handle.kill();
    ZX_CHECK(status == ZX_OK, status);

    // Cleanup the data directory.
    CHECK(DeletePathRecursively(child_data_path));
#elif BUILDFLAG(IS_POSIX)
    // It is not possible to waitpid() on any leaked sub-processes of the test
    // batch process, since those are not direct children of this process.
    // kill()ing the process-group will return a result indicating whether the
    // group was found (i.e. processes were still running in it) or not (i.e.
    // sub-processes had exited already). Unfortunately many tests (e.g.
    // browser tests) have processes exit asynchronously, so checking the
    // kill() result would report false failures.
    // Unconditionally kill the process group, regardless of the batch
    // exit-code, until a better solution is available.
    kill(-1 * process.Handle(), SIGKILL);
#endif  // BUILDFLAG(IS_POSIX)

    GetLiveProcesses()->erase(process.Handle());
  }

  return exit_code;
}

struct ChildProcessResults {
  // Total time for DoLaunchChildTestProcess to execute.
  TimeDelta elapsed_time;
  // If stdio is redirected, pass output file content.
  std::string output_file_contents;
  // True if child process timed out.
  bool was_timeout = false;
  // Exit code of child process.
  int exit_code;
  // Thread ID of the runner.
  PlatformThreadId thread_id;
  // The sequence number of the child test process executed. It's used
  // instead of the process id because the OS may reuse process ids.
  int process_num;
};

// Returns the path to a temporary directory within |task_temp_dir| for the
// child process of index |child_index|, or an empty FilePath if per-child
// temp dirs are not supported.
FilePath CreateChildTempDirIfSupported(const FilePath& task_temp_dir,
                                       int child_index) {
  if (!TestLauncher::SupportsPerChildTempDirs())
    return FilePath();
  FilePath child_temp = task_temp_dir.AppendASCII(NumberToString(child_index));
  CHECK(CreateDirectoryAndGetError(child_temp, nullptr));
  return child_temp;
}

// Adds to |environment| the platform-specific environment variable that sets
// |temp_dir| as the process's temporary directory.
void SetTemporaryDirectory(const FilePath& temp_dir,
                           EnvironmentMap* environment) {
#if BUILDFLAG(IS_WIN)
  environment->emplace(L"TMP", temp_dir.value());
#elif BUILDFLAG(IS_APPLE)
  environment->emplace("MAC_CHROMIUM_TMPDIR", temp_dir.value());
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  environment->emplace("TMPDIR", temp_dir.value());
#endif
}

// This launches the child test process, waits for it to complete,
// and returns child process results.
ChildProcessResults DoLaunchChildTestProcess(
    const CommandLine& command_line,
    const FilePath& process_temp_dir,
    const FilePath& result_file,
    TimeDelta timeout_per_test,
    size_t num_tests,
    const TestLauncher::LaunchOptions& test_launch_options,
    bool redirect_stdio,
    TestLauncherDelegate* delegate) {
  TimeTicks start_time = TimeTicks::Now();

  ChildProcessResults result;
  result.thread_id = PlatformThread::CurrentId();

  ScopedFILE output_file;
  FilePath output_filename;
  if (redirect_stdio) {
    output_file = CreateAndOpenTemporaryStream(&output_filename);
    CHECK(output_file);
#if BUILDFLAG(IS_WIN)
    // Paint the file so that it will be deleted when all handles are closed.
    if (!FILEToFile(output_file.get()).DeleteOnClose(true)) {
      PLOG(WARNING) << "Failed to mark " << output_filename.AsUTF8Unsafe()
                    << " for deletion on close";
    }
#endif
  }

  LaunchOptions options;

#if BUILDFLAG(IS_IOS)
  // We need to allow XPC to start extension processes so magically we set
  // this flag to 1.
  options.environment.emplace("XPC_FLAGS", "1");
#endif
  // Tell the child process to use its designated temporary directory.
  if (!process_temp_dir.empty())
    SetTemporaryDirectory(process_temp_dir, &options.environment);
#if BUILDFLAG(IS_WIN)

  options.inherit_mode = test_launch_options.inherit_mode;
  options.handles_to_inherit = test_launch_options.handles_to_inherit;
  if (redirect_stdio) {
    HANDLE handle =
        reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
    CHECK_NE(INVALID_HANDLE_VALUE, handle);
    options.stdin_handle = INVALID_HANDLE_VALUE;
    options.stdout_handle = handle;
    options.stderr_handle = handle;
    // See LaunchOptions.stdout_handle comments for why this compares against
    // FILE_TYPE_CHAR.
    if (options.inherit_mode == base::LaunchOptions::Inherit::kSpecific &&
        GetFileType(handle) != FILE_TYPE_CHAR) {
      options.handles_to_inherit.push_back(handle);
    }
  }

#else  // if !BUILDFLAG(IS_WIN)

  options.fds_to_remap = test_launch_options.fds_to_remap;
  if (redirect_stdio) {
    int output_file_fd = fileno(output_file.get());
    CHECK_LE(0, output_file_fd);
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDOUT_FILENO));
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDERR_FILENO));
  }

#if !BUILDFLAG(IS_FUCHSIA)
  options.new_process_group = true;
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  options.kill_on_parent_death = true;
#endif

#endif  // !BUILDFLAG(IS_WIN)

  result.exit_code = LaunchChildTestProcessWithOptions(
      command_line, options, test_launch_options.flags, result_file,
      timeout_per_test, num_tests, delegate, &result.was_timeout);

  if (redirect_stdio) {
    fflush(output_file.get());

    // Reading the file can sometimes fail when the process was killed
    // midflight (e.g. on test suite timeout): https://crbug.com/826408.
    // Attempt to read the output file anyways, but do not crash on failure in
    // this case.
    CHECK(ReadStreamToString(output_file.get(), &result.output_file_contents) ||
          result.exit_code != 0);

    output_file.reset();
#if !BUILDFLAG(IS_WIN)
    // On Windows, the reset() above is enough to delete the file since it was
    // painted for such after being opened. Lesser platforms require an
    // explicit delete now.
    if (!DeleteFile(output_filename))
      LOG(WARNING) << "Failed to delete " << output_filename.AsUTF8Unsafe();
#endif
  }
  result.elapsed_time = TimeTicks::Now() - start_time;
  result.process_num = GetTestLauncherTracer()->RecordProcessExecution(
      start_time, result.elapsed_time);
  return result;
}

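// Splits a gtest-style filter string into individual test names. When
// |double_colon_supported| is true, "::" is tried first as the separator; if
// that yields at most one entry, the conventional ":" separator is used
// instead.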
std::vector<std::string> ExtractTestsFromFilter(const std::string& filter,
                                                bool double_colon_supported) {
  std::vector<std::string> tests;
  if (double_colon_supported) {
    tests =
        SplitString(filter, "::", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
  }
  if (tests.size() <= 1) {
    tests =
        SplitString(filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
  }
  return tests;
}

// A test runner object to run tests across a number of sequence runners,
// and control running pre tests in sequence.
class TestRunner {
 public:
  explicit TestRunner(TestLauncher* launcher,
                      size_t max_workers = 1u,
                      size_t batch_size = 1u)
      : launcher_(launcher),
        max_workers_(max_workers),
        batch_size_(batch_size) {}

  // Sets |test_names| to be run, with |batch_size| tests per process.
  // Posts a job to run LaunchChildGTestProcess on |max_workers| workers.
  void Run(const std::vector<std::string>& test_names);

 private:
  // Called to check if the next batch has to run on the same
  // sequence task runner and use the same temporary directory.
  static bool IsPreTestBatch(const std::vector<std::string>& test_names) {
    return test_names.size() == 1u &&
           test_names.front().find(kPreTestPrefix) != std::string::npos;
  }

  bool IsSingleThreaded() const { return batch_size_ == 0; }

  void WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
                  base::JobDelegate* delegate);

  size_t GetMaxConcurrency(size_t worker_count) {
    AutoLock auto_lock(lock_);
    if (IsSingleThreaded()) {
      return tests_to_run_.empty() ? 0 : 1;
    }

    // Round up the division to ensure enough workers for all tests.
    return std::min((tests_to_run_.size() + batch_size_ - 1) / batch_size_,
                    max_workers_);
  }

  std::vector<std::string> GetNextBatch() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
    size_t batch_size;
    // The single-threaded case runs all tests in one batch.
    if (IsSingleThreaded()) {
      batch_size = tests_to_run_.size();
    }
    // Otherwise run the remaining tests, up to |batch_size_| of them.
    else {
      batch_size = std::min(batch_size_, tests_to_run_.size());
    }
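    // |tests_to_run_| is stored in reverse order (see Run()), so the next
    // tests to execute sit at the back of the vector; taking them via
    // rbegin() and erasing from the end avoids shifting the remaining
    // elements.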
    std::vector<std::string> batch(tests_to_run_.rbegin(),
                                   tests_to_run_.rbegin() + batch_size);
    tests_to_run_.erase(tests_to_run_.end() - batch_size, tests_to_run_.end());
    return batch;
  }

  // Cleans up |task_temp_dir| from a previous task and quits |run_loop| if
  // |done|.
  void CleanupTask(base::ScopedTempDir task_temp_dir, bool done);

  ThreadChecker thread_checker_;

  const raw_ptr<TestLauncher> launcher_;
  JobHandle job_handle_;
  // Max number of workers to use.
  const size_t max_workers_;
  // Number of tests per process; 0 is a special case that means all tests
  // run in one batch.
  const size_t batch_size_;
  RunLoop run_loop_;
  // Protects member used concurrently by worker tasks.
  base::Lock lock_;
  std::vector<std::string> tests_to_run_ GUARDED_BY(lock_);

  base::WeakPtrFactory<TestRunner> weak_ptr_factory_{this};
};

void TestRunner::Run(const std::vector<std::string>& test_names) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // No workers, fail immediately.
  CHECK_GT(max_workers_, 0u);
  if (test_names.empty()) {
    return;
  }

  {
    AutoLock auto_lock(lock_);
    tests_to_run_ = test_names;
    // Reverse test order to avoid copying the whole vector when removing
    // tests.
    std::reverse(tests_to_run_.begin(), tests_to_run_.end());
  }

  job_handle_ = base::PostJob(
      FROM_HERE, {TaskPriority::USER_BLOCKING, MayBlock()},
      BindRepeating(&TestRunner::WorkerTask, Unretained(this),
                    SingleThreadTaskRunner::GetCurrentDefault()),
      BindRepeating(&TestRunner::GetMaxConcurrency, Unretained(this)));

  run_loop_.Run();
}

void TestRunner::WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
                            base::JobDelegate* delegate) {
  bool done = false;
  while (!done && !delegate->ShouldYield()) {
    // Create a temporary directory for this task. This directory will hold
    // the flags and results files for the child processes as well as their
    // User Data dir, where appropriate. For platforms that support per-child
    // temp dirs, this directory will also contain one subdirectory per child
    // for that child's process-wide temp dir.
    base::ScopedTempDir task_temp_dir;
    CHECK(task_temp_dir.CreateUniqueTempDirUnderPath(GetTempDirForTesting()));
    int child_index = 0;

    std::vector<std::vector<std::string>> batches;
    {
      AutoLock auto_lock(lock_);
      if (!tests_to_run_.empty()) {
        batches.push_back(GetNextBatch());
        while (IsPreTestBatch(batches.back())) {
          DCHECK(!tests_to_run_.empty());
          batches.push_back(GetNextBatch());
        }
      }
      done = tests_to_run_.empty();
    }
    for (const auto& batch : batches) {
      launcher_->LaunchChildGTestProcess(
          main_task_runner, batch, task_temp_dir.GetPath(),
          CreateChildTempDirIfSupported(task_temp_dir.GetPath(),
                                        child_index++));
    }

    // Cleaning up test results is scheduled on |main_task_runner| because it
    // must happen after all the post-processing steps that
    // LaunchChildGTestProcess scheduled on |main_task_runner|.
    main_task_runner->PostTask(
        FROM_HERE,
        BindOnce(&TestRunner::CleanupTask, weak_ptr_factory_.GetWeakPtr(),
                 std::move(task_temp_dir), done));
  }
}

void TestRunner::CleanupTask(base::ScopedTempDir task_temp_dir, bool done) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Delete the previous temporary directory.
  if (!task_temp_dir.Delete()) {
    // This needs to be non-fatal at least for Windows.
    LOG(WARNING) << "Failed to delete "
                 << task_temp_dir.GetPath().AsUTF8Unsafe();
  }

  if (!done) {
    return;
  }

  if (job_handle_) {
    job_handle_.Cancel();
    run_loop_.QuitWhenIdle();
  }
}

// Returns the number of files and directories in |dir|, or 0 if |dir| is
// empty.
int CountItemsInDirectory(const FilePath& dir) {
  if (dir.empty())
    return 0;
  int items = 0;
  FileEnumerator file_enumerator(
      dir, /*recursive=*/false,
      FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
  for (FilePath name = file_enumerator.Next(); !name.empty();
       name = file_enumerator.Next()) {
    ++items;
  }
  return items;
}

// Truncates a snippet in the middle to the given byte limit. |byte_limit|
// should be at least 30.
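// For example, with a 100-byte snippet and a byte_limit of 50, the 25-byte
// marker "\n<truncated (100 bytes)>\n" replaces the middle of the snippet,
// keeping its first 12 and last 13 bytes.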
std::string TruncateSnippet(const std::string_view snippet, size_t byte_limit) {
  if (snippet.length() <= byte_limit) {
    return std::string(snippet);
  }
  std::string truncation_message =
      StringPrintf("\n<truncated (%zu bytes)>\n", snippet.length());
  if (truncation_message.length() > byte_limit) {
    // Fail gracefully.
    return truncation_message;
  }
  size_t remaining_limit = byte_limit - truncation_message.length();
  size_t first_half = remaining_limit / 2;
  return base::StrCat(
      {snippet.substr(0, first_half), truncation_message,
       snippet.substr(snippet.length() - (remaining_limit - first_half))});
}

}  // namespace

const char kGTestBreakOnFailure[] = "gtest_break_on_failure";
const char kGTestFilterFlag[] = "gtest_filter";
const char kGTestFlagfileFlag[] = "gtest_flagfile";
const char kGTestHelpFlag[] = "gtest_help";
const char kGTestListTestsFlag[] = "gtest_list_tests";
const char kGTestRepeatFlag[] = "gtest_repeat";
const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
const char kGTestOutputFlag[] = "gtest_output";
const char kGTestShuffleFlag[] = "gtest_shuffle";
const char kGTestRandomSeedFlag[] = "gtest_random_seed";
const char kIsolatedScriptRunDisabledTestsFlag[] =
    "isolated-script-test-also-run-disabled-tests";
const char kIsolatedScriptTestFilterFlag[] = "isolated-script-test-filter";
const char kIsolatedScriptTestRepeatFlag[] = "isolated-script-test-repeat";

class TestLauncher::TestInfo {
 public:
  TestInfo() = default;
  TestInfo(const TestInfo& other) = default;
  TestInfo(const TestIdentifier& test_id);
  ~TestInfo() = default;

  // Returns the test name excluding the DISABLED_ prefix.
  std::string GetDisabledStrippedName() const;

  // Returns the full test name.
  std::string GetFullName() const;

  // Returns the test name with the PRE_ prefix added, excluding the DISABLED_
  // prefix.
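  // E.g. for "Foo.DISABLED_Bar" this returns "Foo.PRE_Bar".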
  std::string GetPreName() const;

  // Returns test name excluding DISABLED_ and PRE_ prefixes.
  std::string GetPrefixStrippedName() const;

  const std::string& test_case_name() const { return test_case_name_; }
  const std::string& test_name() const { return test_name_; }
  const std::string& file() const { return file_; }
  int line() const { return line_; }
  bool disabled() const { return disabled_; }
  bool pre_test() const { return pre_test_; }

 private:
  std::string test_case_name_;
  std::string test_name_;
  std::string file_;
  int line_;
  bool disabled_;
  bool pre_test_;
};

TestLauncher::TestInfo::TestInfo(const TestIdentifier& test_id)
    : test_case_name_(test_id.test_case_name),
      test_name_(test_id.test_name),
      file_(test_id.file),
      line_(test_id.line),
      disabled_(false),
      pre_test_(false) {
  disabled_ = GetFullName().find(kDisabledTestPrefix) != std::string::npos;
  pre_test_ = test_name_.find(kPreTestPrefix) != std::string::npos;
}

std::string TestLauncher::TestInfo::GetDisabledStrippedName() const {
  std::string test_name = GetFullName();
  ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
                               std::string());
  return test_name;
}

std::string TestLauncher::TestInfo::GetFullName() const {
  return FormatFullTestName(test_case_name_, test_name_);
}

std::string TestLauncher::TestInfo::GetPreName() const {
  std::string name = test_name_;
  ReplaceSubstringsAfterOffset(&name, 0, kDisabledTestPrefix, std::string());
  std::string case_name = test_case_name_;
  ReplaceSubstringsAfterOffset(&case_name, 0, kDisabledTestPrefix,
                               std::string());
  return FormatFullTestName(case_name, kPreTestPrefix + name);
}

std::string TestLauncher::TestInfo::GetPrefixStrippedName() const {
  std::string test_name = GetDisabledStrippedName();
  ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
  return test_name;
}

TestLauncherDelegate::~TestLauncherDelegate() = default;

bool TestLauncherDelegate::ShouldRunTest(const TestIdentifier& test) {
  return true;
}

TestLauncher::LaunchOptions::LaunchOptions() = default;
TestLauncher::LaunchOptions::LaunchOptions(const LaunchOptions& other) =
    default;
TestLauncher::LaunchOptions::~LaunchOptions() = default;

TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
                           size_t parallel_jobs,
                           size_t retry_limit)
    : launcher_delegate_(launcher_delegate),
      total_shards_(1),
      shard_index_(0),
      cycles_(1),
      broken_threshold_(0),
      test_started_count_(0),
      test_finished_count_(0),
      test_success_count_(0),
      test_broken_count_(0),
      retries_left_(0),
      retry_limit_(retry_limit),
      output_bytes_limit_(kOutputSnippetBytesLimit),
      force_run_broken_tests_(false),
      watchdog_timer_(FROM_HERE,
                      kOutputTimeout,
                      this,
                      &TestLauncher::OnOutputTimeout),
      parallel_jobs_(parallel_jobs),
      print_test_stdio_(AUTO) {}

TestLauncher::~TestLauncher() {
  if (base::ThreadPoolInstance::Get()) {
    // Clear the ThreadPoolInstance entirely to make it clear to final cleanup
    // phases that they are happening in a single-threaded phase. Assertions in
    // code like ~ScopedFeatureList are unhappy otherwise (crbug.com/1359095).
    base::ThreadPoolInstance::Get()->Shutdown();
    base::ThreadPoolInstance::Get()->JoinForTesting();
    base::ThreadPoolInstance::Set(nullptr);
  }
}

bool TestLauncher::Run(CommandLine* command_line) {
  base::PlatformThread::SetName("TestLauncherMain");

  if (!Init((command_line == nullptr) ? CommandLine::ForCurrentProcess()
                                      : command_line))
    return false;

#if BUILDFLAG(IS_POSIX)
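  // Wire up the self-pipe shutdown mechanism: the signal handler writes to
  // g_shutdown_pipe, and the FileDescriptorWatcher below turns readability of
  // the other end into an OnShutdownPipeReadable() call on this thread.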
  CHECK_EQ(0, pipe(g_shutdown_pipe));

  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_handler = &ShutdownPipeSignalHandler;

  CHECK_EQ(0, sigaction(SIGINT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGQUIT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGTERM, &action, nullptr));

  auto controller = base::FileDescriptorWatcher::WatchReadable(
      g_shutdown_pipe[0],
      base::BindRepeating(&TestLauncher::OnShutdownPipeReadable,
                          Unretained(this)));
#endif  // BUILDFLAG(IS_POSIX)

  // Start the watchdog timer.
  watchdog_timer_.Reset();

  // Set to true when any test fails.
  bool test_failed = false;
  int iterations = cycles_;
  if (cycles_ > 1 && !stop_on_failure_) {
    // If we don't stop on failure, execute all the repeats in a single
    // iteration, which allows us to parallelize the execution.
    iterations = 1;
    repeats_per_iteration_ = cycles_;
  }
  // Set to false if any iteration fails.
  bool run_result = true;

  while ((iterations > 0 || iterations == -1) &&
         !(stop_on_failure_ && test_failed)) {
    OnTestIterationStart();

    RunTests();
    bool retry_result = RunRetryTests();
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result = run_result && retry_result;

    if (retry_result) {
      fprintf(stdout, "SUCCESS: all tests passed.\n");
      fflush(stdout);
    }

    test_failed = test_success_count_ != test_finished_count_;
    OnTestIterationFinished();
    // Special value "-1" means "repeat indefinitely".
    iterations = (iterations == -1) ? iterations : iterations - 1;
  }

  if (cycles_ != 1)
    results_tracker_.PrintSummaryOfAllIterations();

  MaybeSaveSummaryAsJSON(std::vector<std::string>());

  return run_result;
}

void TestLauncher::LaunchChildGTestProcess(
    scoped_refptr<TaskRunner> task_runner,
    const std::vector<std::string>& test_names,
    const FilePath& task_temp_dir,
    const FilePath& child_temp_dir) {
  FilePath result_file;
  CommandLine cmd_line = launcher_delegate_->GetCommandLine(
      test_names, task_temp_dir, &result_file);

  // Record the exact command line used to launch the child.
  CommandLine new_command_line(PrepareCommandLineForGTest(
      cmd_line, launcher_delegate_->GetWrapper(), retries_left_));
  LaunchOptions options;
  options.flags = launcher_delegate_->GetLaunchOptions();

  if (BotModeEnabled(CommandLine::ForCurrentProcess())) {
    LOG(INFO) << "Starting [" << base::JoinString(test_names, ", ") << "]";
  }

  ChildProcessResults process_results = DoLaunchChildTestProcess(
      new_command_line, child_temp_dir, result_file,
      launcher_delegate_->GetTimeout(), test_names.size(), options,
      redirect_stdio_, launcher_delegate_);

  // Invoke ProcessTestResults on the original thread, not
  // on a worker pool thread.
  task_runner->PostTask(
      FROM_HERE,
      BindOnce(&TestLauncher::ProcessTestResults, Unretained(this), test_names,
               result_file, process_results.output_file_contents,
               process_results.elapsed_time, process_results.exit_code,
               process_results.was_timeout, process_results.thread_id,
               process_results.process_num,
               CountItemsInDirectory(child_temp_dir)));
}

// Determines which result status will be assigned for missing test results.
TestResult::Status MissingResultStatus(size_t tests_to_run_count,
                                       bool was_timeout,
                                       bool exit_code) {
  // There is more than one test, so we cannot assess the status of any
  // individual test.
  if (tests_to_run_count > 1u)
    return TestResult::TEST_SKIPPED;

  // There is only one test and no results.
  // Try to determine the status from the timeout or the exit code.
  if (was_timeout)
    return TestResult::TEST_TIMEOUT;
  if (exit_code != 0)
    return TestResult::TEST_FAILURE;

  // It's a strange case when the test executed successfully
  // but we failed to read the machine-readable report for it.
  return TestResult::TEST_UNKNOWN;
}

// Interprets and reports the test results.
void TestLauncher::ProcessTestResults(
    const std::vector<std::string>& test_names,
    const FilePath& result_file,
    const std::string& output,
    TimeDelta elapsed_time,
    int exit_code,
    bool was_timeout,
    PlatformThreadId thread_id,
    int process_num,
    int leaked_items) {
  std::vector<TestResult> test_results;
  bool crashed = false;
  bool have_test_results =
      ProcessGTestOutput(result_file, &test_results, &crashed);

  if (!have_test_results) {
    // We do not have reliable details about test results (parsing test
    // stdout is known to be unreliable).
    LOG(ERROR) << "Failed to get out-of-band test success data, "
                  "dumping full stdio below:\n"
               << output << "\n";
    // This is odd: sometimes ProcessGTestOutput returns false
    // even though |test_results| is not empty.
    test_results.clear();
  }

  TestResult::Status missing_result_status =
      MissingResultStatus(test_names.size(), was_timeout, exit_code);

  // TODO(phajdan.jr): Check for duplicates and mismatches between
  // the results we got from XML file and tests we intended to run.
  std::map<std::string, TestResult> results_map;
  for (const auto& i : test_results)
    results_map[i.full_name] = i;

  // Results to be reported back to the test launcher.
  std::vector<TestResult> final_results;

  for (const auto& i : test_names) {
    if (Contains(results_map, i)) {
      TestResult test_result = results_map[i];
      // Fix up the test status: we forcibly kill the child process
      // after the timeout, so from XML results it looks just like
      // a crash.
      if ((was_timeout && test_result.status == TestResult::TEST_CRASH) ||
          // If we run multiple tests in a batch with a timeout applied
          // to the entire batch, it is possible that, with other tests
          // running quickly, some tests take longer than the per-test
          // timeout. For consistent handling of tests independent of order
          // and other factors, mark them as timing out.
          test_result.elapsed_time > launcher_delegate_->GetTimeout()) {
        test_result.status = TestResult::TEST_TIMEOUT;
      }
      final_results.push_back(test_result);
    } else {
      // TODO(phajdan.jr): Explicitly pass the info that the test didn't
      // run for a mysterious reason.
      LOG(ERROR) << "no test result for " << i;
      TestResult test_result;
      test_result.full_name = i;
      test_result.status = missing_result_status;
      final_results.push_back(test_result);
    }
  }
  // TODO(phajdan.jr): Handle the case where processing XML output
  // indicates a crash but none of the test results is marked as crashing.

  bool has_non_success_test = false;
  for (const auto& i : final_results) {
    if (i.status != TestResult::TEST_SUCCESS) {
      has_non_success_test = true;
      break;
    }
  }

  if (!has_non_success_test && exit_code != 0) {
    // This is a somewhat surprising case: all tests are marked as successful,
    // but the exit code was not zero. This can happen e.g. under memory
    // tools that report leaks this way. Mark all tests as a failure on exit,
    // and for more precise info they'd need to be retried serially.
    for (auto& i : final_results)
      i.status = TestResult::TEST_FAILURE_ON_EXIT;
  }

  for (auto& i : final_results) {
    // Fix the output snippet after possible changes to the test result.
    i.output_snippet = GetTestOutputSnippet(i, output);
    // The thread id injected here is that of the worker thread that launched
    // the child test process; it might differ from the thread currently
    // running ProcessTestResults.
    i.thread_id = thread_id;
    i.process_num = process_num;
  }

  if (leaked_items)
    results_tracker_.AddLeakedItems(leaked_items, test_names);

  launcher_delegate_->ProcessTestResults(final_results, elapsed_time);

  for (const auto& result : final_results)
    OnTestFinished(result);
}

void TestLauncher::OnTestFinished(const TestResult& original_result) {
  ++test_finished_count_;

  TestResult result(original_result);

  if (result.output_snippet.length() > output_bytes_limit_) {
    if (result.status == TestResult::TEST_SUCCESS)
      result.status = TestResult::TEST_EXCESSIVE_OUTPUT;

    result.output_snippet =
        TruncateSnippetFocused(result.output_snippet, output_bytes_limit_);
  }

  bool print_snippet = false;
  if (print_test_stdio_ == AUTO) {
    print_snippet = (result.status != TestResult::TEST_SUCCESS);
  } else if (print_test_stdio_ == ALWAYS) {
    print_snippet = true;
  } else if (print_test_stdio_ == NEVER) {
    print_snippet = false;
  }
  if (print_snippet) {
    std::vector<std::string_view> snippet_lines =
        SplitStringPiece(result.output_snippet, "\n", base::KEEP_WHITESPACE,
                         base::SPLIT_WANT_ALL);
    if (snippet_lines.size() > kOutputSnippetLinesLimit) {
      size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
      snippet_lines.erase(
          snippet_lines.begin(),
          snippet_lines.begin() + truncated_size);
      snippet_lines.insert(snippet_lines.begin(), "<truncated>");
    }
    fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
    fflush(stdout);
  }

1376   if (result.status == TestResult::TEST_SUCCESS) {
1377     ++test_success_count_;
1378   } else {
1379     // Record the prefix-stripped name so all dependent tests can be retried.
1380     std::string test_name(result.full_name);
1381     ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
1382     ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
1383                                  std::string());
1384     tests_to_retry_.insert(test_name);
1385   }
1386 
1387   // There are no results for this test,
1388   // most likely due to another test failing in the same batch.
1389   if (result.status != TestResult::TEST_SKIPPED)
1390     results_tracker_.AddTestResult(result);
1391 
1392   // TODO(phajdan.jr): Align counter (padding).
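  // Produces a status line like "[3/100] FooTest.Bar (12 ms)" (names and
  // timing here are illustrative).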
1393   std::string status_line(StringPrintf("[%zu/%zu] %s ", test_finished_count_,
1394                                        test_started_count_,
1395                                        result.full_name.c_str()));
1396   if (result.completed()) {
1397     status_line.append(StringPrintf("(%" PRId64 " ms)",
1398                                     result.elapsed_time.InMilliseconds()));
1399   } else if (result.status == TestResult::TEST_TIMEOUT) {
1400     status_line.append("(TIMED OUT)");
1401   } else if (result.status == TestResult::TEST_CRASH) {
1402     status_line.append("(CRASHED)");
1403   } else if (result.status == TestResult::TEST_SKIPPED) {
1404     status_line.append("(SKIPPED)");
1405   } else if (result.status == TestResult::TEST_UNKNOWN) {
1406     status_line.append("(UNKNOWN)");
1407   } else {
1408     // Fail very loudly so it's not ignored.
1409     CHECK(false) << "Unhandled test result status: " << result.status;
1410   }
1411   fprintf(stdout, "%s\n", status_line.c_str());
1412   fflush(stdout);
1413 
1414   if (CommandLine::ForCurrentProcess()->HasSwitch(
1415           switches::kTestLauncherPrintTimestamps)) {
1416     ::logging::ScopedLoggingSettings scoped_logging_setting;
1417     ::logging::SetLogItems(true, true, true, true);
1418     LOG(INFO) << "Test_finished_timestamp";
1419   }
1420   // We just printed a status line, so reset the watchdog timer.
1421   watchdog_timer_.Reset();
1422 
1423   // Do not waste time on timeouts.
1424   if (result.status == TestResult::TEST_TIMEOUT) {
1425     test_broken_count_++;
1426   }
1427   if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold_) {
1428     fprintf(stdout, "Too many badly broken tests (%zu), exiting now.\n",
1429             test_broken_count_);
1430     fflush(stdout);
1431 
1432 #if BUILDFLAG(IS_POSIX)
1433     KillSpawnedTestProcesses();
1434 #endif  // BUILDFLAG(IS_POSIX)
1435 
1436     MaybeSaveSummaryAsJSON({"BROKEN_TEST_EARLY_EXIT"});
1437 
1438     exit(1);
1439   }
1440 }
1441 
1442 // Helper used to parse test filter files. Syntax is documented in
1443 // //testing/buildbot/filters/README.md.
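// Illustrative example of a file this parser accepts (test names are
// hypothetical):
//
//   # Full-line comment.
//   FooTest.Bar            # runs FooTest.Bar
//   FooTest.*              # '*' wildcards are allowed
//   -FooTest.FlakyCase     # a leading '-' adds a negative filter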
1444 bool LoadFilterFile(const FilePath& file_path,
1445                     std::vector<std::string>* positive_filter,
1446                     std::vector<std::string>* negative_filter) {
1447   std::string file_content;
1448   if (!ReadFileToString(file_path, &file_content)) {
1449     LOG(ERROR) << "Failed to read the filter file.";
1450     return false;
1451   }
1452 
1453   std::vector<std::string> filter_lines = SplitString(
1454       file_content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
1455   int line_num = 0;
1456   for (const std::string& filter_line : filter_lines) {
1457     line_num++;
1458 
1459     size_t hash_pos = filter_line.find('#');
1460 
1461     // If the # symbol is not at the beginning of the line and is not
1462     // preceded by a space, it is likely that the comment was
1463     // unintentional.
1464     if (hash_pos != std::string::npos && hash_pos > 0 &&
1465         filter_line[hash_pos - 1] != ' ') {
1466       LOG(WARNING) << "Content of line " << line_num << " in " << file_path
1467                    << " after # is treated as a comment: " << filter_line;
1468     }
1469 
1470     // Strip comments and whitespace from each line.
1471     std::string trimmed_line(
1472         TrimWhitespaceASCII(filter_line.substr(0, hash_pos), TRIM_ALL));
1473 
1474     if (trimmed_line.substr(0, 2) == "//") {
1475       LOG(ERROR) << "Line " << line_num << " in " << file_path
1476                  << " starts with //, use # for comments.";
1477       return false;
1478     }
1479 
1480     // Skip lines that are empty after stripping comments and whitespace.
1481     if (trimmed_line.empty())
1482       continue;
1483 
1484     if (trimmed_line[0] == '-')
1485       negative_filter->push_back(trimmed_line.substr(1));
1486     else
1487       positive_filter->push_back(trimmed_line);
1488   }
1489 
1490   return true;
1491 }
1492 
1493 bool TestLauncher::IsOnlyExactPositiveFilterFromFile(
1494     const CommandLine* command_line) const {
1495   if (command_line->HasSwitch(kGTestFilterFlag)) {
1496     LOG(ERROR) << "Found " << kGTestFilterFlag;
1497     return false;
1498   }
1499   if (!negative_test_filter_.empty()) {
1500     LOG(ERROR) << "Found negative filters in the filter file.";
1501     return false;
1502   }
1503   for (const auto& filter : positive_test_filter_) {
1504     if (Contains(filter, '*')) {
1505       LOG(ERROR) << "Found wildcard positive filters in the filter file.";
1506       return false;
1507     }
1508   }
1509   return true;
1510 }
1511 
1512 bool TestLauncher::Init(CommandLine* command_line) {
1513   // Initialize sharding. Command line takes precedence over legacy environment
1514   // variables.
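  // For example (hypothetical invocation):
  //   out/Default/base_unittests --test-launcher-total-shards=4 \
  //                              --test-launcher-shard-index=1
  // or, via the legacy environment, GTEST_TOTAL_SHARDS=4 GTEST_SHARD_INDEX=1.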
1515   if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
1516       command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
1517     if (!StringToInt(
1518             command_line->GetSwitchValueASCII(
1519                 switches::kTestLauncherTotalShards),
1520             &total_shards_)) {
1521       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
1522       return false;
1523     }
1524     if (!StringToInt(
1525             command_line->GetSwitchValueASCII(
1526                 switches::kTestLauncherShardIndex),
1527             &shard_index_)) {
1528       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
1529       return false;
1530     }
1531     fprintf(stdout,
1532             "Using sharding settings from command line. This is shard %d/%d\n",
1533             shard_index_, total_shards_);
1534     fflush(stdout);
1535   } else {
1536     if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
1537       return false;
1538     if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
1539       return false;
1540     fprintf(stdout,
1541             "Using sharding settings from environment. This is shard %d/%d\n",
1542             shard_index_, total_shards_);
1543     fflush(stdout);
1544   }
1545   if (shard_index_ < 0 ||
1546       total_shards_ < 0 ||
1547       shard_index_ >= total_shards_) {
1548     LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
1549                << kTestShardIndex << " < " << kTestTotalShards
1550                << ", but you have " << kTestShardIndex << "=" << shard_index_
1551                << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
1552     return false;
1553   }
1554 
1555   // Make sure we don't pass any sharding-related environment variables to
1556   // the child processes; this test launcher implements the sharding itself.
1557   CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
1558   CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
1559 
1560   if (command_line->HasSwitch(kGTestRepeatFlag) &&
1561       !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
1562                    &cycles_)) {
1563     LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
1564     return false;
1565   }
1566   if (command_line->HasSwitch(kIsolatedScriptTestRepeatFlag) &&
1567       !StringToInt(
1568           command_line->GetSwitchValueASCII(kIsolatedScriptTestRepeatFlag),
1569           &cycles_)) {
1570     LOG(ERROR) << "Invalid value for " << kIsolatedScriptTestRepeatFlag;
1571     return false;
1572   }
1573 
1574   if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
1575     int retry_limit = -1;
1576     if (!StringToInt(command_line->GetSwitchValueASCII(
1577                          switches::kTestLauncherRetryLimit), &retry_limit) ||
1578         retry_limit < 0) {
1579       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
1580       return false;
1581     }
1582 
1583     retry_limit_ = retry_limit;
1584   } else if (command_line->HasSwitch(
1585                  switches::kIsolatedScriptTestLauncherRetryLimit)) {
1586     int retry_limit = -1;
1587     if (!StringToInt(command_line->GetSwitchValueASCII(
1588                          switches::kIsolatedScriptTestLauncherRetryLimit),
1589                      &retry_limit) ||
1590         retry_limit < 0) {
1591       LOG(ERROR) << "Invalid value for "
1592                  << switches::kIsolatedScriptTestLauncherRetryLimit;
1593       return false;
1594     }
1595 
1596     retry_limit_ = retry_limit;
1597   } else if (command_line->HasSwitch(kGTestRepeatFlag) ||
1598              command_line->HasSwitch(kGTestBreakOnFailure)) {
1599     // If we are repeating tests or waiting for the first test to fail, disable
1600     // retries.
1601     retry_limit_ = 0U;
1602   } else if (!BotModeEnabled(command_line) &&
1603              (command_line->HasSwitch(kGTestFilterFlag) ||
1604               command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
1605     // No retry flag was specified, we are not in bot mode, and the run is
1606     // filtered by flag, so set retries to zero.
1607     retry_limit_ = 0U;
1608   }
1609 
1610   retries_left_ = retry_limit_;
1611   force_run_broken_tests_ =
1612       command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests);
1613 
1614   if (command_line->HasSwitch(switches::kTestLauncherOutputBytesLimit)) {
1615     int output_bytes_limit = -1;
1616     if (!StringToInt(command_line->GetSwitchValueASCII(
1617                          switches::kTestLauncherOutputBytesLimit),
1618                      &output_bytes_limit) ||
1619         output_bytes_limit < 0) {
1620       LOG(ERROR) << "Invalid value for "
1621                  << switches::kTestLauncherOutputBytesLimit;
1622       return false;
1623     }
1624 
1625     output_bytes_limit_ = output_bytes_limit;
1626   }
1627 
1628   fprintf(stdout, "Using %zu parallel jobs.\n", parallel_jobs_);
1629   fflush(stdout);
1630 
1631   CreateAndStartThreadPool(parallel_jobs_);
1632 
1633   std::vector<std::string> positive_file_filter;
1634   std::vector<std::string> positive_gtest_filter;
1635 
1636   if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
1637     auto filter =
1638         command_line->GetSwitchValueNative(switches::kTestLauncherFilterFile);
1639     for (auto filter_file :
1640          SplitStringPiece(filter, FILE_PATH_LITERAL(";"), base::TRIM_WHITESPACE,
1641                           base::SPLIT_WANT_ALL)) {
1642 #if BUILDFLAG(IS_IOS)
1643       // On iOS, the filter files are bundled with the test application.
1644       base::FilePath data_dir;
1645       PathService::Get(DIR_SRC_TEST_DATA_ROOT, &data_dir);
1646       base::FilePath filter_file_path = data_dir.Append(FilePath(filter_file));
1647 #else
1648       base::FilePath filter_file_path =
1649           base::MakeAbsoluteFilePath(FilePath(filter_file));
1650 #endif  // BUILDFLAG(IS_IOS)
1651 
1652       if (!LoadFilterFile(filter_file_path, &positive_file_filter,
1653                           &negative_test_filter_))
1654         return false;
1655     }
1656   }
1657 
1658   // If kGTestRunDisabledTestsFlag is set, drop the negative filters so that
1659   // tests excluded in testing/buildbot/filters are forced to run.
1660   if (command_line->HasSwitch(kGTestRunDisabledTestsFlag)) {
1661     negative_test_filter_.clear();
1662   }
1663 
1664   // If `kEnforceExactPositiveFilter` is set, only accept exact positive
1665   // filters from the filter file.
1666   enforce_exact_postive_filter_ =
1667       command_line->HasSwitch(switches::kEnforceExactPositiveFilter);
1668   if (enforce_exact_postive_filter_ &&
1669       !IsOnlyExactPositiveFilterFromFile(command_line)) {
1670     LOG(ERROR) << "With " << switches::kEnforceExactPositiveFilter
1671                << ", only exact positive filters are accepted, via "
1672                << switches::kTestLauncherFilterFile;
1673     return false;
1674   }
1675 
1676   // Split --gtest_filter at '-', if there is one, to separate into
1677   // positive filter and negative filter portions.
1678   bool double_colon_supported = !command_line->HasSwitch(kGTestFilterFlag);
1679   std::string filter = command_line->GetSwitchValueASCII(
1680       double_colon_supported ? kIsolatedScriptTestFilterFlag
1681                              : kGTestFilterFlag);
1682   size_t dash_pos = filter.find('-');
1683   if (dash_pos == std::string::npos) {
1684     positive_gtest_filter =
1685         ExtractTestsFromFilter(filter, double_colon_supported);
1686   } else {
1687     // Everything up to the dash.
1688     positive_gtest_filter = ExtractTestsFromFilter(filter.substr(0, dash_pos),
1689                                                    double_colon_supported);
1690 
1691     // Everything after the dash.
1692     for (std::string pattern : ExtractTestsFromFilter(
1693              filter.substr(dash_pos + 1), double_colon_supported)) {
1694       negative_test_filter_.push_back(pattern);
1695     }
1696   }
1697 
1698   skip_disabled_tests_ =
1699       !command_line->HasSwitch(kGTestRunDisabledTestsFlag) &&
1700       !command_line->HasSwitch(kIsolatedScriptRunDisabledTestsFlag);
1701 
1702   if (!InitTests())
1703     return false;
1704 
1705   if (!ShuffleTests(command_line))
1706     return false;
1707 
1708   if (!ProcessAndValidateTests())
1709     return false;
1710 
1711   if (command_line->HasSwitch(switches::kTestLauncherPrintTestStdio)) {
1712     std::string print_test_stdio = command_line->GetSwitchValueASCII(
1713         switches::kTestLauncherPrintTestStdio);
1714     if (print_test_stdio == "auto") {
1715       print_test_stdio_ = AUTO;
1716     } else if (print_test_stdio == "always") {
1717       print_test_stdio_ = ALWAYS;
1718     } else if (print_test_stdio == "never") {
1719       print_test_stdio_ = NEVER;
1720     } else {
1721       LOG(WARNING) << "Invalid value for "
1722                    << switches::kTestLauncherPrintTestStdio << ": "
1723                    << print_test_stdio;
1724       return false;
1725     }
1726   }
1727 
1728   stop_on_failure_ = command_line->HasSwitch(kGTestBreakOnFailure);
1729 
1730   if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
1731     summary_path_ = FilePath(
1732         command_line->GetSwitchValuePath(switches::kTestLauncherSummaryOutput));
1733   }
1734   if (command_line->HasSwitch(switches::kTestLauncherTrace)) {
1735     trace_path_ = FilePath(
1736         command_line->GetSwitchValuePath(switches::kTestLauncherTrace));
1737   }
1738 
1739   // When running in parallel mode we need to redirect stdio to avoid mixed-up
1740   // output. We also always redirect on the bots to get the test output into
1741   // the JSON summary.
1742   redirect_stdio_ = (parallel_jobs_ > 1) || BotModeEnabled(command_line);
1743 
1744   CombinePositiveTestFilters(std::move(positive_gtest_filter),
1745                              std::move(positive_file_filter));
1746 
1747   if (!results_tracker_.Init(*command_line)) {
1748     LOG(ERROR) << "Failed to initialize test results tracker.";
1749     return false;
1750   }
1751 
1752 #if defined(NDEBUG)
1753   results_tracker_.AddGlobalTag("MODE_RELEASE");
1754 #else
1755   results_tracker_.AddGlobalTag("MODE_DEBUG");
1756 #endif
1757 
1758   // Operating systems (sorted alphabetically).
1759   // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
1760   // of OS_POSIX.
1761 #if BUILDFLAG(IS_ANDROID)
1762   results_tracker_.AddGlobalTag("OS_ANDROID");
1763 #endif
1764 
1765 #if BUILDFLAG(IS_APPLE)
1766   results_tracker_.AddGlobalTag("OS_APPLE");
1767 #endif
1768 
1769 #if BUILDFLAG(IS_BSD)
1770   results_tracker_.AddGlobalTag("OS_BSD");
1771 #endif
1772 
1773 #if BUILDFLAG(IS_FREEBSD)
1774   results_tracker_.AddGlobalTag("OS_FREEBSD");
1775 #endif
1776 
1777 #if BUILDFLAG(IS_FUCHSIA)
1778   results_tracker_.AddGlobalTag("OS_FUCHSIA");
1779 #endif
1780 
1781 #if BUILDFLAG(IS_IOS)
1782   results_tracker_.AddGlobalTag("OS_IOS");
1783 #endif
1784 
1785 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
1786   results_tracker_.AddGlobalTag("OS_LINUX");
1787 #endif
1788 
1789 #if BUILDFLAG(IS_CHROMEOS_ASH)
1790   results_tracker_.AddGlobalTag("OS_CHROMEOS");
1791 #endif
1792 
1793 #if BUILDFLAG(IS_MAC)
1794   results_tracker_.AddGlobalTag("OS_MAC");
1795 #endif
1796 
1797 #if BUILDFLAG(IS_NACL)
1798   results_tracker_.AddGlobalTag("OS_NACL");
1799 #endif
1800 
1801 #if BUILDFLAG(IS_OPENBSD)
1802   results_tracker_.AddGlobalTag("OS_OPENBSD");
1803 #endif
1804 
1805 #if BUILDFLAG(IS_POSIX)
1806   results_tracker_.AddGlobalTag("OS_POSIX");
1807 #endif
1808 
1809 #if BUILDFLAG(IS_SOLARIS)
1810   results_tracker_.AddGlobalTag("OS_SOLARIS");
1811 #endif
1812 
1813 #if BUILDFLAG(IS_WIN)
1814   results_tracker_.AddGlobalTag("OS_WIN");
1815 #endif
1816 
1817   // CPU-related tags.
1818 #if defined(ARCH_CPU_32_BITS)
1819   results_tracker_.AddGlobalTag("CPU_32_BITS");
1820 #endif
1821 
1822 #if defined(ARCH_CPU_64_BITS)
1823   results_tracker_.AddGlobalTag("CPU_64_BITS");
1824 #endif
1825 
1826   return true;
1827 }
1828 
1829 bool TestLauncher::InitTests() {
1830   std::vector<TestIdentifier> tests;
1831   if (!launcher_delegate_->GetTests(&tests)) {
1832     LOG(ERROR) << "Failed to get list of tests.";
1833     return false;
1834   }
1835 
1836   // Check for duplicate test names. These can cause difficult-to-diagnose
1837   // crashes in the test runner as well as confusion about exactly what test is
1838   // failing. See https://crbug.com/1463355 for details.
1839   std::unordered_set<std::string> full_test_names;
1840   bool dups_found = false;
1841   for (auto& test : tests) {
1842     const std::string full_test_name =
1843         test.test_case_name + "." + test.test_name;
1844     auto [it, inserted] = full_test_names.insert(full_test_name);
1845     if (!inserted) {
1846       LOG(WARNING) << "Duplicate test name found: " << full_test_name;
1847       dups_found = true;
1848     }
1849   }
1850   CHECK(!dups_found);
1851 
1852   std::vector<std::string> uninstantiated_tests;
1853   for (const TestIdentifier& test_id : tests) {
1854     TestInfo test_info(test_id);
1855     if (test_id.test_case_name == "GoogleTestVerification") {
1856       // GoogleTestVerification is used by googletest to detect tests that are
1857       // parameterized but not instantiated.
1858       uninstantiated_tests.push_back(test_id.test_name);
1859       continue;
1860     }
1861     // TODO(isamsonov): crbug.com/1004417: remove when Windows builders
1862     // stop flaking on MANUAL_ tests.
1863     if (launcher_delegate_->ShouldRunTest(test_id))
1864       tests_.push_back(test_info);
1865   }
1866   if (!uninstantiated_tests.empty()) {
1867     LOG(ERROR) << "Found uninstantiated parameterized tests. These test suites "
1868                   "will not run:";
1869     for (const std::string& name : uninstantiated_tests)
1870       LOG(ERROR) << "  " << name;
1871     LOG(ERROR) << "Please use INSTANTIATE_TEST_SUITE_P to instantiate the "
1872                   "tests, or GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST if "
1873                   "the parameter list can be intentionally empty. See "
1874                   "//third_party/googletest/src/docs/advanced.md";
1875     return false;
1876   }
1877   return true;
1878 }
1879 
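// Shuffles the test order when --gtest_shuffle is passed. The chosen seed is
// printed so that an ordering can be reproduced later by also passing
// --gtest_random_seed=<seed>.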
1880 bool TestLauncher::ShuffleTests(CommandLine* command_line) {
1881   if (command_line->HasSwitch(kGTestShuffleFlag)) {
1882     uint32_t shuffle_seed;
1883     if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1884       const std::string custom_seed_str =
1885           command_line->GetSwitchValueASCII(kGTestRandomSeedFlag);
1886       uint32_t custom_seed = 0;
1887       if (!StringToUint(custom_seed_str, &custom_seed)) {
1888         LOG(ERROR) << "Unable to parse seed \"" << custom_seed_str << "\".";
1889         return false;
1890       }
1891       if (custom_seed >= kRandomSeedUpperBound) {
1892         LOG(ERROR) << "Seed " << custom_seed << " outside of expected range "
1893                    << "[0, " << kRandomSeedUpperBound << ")";
1894         return false;
1895       }
1896       shuffle_seed = custom_seed;
1897     } else {
1898       std::uniform_int_distribution<uint32_t> dist(0, kRandomSeedUpperBound - 1);
1899       std::random_device random_dev;
1900       shuffle_seed = dist(random_dev);
1901     }
1902 
1903     std::mt19937 randomizer;
1904     randomizer.seed(shuffle_seed);
1905     ranges::shuffle(tests_, randomizer);
1906 
1907     fprintf(stdout, "Randomizing with seed %u\n", shuffle_seed);
1908     fflush(stdout);
1909   } else if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1910     LOG(ERROR) << kGTestRandomSeedFlag << " requires " << kGTestShuffleFlag;
1911     return false;
1912   }
1913   return true;
1914 }
1915 
1916 bool TestLauncher::ProcessAndValidateTests() {
1917   bool result = true;
1918   std::unordered_set<std::string> disabled_tests;
1919   std::unordered_map<std::string, TestInfo> pre_tests;
1920 
1921   // Find disabled and PRE_ tests.
1922   for (const TestInfo& test_info : tests_) {
1923     std::string test_name = test_info.GetFullName();
1924     results_tracker_.AddTest(test_name);
1925     if (test_info.disabled()) {
1926       disabled_tests.insert(test_info.GetDisabledStrippedName());
1927       results_tracker_.AddDisabledTest(test_name);
1928     }
1929     if (test_info.pre_test())
1930       pre_tests[test_info.GetDisabledStrippedName()] = test_info;
1931   }
1932 
1933   std::vector<TestInfo> tests_to_run;
1934   for (const TestInfo& test_info : tests_) {
1935     std::string test_name = test_info.GetFullName();
1936     // If any test has a matching disabled test, fail and log for audit.
1937     if (base::Contains(disabled_tests, test_name)) {
1938       LOG(ERROR) << test_name << " duplicated by a DISABLED_ test";
1939       result = false;
1940     }
1941 
1942     // Skip PRE_ tests here; they are appended when their final test is found.
1943     if (test_info.pre_test())
1944       continue;
1945 
1946     std::vector<TestInfo> test_sequence;
1947     test_sequence.push_back(test_info);
1948     // Gather the chain of PRE_ tests so they run before their final test.
1949     while (base::Contains(pre_tests, test_sequence.back().GetPreName())) {
1950       test_sequence.push_back(pre_tests[test_sequence.back().GetPreName()]);
1951       pre_tests.erase(test_sequence.back().GetDisabledStrippedName());
1952     }
1953     // Skip disabled tests unless explicitly requested.
1954     if (!test_info.disabled() || !skip_disabled_tests_)
1955       tests_to_run.insert(tests_to_run.end(), test_sequence.rbegin(),
1956                           test_sequence.rend());
1957   }
1958   tests_ = std::move(tests_to_run);
1959 
1960   // If any tests remain in |pre_tests| map, fail and log for audit.
1961   for (const auto& i : pre_tests) {
1962     LOG(ERROR) << i.first << " is an orphaned pre test";
1963     result = false;
1964   }
1965   return result;
1966 }
1967 
1968 void TestLauncher::CreateAndStartThreadPool(size_t num_parallel_jobs) {
1969   base::ThreadPoolInstance::Create("TestLauncher");
1970   base::ThreadPoolInstance::Get()->Start({num_parallel_jobs});
1971 }
1972 
1973 void TestLauncher::CombinePositiveTestFilters(
1974     std::vector<std::string> filter_a,
1975     std::vector<std::string> filter_b) {
1976   has_at_least_one_positive_filter_ = !filter_a.empty() || !filter_b.empty();
1977   if (!has_at_least_one_positive_filter_) {
1978     return;
1979   }
1980   // If two positive filters are present, only run tests that match a pattern
1981   // in both filters.
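  // For example (hypothetical names), --gtest_filter=Foo.* combined with a
  // filter file listing Foo.Bar narrows the run to just Foo.Bar.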
1982   if (!filter_a.empty() && !filter_b.empty()) {
1983     for (const auto& i : tests_) {
1984       std::string test_name = i.GetFullName();
1985       bool found_a = false;
1986       bool found_b = false;
1987       for (const auto& k : filter_a) {
1988         found_a = found_a || MatchPattern(test_name, k);
1989       }
1990       for (const auto& k : filter_b) {
1991         found_b = found_b || MatchPattern(test_name, k);
1992       }
1993       if (found_a && found_b) {
1994         positive_test_filter_.push_back(test_name);
1995       }
1996     }
1997   } else if (!filter_a.empty()) {
1998     positive_test_filter_ = std::move(filter_a);
1999   } else {
2000     positive_test_filter_ = std::move(filter_b);
2001   }
2002 }
2003 
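// Buckets tests into shards by hashing the prefix-stripped name: e.g. a test
// whose PersistentHash() is 10 lands on shard 2 when total_shards_ is 4.
// Stripping prefixes first keeps PRE_Foo and Foo on the same shard.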
2004 bool TestLauncher::ShouldRunInCurrentShard(
2005     std::string_view prefix_stripped_name) const {
2006   CHECK(!StartsWith(prefix_stripped_name, kPreTestPrefix));
2007   CHECK(!StartsWith(prefix_stripped_name, kDisabledTestPrefix));
2008   return PersistentHash(prefix_stripped_name) % total_shards_ ==
2009          static_cast<uint32_t>(shard_index_);
2010 }
2011 
2012 std::vector<std::string> TestLauncher::CollectTests() {
2013   std::vector<std::string> test_names;
2014   // To support RTS (regression test selection), which may produce 100,000
2015   // or more exact gtest filters, we first split the filter into exact and
2016   // wildcard parts; the exact part can then be matched quickly via a set.
2017   std::vector<std::string_view> positive_wildcards_filter;
2018   std::unordered_set<std::string_view> positive_exact_filter;
2019   positive_exact_filter.reserve(positive_test_filter_.size());
2020   std::unordered_set<std::string> enforced_positive_tests;
2021   for (const std::string& filter : positive_test_filter_) {
2022     if (filter.find('*') != std::string::npos) {
2023       positive_wildcards_filter.push_back(filter);
2024     } else {
2025       positive_exact_filter.insert(filter);
2026     }
2027   }
2028 
2029   std::vector<std::string_view> negative_wildcards_filter;
2030   std::unordered_set<std::string_view> negative_exact_filter;
2031   negative_exact_filter.reserve(negative_test_filter_.size());
2032   for (const std::string& filter : negative_test_filter_) {
2033     if (filter.find('*') != std::string::npos) {
2034       negative_wildcards_filter.push_back(filter);
2035     } else {
2036       negative_exact_filter.insert(filter);
2037     }
2038   }
2039 
2040   for (const TestInfo& test_info : tests_) {
2041     std::string test_name = test_info.GetFullName();
2042 
2043     std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
2044 
2045     // Skip tests that don't match the filter (if given).
2046     if (has_at_least_one_positive_filter_) {
2047       bool found = positive_exact_filter.find(test_name) !=
2048                        positive_exact_filter.end() ||
2049                    positive_exact_filter.find(prefix_stripped_name) !=
2050                        positive_exact_filter.end();
2051       if (found && enforce_exact_postive_filter_) {
2052         enforced_positive_tests.insert(prefix_stripped_name);
2053       }
2054       if (!found) {
2055         for (std::string_view filter : positive_wildcards_filter) {
2056           if (MatchPattern(test_name, filter) ||
2057               MatchPattern(prefix_stripped_name, filter)) {
2058             found = true;
2059             break;
2060           }
2061         }
2062       }
2063 
2064       if (!found)
2065         continue;
2066     }
2067 
2068     if (negative_exact_filter.find(test_name) != negative_exact_filter.end() ||
2069         negative_exact_filter.find(prefix_stripped_name) !=
2070             negative_exact_filter.end()) {
2071       continue;
2072     }
2073 
2074     bool excluded = false;
2075     for (std::string_view filter : negative_wildcards_filter) {
2076       if (MatchPattern(test_name, filter) ||
2077           MatchPattern(prefix_stripped_name, filter)) {
2078         excluded = true;
2079         break;
2080       }
2081     }
2082     if (excluded)
2083       continue;
2084 
2085     // Tests with the name XYZ will cause tests with the name PRE_XYZ to run. We
2086     // should bucket all of these tests together.
2087     if (!ShouldRunInCurrentShard(prefix_stripped_name)) {
2088       continue;
2089     }
2090 
2091     // Report test locations after applying all filters, so that we report test
2092     // locations only for those tests that were run as part of this shard.
2093     results_tracker_.AddTestLocation(test_name, test_info.file(),
2094                                      test_info.line());
2095 
2096     if (!test_info.pre_test()) {
2097       // Only a subset of tests that are run require placeholders -- namely,
2098       // those that will output results. Note that the results for PRE_XYZ will
2099       // be merged into XYZ's results if the former fails, so we don't need a
2100       // placeholder for it.
2101       results_tracker_.AddTestPlaceholder(test_name);
2102     }
2103 
2104     test_names.push_back(test_name);
2105   }
2106 
2107   // If `kEnforceExactPositiveFilter` is set, all test cases listed in the
2108   // exact positive filter for the current shard should exist in the
2109   // `enforced_positive_tests`. Otherwise, print the missing cases and fail
2110   // loudly.
2111   if (enforce_exact_postive_filter_) {
2112     bool found_exact_positive_filter_not_enforced = false;
2113     for (const auto& filter : positive_exact_filter) {
2114       if (!ShouldRunInCurrentShard(filter) ||
2115           Contains(enforced_positive_tests, std::string(filter))) {
2116         continue;
2117       }
2118       if (!found_exact_positive_filter_not_enforced) {
2119         LOG(ERROR) << "Found exact positive filter not enforced:";
2120         found_exact_positive_filter_not_enforced = true;
2121       }
2122       LOG(ERROR) << filter;
2123     }
2124     CHECK(!found_exact_positive_filter_not_enforced);
2125   }
2126 
2127   return test_names;
2128 }
2129 
2130 void TestLauncher::RunTests() {
2131   std::vector<std::string> original_test_names = CollectTests();
2132 
2133   std::vector<std::string> test_names;
2134   for (int i = 0; i < repeats_per_iteration_; ++i) {
2135     test_names.insert(test_names.end(), original_test_names.begin(),
2136                       original_test_names.end());
2137   }
2138 
2139   broken_threshold_ = std::max(static_cast<size_t>(20), tests_.size() / 10);
2140 
2141   test_started_count_ = test_names.size();
2142 
2143   // If there are no matching tests, warn and notify of any matches against
2144   // *<filter>*.
2145   if (test_started_count_ == 0) {
2146     PrintFuzzyMatchingTestNames();
2147     fprintf(stdout, "WARNING: No matching tests to run.\n");
2148     fflush(stdout);
2149   }
2150 
2151   // Save an early test summary in case the launcher crashes or gets killed.
2152   results_tracker_.GeneratePlaceholderIteration();
2153   MaybeSaveSummaryAsJSON({"EARLY_SUMMARY"});
2154 
2155   // If we are repeating tests, set the batch size to 1 so that batching does
2156   // not interfere with repeats (unit tests use a filter to form batches and
2157   // can't run the same test twice in the same batch).
2158   size_t batch_size =
2159       repeats_per_iteration_ > 1 ? 1U : launcher_delegate_->GetBatchSize();
2160 
2161   TestRunner test_runner(this, parallel_jobs_, batch_size);
2162   test_runner.Run(test_names);
2163 }
2164 
2165 void TestLauncher::PrintFuzzyMatchingTestNames() {
2166   for (const auto& filter : positive_test_filter_) {
2167     if (filter.empty())
2168       continue;
2169     std::string almost_filter;
2170     if (filter.front() != '*')
2171       almost_filter += '*';
2172     almost_filter += filter;
2173     if (filter.back() != '*')
2174       almost_filter += '*';
2175 
2176     for (const TestInfo& test_info : tests_) {
2177       std::string test_name = test_info.GetFullName();
2178       std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
2179       if (MatchPattern(test_name, almost_filter) ||
2180           MatchPattern(prefix_stripped_name, almost_filter)) {
2181         fprintf(stdout, "Filter \"%s\" would have matched: %s\n",
2182                 almost_filter.c_str(), test_name.c_str());
2183         fflush(stdout);
2184       }
2185     }
2186   }
2187 }
2188 
2189 bool TestLauncher::RunRetryTests() {
2190   while (!tests_to_retry_.empty() && retries_left_ > 0) {
2191     // Retry all tests that depend on a failing test.
2192     std::vector<std::string> test_names;
2193     for (const TestInfo& test_info : tests_) {
2194       if (base::Contains(tests_to_retry_, test_info.GetPrefixStrippedName()))
2195         test_names.push_back(test_info.GetFullName());
2196     }
2197     tests_to_retry_.clear();
2198 
2199     size_t retry_started_count = test_names.size();
2200     test_started_count_ += retry_started_count;
2201 
2202     // Bail out early if there are no tests to retry.
2203     if (retry_started_count == 0)
2204       return false;
2205 
2206     fprintf(stdout, "Retrying %zu test%s (retry #%zu)\n", retry_started_count,
2207             retry_started_count > 1 ? "s" : "", retry_limit_ - retries_left_);
2208     fflush(stdout);
2209 
2210     --retries_left_;
2211     TestRunner test_runner(this);
2212     test_runner.Run(test_names);
2213   }
2214   return tests_to_retry_.empty();
2215 }
2216 
2217 void TestLauncher::OnTestIterationStart() {
2218   test_started_count_ = 0;
2219   test_finished_count_ = 0;
2220   test_success_count_ = 0;
2221   test_broken_count_ = 0;
2222   tests_to_retry_.clear();
2223   results_tracker_.OnTestIterationStarting();
2224 }
2225 
2226 #if BUILDFLAG(IS_POSIX)
2227 // I/O watcher for the reading end of the self-pipe above.
2228 // Terminates any launched child processes and exits the process.
2229 void TestLauncher::OnShutdownPipeReadable() {
2230   fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");
2231   fflush(stdout);
2232 
2233   KillSpawnedTestProcesses();
2234 
2235   MaybeSaveSummaryAsJSON({"CAUGHT_TERMINATION_SIGNAL"});
2236 
2237   // The signal would normally kill the process, so exit now.
2238   _exit(1);
2239 }
2240 #endif  // BUILDFLAG(IS_POSIX)
2241 
2242 void TestLauncher::MaybeSaveSummaryAsJSON(
2243     const std::vector<std::string>& additional_tags) {
2244   if (!summary_path_.empty()) {
2245     if (!results_tracker_.SaveSummaryAsJSON(summary_path_, additional_tags)) {
2246       LOG(ERROR) << "Failed to save test launcher output summary.";
2247     }
2248   }
2249   if (!trace_path_.empty()) {
2250     if (!GetTestLauncherTracer()->Dump(trace_path_)) {
2251       LOG(ERROR) << "Failed to save test launcher trace.";
2252     }
2253   }
2254 }
2255 
2256 void TestLauncher::OnTestIterationFinished() {
2257   TestResultsTracker::TestStatusMap tests_by_status(
2258       results_tracker_.GetTestStatusMapForCurrentIteration());
2259   if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
2260     results_tracker_.AddGlobalTag(kUnreliableResultsTag);
2261 
2262   results_tracker_.PrintSummaryOfCurrentIteration();
2263 }
2264 
2265 void TestLauncher::OnOutputTimeout() {
2266   DCHECK(thread_checker_.CalledOnValidThread());
2267 
2268   AutoLock lock(*GetLiveProcessesLock());
2269 
2270   fprintf(stdout, "Still waiting for the following processes to finish:\n");
2271 
2272   for (const auto& pair : *GetLiveProcesses()) {
2273 #if BUILDFLAG(IS_WIN)
2274     fwprintf(stdout, L"\t%s\n", pair.second.GetCommandLineString().c_str());
2275 #else
2276     fprintf(stdout, "\t%s\n", pair.second.GetCommandLineString().c_str());
2277 #endif
2278   }
2279 
2280   fflush(stdout);
2281 
2282   if (CommandLine::ForCurrentProcess()->HasSwitch(
2283           switches::kTestLauncherPrintTimestamps)) {
2284     ::logging::ScopedLoggingSettings scoped_logging_setting;
2285     ::logging::SetLogItems(true, true, true, true);
2286     LOG(INFO) << "Waiting_timestamp";
2287   }
2288   // Arm the timer again - otherwise it would fire only once.
2289   watchdog_timer_.Reset();
2290 }
2291 
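// For example, with 16 logical cores NumParallelJobs(4) returns 4 jobs;
// --test-launcher-jobs=N, when present, overrides the computed value.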
2292 size_t NumParallelJobs(unsigned int cores_per_job) {
2293   const CommandLine* command_line = CommandLine::ForCurrentProcess();
2294   if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
2295     // If the number of test launcher jobs was specified, return that number.
2296     size_t jobs = 0U;
2297 
2298     if (!StringToSizeT(
2299             command_line->GetSwitchValueASCII(switches::kTestLauncherJobs),
2300             &jobs) ||
2301         !jobs) {
2302       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
2303       return 0U;
2304     }
2305     return jobs;
2306   }
2307   if (!BotModeEnabled(command_line) &&
2308       (command_line->HasSwitch(kGTestFilterFlag) ||
2309        command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
2310     // Do not run jobs in parallel by default if we are running a subset of
2311     // the tests and if bot mode is off.
2312     return 1U;
2313   }
2314 
2315 #if BUILDFLAG(IS_WIN)
2316   // Use processors in all groups (Windows splits more than 64 logical
2317   // processors into groups).
2318   size_t cores = base::checked_cast<size_t>(
2319       ::GetActiveProcessorCount(ALL_PROCESSOR_GROUPS));
2320 #else
2321   size_t cores = base::checked_cast<size_t>(SysInfo::NumberOfProcessors());
2322 #if BUILDFLAG(IS_MAC)
2323   // This is necessary to allow tests to call SetCpuSecurityMitigationsEnabled()
2324   // despite NumberOfProcessors() having already been called in the process.
2325   SysInfo::ResetCpuSecurityMitigationsEnabledForTesting();
2326 #endif  // BUILDFLAG(IS_MAC)
2327 #endif  // BUILDFLAG(IS_WIN)
2328 
2329 #if BUILDFLAG(IS_IOS) && TARGET_OS_SIMULATOR
2330   // If we are targeting the simulator, increase the number of jobs to 2x
2331   // the number of cores. This is necessary because the startup of each
2332   // process is slow, so using 2x empirically approaches total machine
2333   // utilization.
2334   cores *= 2;
2335 #endif
2336   return std::max(size_t(1), cores / cores_per_job);
2337 }
2338 
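// Returns the part of |full_output| between the "[ RUN      ] <full_name>"
// line and the matching "[  FAILED  ]" / "[       OK ]" / "[  SKIPPED ]"
// line (or the next "[ RUN      ]" if the test never finished), or an empty
// string when no RUN marker for the test is present.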
2339 std::string GetTestOutputSnippet(const TestResult& result,
2340                                  const std::string& full_output) {
2341   size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
2342                                     result.full_name);
2343   if (run_pos == std::string::npos)
2344     return std::string();
2345 
2346   size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
2347                                     result.full_name,
2348                                     run_pos);
2349   // Only clip the snippet to the "OK" message if the test really
2350   // succeeded or was skipped. It still might have e.g. crashed
2351   // after printing it.
2352   if (end_pos == std::string::npos) {
2353     if (result.status == TestResult::TEST_SUCCESS) {
2354       end_pos = full_output.find(std::string("[       OK ] ") +
2355                                 result.full_name,
2356                                 run_pos);
2357 
2358       // Also handle SKIPPED next to SUCCESS because the GTest XML output
2359       // doesn't distinguish between SKIPPED and SUCCESS.
2360       if (end_pos == std::string::npos)
2361         end_pos = full_output.find(
2362             std::string("[  SKIPPED ] ") + result.full_name, run_pos);
2363     } else {
2364       // If test is not successful, include all output until subsequent test.
2365       end_pos = full_output.find(std::string("[ RUN      ]"), run_pos + 1);
2366       if (end_pos != std::string::npos)
2367         end_pos--;
2368     }
2369   }
2370   if (end_pos != std::string::npos) {
2371     size_t newline_pos = full_output.find("\n", end_pos);
2372     if (newline_pos != std::string::npos)
2373       end_pos = newline_pos + 1;
2374   }
2375 
2376   std::string snippet(full_output.substr(run_pos));
2377   if (end_pos != std::string::npos)
2378     snippet = full_output.substr(run_pos, end_pos - run_pos);
2379 
2380   return snippet;
2381 }
2382 
2383 std::string TruncateSnippetFocused(const std::string_view snippet,
2384                                    size_t byte_limit) {
2385   // Find the start of anything that looks like a fatal log message.
2386   // We want to preferentially preserve these from truncation because
2387   // result_adapter extracts fatal test errors from snippets to populate
2388   // failure reasons in ResultDB. It is also convenient for the user to
2389   // see them.
2390   // Refer to LogMessage::Init in base/logging[_platform].cc for patterns.
2391   size_t fatal_message_pos =
2392       std::min(snippet.find("FATAL:"), snippet.find("FATAL "));
2393 
2394   size_t fatal_message_start = 0;
2395   size_t fatal_message_end = 0;
2396   if (fatal_message_pos != std::string::npos) {
2397     // Find the line-endings before and after the fatal message.
2398     size_t start_pos = snippet.rfind("\n", fatal_message_pos);
2399     if (start_pos != std::string::npos) {
2400       fatal_message_start = start_pos;
2401     }
2402     size_t end_pos = snippet.find("\n", fatal_message_pos);
2403     if (end_pos != std::string::npos) {
2404       // Include the new-line character.
2405       fatal_message_end = end_pos + 1;
2406     } else {
2407       fatal_message_end = snippet.length();
2408     }
2409   }
2410   // Limit fatal message length to half the snippet byte quota. This ensures
2411   // we have space for some context at the beginning and end of the snippet.
2412   fatal_message_end =
2413       std::min(fatal_message_end, fatal_message_start + (byte_limit / 2));
2414 
2415   // Distribute remaining bytes between start and end of snippet.
2416   // The split is either even, or if one is small enough to be displayed
2417   // without truncation, it gets displayed in full and the other split gets
2418   // the remaining bytes.
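  // Worked example (illustrative): with byte_limit == 100 and a 10-byte fatal
  // message starting 5 bytes into the snippet, remaining_bytes is 90; the
  // 5-byte head fits in full, so the tail receives the remaining 85 bytes.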
2419   size_t remaining_bytes =
2420       byte_limit - (fatal_message_end - fatal_message_start);
2421   size_t start_split_bytes;
2422   size_t end_split_bytes;
2423   if (fatal_message_start < remaining_bytes / 2) {
2424     start_split_bytes = fatal_message_start;
2425     end_split_bytes = remaining_bytes - fatal_message_start;
2426   } else if ((snippet.length() - fatal_message_end) < remaining_bytes / 2) {
2427     start_split_bytes =
2428         remaining_bytes - (snippet.length() - fatal_message_end);
2429     end_split_bytes = (snippet.length() - fatal_message_end);
2430   } else {
2431     start_split_bytes = remaining_bytes / 2;
2432     end_split_bytes = remaining_bytes - start_split_bytes;
2433   }
2434   return base::StrCat(
2435       {TruncateSnippet(snippet.substr(0, fatal_message_start),
2436                        start_split_bytes),
2437        snippet.substr(fatal_message_start,
2438                       fatal_message_end - fatal_message_start),
2439        TruncateSnippet(snippet.substr(fatal_message_end), end_split_bytes)});
2440 }
2441 
2442 }  // namespace base
2443