1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <atomic>
18 #include <string>
19 #include <vector>
20
21 #include <fcntl.h>
22 #include <stdint.h>
23 #include <string.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <unistd.h>
28 #include <optional>
29
30 #include "perfetto/base/build_config.h"
31 #include "perfetto/base/logging.h"
32 #include "perfetto/ext/base/file_utils.h"
33 #include "perfetto/ext/base/pipe.h"
34 #include "perfetto/ext/base/string_utils.h"
35 #include "perfetto/ext/base/subprocess.h"
36 #include "perfetto/heap_profile.h"
37 #include "perfetto/trace_processor/trace_processor.h"
38 #include "perfetto/tracing/default_socket.h"
39 #include "protos/perfetto/trace/trace.gen.h"
40 #include "protos/perfetto/trace/trace.pbzero.h"
41 #include "src/base/test/test_task_runner.h"
42 #include "src/profiling/memory/heapprofd_producer.h"
43 #include "test/gtest_and_gmock.h"
44 #include "test/integrationtest_initializer.h"
45 #include "test/test_helper.h"
46
47 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
48 #include <sys/system_properties.h>
49 #endif
50
51 #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
52 #include "protos/perfetto/trace/interned_data/interned_data.gen.h"
53 #include "protos/perfetto/trace/profiling/profile_common.gen.h"
54 #include "protos/perfetto/trace/profiling/profile_packet.gen.h"
55
56 namespace perfetto {
57
58 namespace profiling {
59 namespace {
60
61 constexpr useconds_t kMsToUs = 1000;
62
63 constexpr auto kTracingDisabledTimeoutMs = 30000;
64 constexpr auto kWaitForReadDataTimeoutMs = 10000;
65 constexpr size_t kStartupAllocSize = 10;
66 constexpr size_t kFirstIterationBytes = 5;
67 constexpr size_t kSecondIterationBytes = 7;
68
69 enum class TestMode { kCentral, kStatic };
70 enum class AllocatorMode { kMalloc, kCustom };
71
72 using ::testing::AllOf;
73 using ::testing::AnyOf;
74 using ::testing::Bool;
75 using ::testing::Contains;
76 using ::testing::Eq;
77 using ::testing::Field;
78 using ::testing::HasSubstr;
79 using ::testing::Values;
80
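// SQL run against trace_processor: for every distinct (ts, upid) that has
// heap profile allocations, join the experimental_flamegraph table and return
// the native flamegraph nodes, largest |cumulative_size| first.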
81 constexpr const char* kOnlyFlamegraph = R"(
82 SELECT
83 id,
84 name,
85 map_name,
86 count,
87 cumulative_count,
88 size,
89 cumulative_size,
90 alloc_count,
91 cumulative_alloc_count,
92 alloc_size,
93 cumulative_alloc_size,
94 parent_id
95 FROM (SELECT distinct ts, upid from heap_profile_allocation) hpa
96 JOIN experimental_flamegraph(
97 'native',
98 hpa.ts,
99 NULL,
100 hpa.upid,
101 NULL,
102 NULL
103 )
104 order by abs(cumulative_size) desc;
105 )";
106
107 struct FlamegraphNode {
108 int64_t id;
109 std::string name;
110 std::string map_name;
111 int64_t count;
112 int64_t cumulative_count;
113 int64_t size;
114 int64_t cumulative_size;
115 int64_t alloc_count;
116 int64_t cumulative_alloc_count;
117 int64_t alloc_size;
118 int64_t cumulative_alloc_size;
119 std::optional<int64_t> parent_id;
120 };
121
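// Runs kOnlyFlamegraph against |tp| and materializes every returned row into
// a FlamegraphNode.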
122 std::vector<FlamegraphNode> GetFlamegraph(trace_processor::TraceProcessor* tp) {
123 std::vector<FlamegraphNode> result;
124 auto it = tp->ExecuteQuery(kOnlyFlamegraph);
125 while (it.Next()) {
126 result.push_back({
127 it.Get(0).AsLong(),
128 it.Get(1).AsString(),
129 it.Get(2).AsString(),
130 it.Get(3).AsLong(),
131 it.Get(4).AsLong(),
132 it.Get(5).AsLong(),
133 it.Get(6).AsLong(),
134 it.Get(7).AsLong(),
135 it.Get(8).AsLong(),
136 it.Get(9).AsLong(),
137 it.Get(10).AsLong(),
138 it.Get(11).is_null() ? std::nullopt
139 : std::optional<int64_t>(it.Get(11).AsLong()),
140 });
141 }
142 PERFETTO_CHECK(it.Status().ok());
143 return result;
144 }
145
146 std::string AllocatorName(AllocatorMode mode) {
147 switch (mode) {
148 case AllocatorMode::kMalloc:
149 return "libc.malloc";
150 case AllocatorMode::kCustom:
151 return "test";
152 }
153 }
154
155 AllocatorMode AllocatorModeFromNameOrDie(std::string s) {
156 if (s == "libc.malloc")
157 return AllocatorMode::kMalloc;
158 if (s == "test")
159 return AllocatorMode::kCustom;
160   PERFETTO_FATAL("Invalid allocator mode [libc.malloc | test]: %s", s.c_str());
161 }
162
163 void ContinuousDump(HeapprofdConfig* cfg) {
164 auto* cont_config = cfg->mutable_continuous_dump_config();
165 cont_config->set_dump_phase_ms(0);
166 cont_config->set_dump_interval_ms(100);
167 }
168
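// Builds a TraceConfig with a single 10 MiB buffer and one "android.heapprofd"
// data source; |fn| is invoked to fill in the HeapprofdConfig before it is
// serialized into the data source config.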
169 template <typename F>
170 TraceConfig MakeTraceConfig(F fn) {
171 TraceConfig trace_config;
172 trace_config.add_buffers()->set_size_kb(10 * 1024);
173 trace_config.set_duration_ms(2000);
174 trace_config.set_data_source_stop_timeout_ms(10000);
175
176 auto* ds_config = trace_config.add_data_sources()->mutable_config();
177 ds_config->set_name("android.heapprofd");
178 ds_config->set_target_buffer(0);
179
180 protos::gen::HeapprofdConfig heapprofd_config;
181 fn(&heapprofd_config);
182 ds_config->set_heapprofd_config_raw(heapprofd_config.SerializeAsString());
183 return trace_config;
184 }
185
186 void CustomAllocateAndFree(size_t bytes) {
187 static uint32_t heap_id = AHeapProfile_registerHeap(AHeapInfo_create("test"));
188 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
189 AHeapProfile_reportFree(heap_id, 0x1234abc);
190 }
191
192 void SecondaryAllocAndFree(size_t bytes) {
193 static uint32_t heap_id =
194 AHeapProfile_registerHeap(AHeapInfo_create("secondary"));
195 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
196 AHeapProfile_reportFree(heap_id, 0x1234abc);
197 }
198
199 void AllocateAndFree(size_t bytes) {
200 // This volatile is needed to prevent the compiler from trying to be
201 // helpful and compiling a "useless" malloc + free into a noop.
202 volatile char* x = static_cast<char*>(malloc(bytes));
203 if (x) {
204 if (bytes > 0)
205 x[0] = 'x';
206 free(const_cast<char*>(x));
207 }
208 }
209
210 void DoAllocation(AllocatorMode mode, size_t bytes) {
211 switch (mode) {
212 case AllocatorMode::kMalloc:
213 AllocateAndFree(bytes);
214 break;
215 case AllocatorMode::kCustom:
216       // We need to make at least one malloc call even if we want to test the
217       // custom allocator, as the init mechanism assumes the application uses malloc.
218 AllocateAndFree(1);
219 CustomAllocateAndFree(bytes);
220 break;
221 }
222 }
223
224 void ContinuousMalloc(AllocatorMode mode,
225 size_t primary_bytes,
226 size_t secondary_bytes,
227 ssize_t max_iter = -1) {
228 for (ssize_t i = 0; max_iter == -1 || i < max_iter; ++i) {
229 DoAllocation(mode, primary_bytes);
230 if (secondary_bytes)
231 SecondaryAllocAndFree(secondary_bytes);
232 usleep(10 * kMsToUs);
233 }
234 }
235
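// Starts |child| and blocks until it has finished initializing the heap
// profiling client. The child inherits the write end of a pipe (passed via
// HEAPPROFD_TESTING_READY_PIPE) and closes it in ChildFinishHandshake(); the
// resulting EOF on the read end unblocks the parent.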
236 void StartAndWaitForHandshake(base::Subprocess* child) {
237   // We cannot use base::Pipe because that assumes we want CLOEXEC. We do NOT
238   // want CLOEXEC: the fd must survive the child's exec() (see ChildFinishHandshake()).
239 int ready_pipe[2];
240 PERFETTO_CHECK(pipe(ready_pipe) == 0); // NOLINT(android-cloexec-pipe)
241
242 int ready_pipe_rd = ready_pipe[0];
243 int ready_pipe_wr = ready_pipe[1];
244 child->args.preserve_fds.push_back(ready_pipe_wr);
245 child->args.env.push_back("HEAPPROFD_TESTING_READY_PIPE=" +
246 std::to_string(ready_pipe_wr));
247 child->Start();
248 close(ready_pipe_wr);
249 // Wait for libc to initialize the signal handler. If we signal before the
250 // handler is installed, we can kill the process.
251 char buf[1];
252 PERFETTO_CHECK(PERFETTO_EINTR(read(ready_pipe_rd, buf, sizeof(buf))) == 0);
253 close(ready_pipe_rd);
254 }
255
256 void ChildFinishHandshake() {
257 const char* ready_pipe = getenv("HEAPPROFD_TESTING_READY_PIPE");
258 if (ready_pipe != nullptr) {
259 close(static_cast<int>(base::StringToInt64(ready_pipe).value()));
260 }
261 }
262
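// Re-executes the current test binary as "heapprofd_continuous_malloc". The
// child takes the RunContinuousMalloc() branch (see MainInitializer) and
// allocates |primary_bytes| (and optionally |secondary_bytes|) in a loop.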
263 base::Subprocess ForkContinuousAlloc(AllocatorMode mode,
264 size_t primary_bytes,
265 size_t secondary_bytes = 0,
266 ssize_t max_iter = -1) {
267 base::Subprocess child({"/proc/self/exe"});
268 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
269 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
270 AllocatorName(mode));
271 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
272 std::to_string(primary_bytes));
273 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
274 std::to_string(secondary_bytes));
275 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
276 std::to_string(max_iter));
277
278 StartAndWaitForHandshake(&child);
279 return child;
280 }
281
282 void RunContinuousMalloc() {
283 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG0");
284 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG1");
285 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG2");
286 const char* a3 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG3");
287 if (a0 == nullptr)
288 return;
289
290 AllocatorMode arg0 = AllocatorModeFromNameOrDie(a0);
291 uint32_t arg1 = a1 ? base::StringToUInt32(a1).value() : 0;
292 uint32_t arg2 = a2 ? base::StringToUInt32(a2).value() : 0;
293 int32_t arg3 = a3 ? base::StringToInt32(a3).value() : -1;
294
295 ChildFinishHandshake();
296
297 ContinuousMalloc(arg0, arg1, arg2, arg3);
298 exit(0);
299 }
300
301 void PERFETTO_NO_INLINE RunAccurateMalloc() {
302 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC");
303 if (a0 == nullptr)
304 return;
305
306 static std::atomic<bool> initialized{false};
307 static uint32_t heap_id =
308 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
309 AHeapInfo_create("test"),
310 [](void*, const AHeapProfileEnableCallbackInfo*) {
311 initialized = true;
312 },
313 nullptr));
314
315 ChildFinishHandshake();
316
317 // heapprofd_client needs malloc to see the signal.
318 while (!initialized)
319 AllocateAndFree(1);
320 // We call the callback before setting enabled=true on the heap, so we
321 // wait a bit for the assignment to happen.
322 usleep(100000);
323 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
324 PERFETTO_FATAL("Expected allocation to be sampled.");
325 AHeapProfile_reportFree(heap_id, 0x1);
326 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
327 PERFETTO_FATAL("Expected allocation to be sampled.");
328 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
329 PERFETTO_FATAL("Expected allocation to be sampled.");
330 AHeapProfile_reportFree(heap_id, 0x2);
331
332   // Wait around so we can verify it didn't crash.
333 for (;;) {
334 // Call sleep, otherwise an empty busy loop is undefined behavior:
335 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
336 sleep(1);
337 }
338 }
339
340 void __attribute__((noreturn)) RunAccurateMallocWithVforkCommon() {
341 static std::atomic<bool> initialized{false};
342 static uint32_t heap_id =
343 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
344 AHeapInfo_create("test"),
345 [](void*, const AHeapProfileEnableCallbackInfo*) {
346 initialized = true;
347 },
348 nullptr));
349
350 ChildFinishHandshake();
351
352 // heapprofd_client needs malloc to see the signal.
353 while (!initialized)
354 AllocateAndFree(1);
355 // We call the callback before setting enabled=true on the heap, so we
356 // wait a bit for the assignment to happen.
357 usleep(100000);
358 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
359 PERFETTO_FATAL("Expected allocation to be sampled.");
360 AHeapProfile_reportFree(heap_id, 0x1);
361 pid_t pid = vfork();
362 PERFETTO_CHECK(pid != -1);
363 if (pid == 0) {
364 AHeapProfile_reportAllocation(heap_id, 0x2, 15u);
365 AHeapProfile_reportAllocation(heap_id, 0x3, 15u);
366 exit(0);
367 }
368 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
369 PERFETTO_FATAL("Expected allocation to be sampled.");
370 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
371 PERFETTO_FATAL("Expected allocation to be sampled.");
372 AHeapProfile_reportFree(heap_id, 0x2);
373
374   // Wait around so we can verify it didn't crash.
375 for (;;) {
376 // Call sleep, otherwise an empty busy loop is undefined behavior:
377 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
378 sleep(1);
379 }
380 }
381
382 void RunAccurateSample() {
383 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE");
384 if (a0 == nullptr)
385 return;
386
387 static std::atomic<bool> initialized{false};
388 static uint32_t heap_id =
389 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
390 AHeapInfo_create("test"),
391 [](void*, const AHeapProfileEnableCallbackInfo*) {
392 initialized = true;
393 },
394 nullptr));
395
396 ChildFinishHandshake();
397
398 // heapprofd_client needs malloc to see the signal.
399 while (!initialized)
400 AllocateAndFree(1);
401 // We call the callback before setting enabled=true on the heap, so we
402 // wait a bit for the assignment to happen.
403 usleep(100000);
404 if (!AHeapProfile_reportSample(heap_id, 0x1, 10u))
405 PERFETTO_FATAL("Expected allocation to be sampled.");
406 AHeapProfile_reportFree(heap_id, 0x1);
407 if (!AHeapProfile_reportSample(heap_id, 0x2, 15u))
408 PERFETTO_FATAL("Expected allocation to be sampled.");
409 if (!AHeapProfile_reportSample(heap_id, 0x3, 15u))
410 PERFETTO_FATAL("Expected allocation to be sampled.");
411 AHeapProfile_reportFree(heap_id, 0x2);
412
413   // Wait around so we can verify it didn't crash.
414 for (;;) {
415 // Call sleep, otherwise an empty busy loop is undefined behavior:
416 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
417 sleep(1);
418 }
419 }
420
421 void RunAccurateMallocWithVfork() {
422 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK");
423 if (a0 == nullptr)
424 return;
425 RunAccurateMallocWithVforkCommon();
426 }
427
428 void RunAccurateMallocWithVforkThread() {
429 const char* a0 =
430 getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD");
431 if (a0 == nullptr)
432 return;
433 std::thread th(RunAccurateMallocWithVforkCommon);
434 th.join();
435 }
436
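// Child-side helper for the ReInit tests: allocates kFirstIterationBytes per
// iteration until a byte arrives on the signal pipe, then switches to
// kSecondIterationBytes and acknowledges over the ack pipe.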
437 void RunReInit() {
438 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG0");
439 if (a0 == nullptr)
440 return;
441
442 AllocatorMode mode = AllocatorModeFromNameOrDie(a0);
443 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG1");
444 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG2");
445 PERFETTO_CHECK(a1 != nullptr && a2 != nullptr);
446 int signal_pipe_rd = static_cast<int>(base::StringToInt64(a1).value());
447 int ack_pipe_wr = static_cast<int>(base::StringToInt64(a2).value());
448
449 ChildFinishHandshake();
450
451 size_t bytes = kFirstIterationBytes;
452 bool signalled = false;
453 for (;;) {
454 DoAllocation(mode, bytes);
455 char buf[1];
456 if (!signalled && read(signal_pipe_rd, buf, sizeof(buf)) == 1) {
457 signalled = true;
458 close(signal_pipe_rd);
459
460 // make sure the client has noticed that the session has stopped
461 DoAllocation(mode, bytes);
462
463 bytes = kSecondIterationBytes;
464 PERFETTO_CHECK(PERFETTO_EINTR(write(ack_pipe_wr, "1", 1)) == 1);
465 close(ack_pipe_wr);
466 }
467 usleep(10 * kMsToUs);
468 }
469 PERFETTO_FATAL("Should be unreachable");
470 }
471
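// Child-side helper for CustomLifetime: registers heaps with enabled/disabled
// callbacks, checks that the sampling interval reported by the enabled
// callback matches ARG0, and signals the fd passed in ARG1 once the disabled
// callback has fired.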
472 void RunCustomLifetime() {
473 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0");
474 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1");
475 if (a0 == nullptr)
476 return;
477 uint64_t arg0 = a0 ? base::StringToUInt64(a0).value() : 0;
478   uint64_t arg1 = a1 ? base::StringToUInt64(a1).value() : 0;
479
480 PERFETTO_CHECK(arg1);
481
482 static std::atomic<bool> initialized{false};
483 static std::atomic<bool> disabled{false};
484 static std::atomic<uint64_t> sampling_interval;
485
486 static uint32_t other_heap_id = 0;
487 auto enabled_callback = [](void*,
488 const AHeapProfileEnableCallbackInfo* info) {
489 sampling_interval =
490 AHeapProfileEnableCallbackInfo_getSamplingInterval(info);
491 initialized = true;
492 };
493 auto disabled_callback = [](void*, const AHeapProfileDisableCallbackInfo*) {
494 PERFETTO_CHECK(other_heap_id);
495 AHeapProfile_reportFree(other_heap_id, 0);
496 disabled = true;
497 };
498 static uint32_t heap_id =
499 AHeapProfile_registerHeap(AHeapInfo_setDisabledCallback(
500 AHeapInfo_setEnabledCallback(AHeapInfo_create("test"),
501 enabled_callback, nullptr),
502 disabled_callback, nullptr));
503
504 other_heap_id = AHeapProfile_registerHeap(AHeapInfo_create("othertest"));
505 ChildFinishHandshake();
506
507 // heapprofd_client needs malloc to see the signal.
508 while (!initialized)
509 AllocateAndFree(1);
510
511 if (sampling_interval.load() != arg0) {
512 PERFETTO_FATAL("%" PRIu64 " != %" PRIu64, sampling_interval.load(), arg0);
513 }
514
515 while (!disabled)
516 AHeapProfile_reportFree(heap_id, 0x2);
517
518 char x = 'x';
519 PERFETTO_CHECK(base::WriteAll(static_cast<int>(arg1), &x, sizeof(x)) == 1);
520 close(static_cast<int>(arg1));
521
522 // Wait around so we can verify it didn't crash.
523 for (;;) {
524 // Call sleep, otherwise an empty busy loop is undefined behavior:
525 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
526 sleep(1);
527 }
528 }
529
530 void MainInitializer() {
531 // *** TRICKY ***
532 //
533 // The tests want to launch another binary and attach heapprofd to it.
534 // Carrying another binary is difficult, so another approach is taken:
535   // * The test executes its own binary with special environment variables.
536   // * If these environment variables are detected, instead of running the
537   //   gtest tests, the binary just needs to do some allocations and exit.
538
539 // This is run before all the gtest tests are executed.
540 //
541   // If one of these functions recognizes the environment variable, it will do
542 // its job and exit().
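  //
  // For example, AccurateCustomReportAllocation below re-executes the test
  // binary roughly like this, which makes the child take the
  // RunAccurateMalloc() branch:
  //   base::Subprocess child({"/proc/self/exe"});
  //   child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
  //   StartAndWaitForHandshake(&child);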
543 RunContinuousMalloc();
544 RunAccurateMalloc();
545 RunAccurateMallocWithVfork();
546 RunAccurateMallocWithVforkThread();
547 RunReInit();
548 RunCustomLifetime();
549 RunAccurateSample();
550 }
551
552 int PERFETTO_UNUSED initializer =
553 integration_tests::RegisterHeapprofdEndToEndTestInitializer(
554 MainInitializer);
555
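// TestHelper that, in addition to collecting the trace, feeds every received
// packet into an in-process TraceProcessor instance so tests can run SQL
// queries (e.g. GetFlamegraph) over the profile.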
556 class TraceProcessorTestHelper : public TestHelper {
557 public:
558   explicit TraceProcessorTestHelper(base::TestTaskRunner* task_runner)
559 : TestHelper(task_runner),
560 tp_(trace_processor::TraceProcessor::CreateInstance({})) {}
561
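  // Re-assembles each packet (proto preamble followed by the payload bytes)
  // and parses it into the TraceProcessor instance before forwarding the
  // packets to the base class.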
562   void ReadTraceData(std::vector<TracePacket> packets) override {
563 for (auto& packet : packets) {
564 auto preamble = packet.GetProtoPreamble();
565 std::string payload = packet.GetRawBytesForTesting();
566 char* preamble_payload = std::get<0>(preamble);
567 size_t preamble_size = std::get<1>(preamble);
568 size_t buf_size = preamble_size + payload.size();
569 std::unique_ptr<uint8_t[]> buf =
570 std::unique_ptr<uint8_t[]>(new uint8_t[buf_size]);
571 memcpy(&buf[0], preamble_payload, preamble_size);
572 memcpy(&buf[preamble_size], payload.data(), payload.size());
573 PERFETTO_CHECK(tp_->Parse(std::move(buf), buf_size).ok());
574 }
575 TestHelper::ReadTraceData(std::move(packets));
576 }
577
578   trace_processor::TraceProcessor& tp() { return *tp_; }
579
580 private:
581 std::unique_ptr<trace_processor::TraceProcessor> tp_;
582 };
583
584 std::unique_ptr<TraceProcessorTestHelper> GetHelper(
585 base::TestTaskRunner* task_runner) {
586 std::unique_ptr<TraceProcessorTestHelper> helper(
587 new TraceProcessorTestHelper(task_runner));
588 helper->StartServiceIfRequired();
589
590 helper->ConnectConsumer();
591 helper->WaitForConsumerConnect();
592 return helper;
593 }
594
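// Waits for the tracing session to stop, reads the trace back through the
// consumer and tells TraceProcessor that ingestion is complete.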
595 void ReadAndWait(TraceProcessorTestHelper* helper) {
596 helper->WaitForTracingDisabled(kTracingDisabledTimeoutMs);
597 helper->ReadData();
598 helper->WaitForReadData(0, kWaitForReadDataTimeoutMs);
599 helper->tp().NotifyEndOfFile();
600 }
601
602 std::string ToTraceString(
603 const std::vector<protos::gen::TracePacket>& packets) {
604 protos::gen::Trace trace;
605 for (const protos::gen::TracePacket& packet : packets) {
606 *trace.add_packet() = packet;
607 }
608 return trace.SerializeAsString();
609 }
610
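// Convenience wrapper that records the call site, so traces written via
// WriteTrace() are named after the file/line of the test that produced them.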
611 #define WRITE_TRACE(trace) \
612 do { \
613 WriteTrace(trace, __FILE__, __LINE__); \
614 } while (0)
615
616 std::string FormatHistogram(const protos::gen::ProfilePacket_Histogram& hist) {
617 std::string out;
618 std::string prev_upper_limit = "-inf";
619 for (const auto& bucket : hist.buckets()) {
620 std::string upper_limit;
621 if (bucket.max_bucket())
622 upper_limit = "inf";
623 else
624 upper_limit = std::to_string(bucket.upper_limit());
625
626 out += "[" + prev_upper_limit + ", " + upper_limit +
627 "]: " + std::to_string(bucket.count()) + "; ";
628 prev_upper_limit = std::move(upper_limit);
629 }
630 return out + "\n";
631 }
632
633 std::string FormatStats(const protos::gen::ProfilePacket_ProcessStats& stats) {
634 return std::string("unwinding_errors: ") +
635 std::to_string(stats.unwinding_errors()) + "\n" +
636 "heap_samples: " + std::to_string(stats.heap_samples()) + "\n" +
637 "map_reparses: " + std::to_string(stats.map_reparses()) + "\n" +
638 "unwinding_time_us: " + FormatHistogram(stats.unwinding_time_us());
639 }
640
641 std::string Suffix(const std::tuple<TestMode, AllocatorMode>& param) {
642 TestMode tm = std::get<0>(param);
643 AllocatorMode am = std::get<1>(param);
644
645 std::string result;
646 switch (tm) {
647 case TestMode::kCentral:
648 result += "CentralMode";
649 break;
650 case TestMode::kStatic:
651 result += "StaticMode";
652 break;
653 }
654 switch (am) {
655 case AllocatorMode::kMalloc:
656 result += "Malloc";
657 break;
658 case AllocatorMode::kCustom:
659 result += "Custom";
660 break;
661 }
662 return result;
663 }
664
665 __attribute__((unused)) std::string TestSuffix(
666 const ::testing::TestParamInfo<std::tuple<TestMode, AllocatorMode>>& info) {
667 return Suffix(info.param);
668 }
669
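// Parameterized over (TestMode, AllocatorMode); provides helpers to build
// heapprofd trace configs, run a tracing session against a child process and
// validate the resulting ProfilePackets.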
670 class HeapprofdEndToEnd
671 : public ::testing::TestWithParam<std::tuple<TestMode, AllocatorMode>> {
672 protected:
673 base::TestTaskRunner task_runner;
674
675   TestMode test_mode() { return std::get<0>(GetParam()); }
676   AllocatorMode allocator_mode() { return std::get<1>(GetParam()); }
677   std::string allocator_name() { return AllocatorName(allocator_mode()); }
678
679   void WriteTrace(const std::vector<protos::gen::TracePacket>& packets,
680 const char* filename,
681 uint64_t lineno) {
682 const char* outdir = getenv("HEAPPROFD_TEST_PROFILE_OUT");
683 if (!outdir)
684 return;
685 const std::string fq_filename =
686 std::string(outdir) + "/" + basename(filename) + ":" +
687 std::to_string(lineno) + "_" + Suffix(GetParam());
688 base::ScopedFile fd(base::OpenFile(fq_filename, O_WRONLY | O_CREAT, 0666));
689 PERFETTO_CHECK(*fd);
690 std::string trace_string = ToTraceString(packets);
691 PERFETTO_CHECK(
692 base::WriteAll(*fd, trace_string.data(), trace_string.size()) >= 0);
693 }
694
695   std::unique_ptr<TraceProcessorTestHelper> Trace(
696 const TraceConfig& trace_config) {
697 auto helper = GetHelper(&task_runner);
698
699 helper->StartTracing(trace_config);
700
701 ReadAndWait(helper.get());
702 return helper;
703 }
704
705   std::vector<std::string> GetUnwindingErrors(
706 TraceProcessorTestHelper* helper) {
707 std::vector<std::string> out;
708 const auto& packets = helper->trace();
709 for (const protos::gen::TracePacket& packet : packets) {
710 for (const protos::gen::InternedString& fn :
711 packet.interned_data().function_names()) {
712 if (fn.str().find("ERROR ") == 0) {
713 out.push_back(fn.str());
714 }
715 }
716 }
717 return out;
718 }
719
720   void PrintStats(TraceProcessorTestHelper* helper) {
721 const auto& packets = helper->trace();
722 for (const protos::gen::TracePacket& packet : packets) {
723 for (const auto& dump : packet.profile_packet().process_dumps()) {
724 // protobuf uint64 does not like the PRIu64 formatter.
725 PERFETTO_LOG("Stats for %s: %s", std::to_string(dump.pid()).c_str(),
726 FormatStats(dump.stats()).c_str());
727 }
728 }
729 std::vector<std::string> errors = GetUnwindingErrors(helper);
730 for (const std::string& err : errors) {
731 PERFETTO_LOG("Unwinding error: %s", err.c_str());
732 }
733 }
734
735   void ValidateSampleSizes(TraceProcessorTestHelper* helper,
736 uint64_t pid,
737 uint64_t alloc_size,
738 const std::string& heap_name = "") {
739 const auto& packets = helper->trace();
740 for (const protos::gen::TracePacket& packet : packets) {
741 for (const auto& dump : packet.profile_packet().process_dumps()) {
742 if (dump.pid() != pid ||
743 (!heap_name.empty() && heap_name != dump.heap_name())) {
744 continue;
745 }
746 for (const auto& sample : dump.samples()) {
747 EXPECT_EQ(sample.self_allocated() % alloc_size, 0u);
748 EXPECT_EQ(sample.self_freed() % alloc_size, 0u);
749 EXPECT_THAT(sample.self_allocated() - sample.self_freed(),
750 AnyOf(Eq(0u), Eq(alloc_size)));
751 }
752 }
753 }
754 }
755
756   void ValidateFromStartup(TraceProcessorTestHelper* helper,
757 uint64_t pid,
758 bool from_startup) {
759 const auto& packets = helper->trace();
760 for (const protos::gen::TracePacket& packet : packets) {
761 for (const auto& dump : packet.profile_packet().process_dumps()) {
762 if (dump.pid() != pid)
763 continue;
764 EXPECT_EQ(dump.from_startup(), from_startup);
765 }
766 }
767 }
768
769   void ValidateRejectedConcurrent(TraceProcessorTestHelper* helper,
770 uint64_t pid,
771 bool rejected_concurrent) {
772 const auto& packets = helper->trace();
773 for (const protos::gen::TracePacket& packet : packets) {
774 for (const auto& dump : packet.profile_packet().process_dumps()) {
775 if (dump.pid() != pid)
776 continue;
777 EXPECT_EQ(dump.rejected_concurrent(), rejected_concurrent);
778 }
779 }
780 }
781
782   void ValidateNoSamples(TraceProcessorTestHelper* helper, uint64_t pid) {
783 const auto& packets = helper->trace();
784 size_t samples = 0;
785 for (const protos::gen::TracePacket& packet : packets) {
786 for (const auto& dump : packet.profile_packet().process_dumps()) {
787 if (dump.pid() != pid)
788 continue;
789 samples += dump.samples().size();
790 }
791 }
792 EXPECT_EQ(samples, 0u);
793 }
794
795   void ValidateHasSamples(TraceProcessorTestHelper* helper,
796 uint64_t pid,
797 const std::string& heap_name,
798 uint64_t sampling_interval) {
799 const auto& packets = helper->trace();
800 ASSERT_GT(packets.size(), 0u);
801 size_t profile_packets = 0;
802 size_t samples = 0;
803 uint64_t last_allocated = 0;
804 uint64_t last_freed = 0;
805 for (const protos::gen::TracePacket& packet : packets) {
806 for (const auto& dump : packet.profile_packet().process_dumps()) {
807 if (dump.pid() != pid || dump.heap_name() != heap_name)
808 continue;
809 EXPECT_EQ(dump.sampling_interval_bytes(), sampling_interval);
810 for (const auto& sample : dump.samples()) {
811 last_allocated = sample.self_allocated();
812 last_freed = sample.self_freed();
813 samples++;
814 }
815 profile_packets++;
816 }
817 }
818 EXPECT_GT(profile_packets, 0u) << heap_name;
819 EXPECT_GT(samples, 0u) << heap_name;
820 EXPECT_GT(last_allocated, 0u) << heap_name;
821 EXPECT_GT(last_freed, 0u) << heap_name;
822 }
823
824   void ValidateOnlyPID(TraceProcessorTestHelper* helper, uint64_t pid) {
825 size_t dumps = 0;
826 const auto& packets = helper->trace();
827 for (const protos::gen::TracePacket& packet : packets) {
828 for (const auto& dump : packet.profile_packet().process_dumps()) {
829 EXPECT_EQ(dump.pid(), pid);
830 dumps++;
831 }
832 }
833 EXPECT_GT(dumps, 0u);
834 }
835 };
836
837 // This checks that the child is still running (to ensure it didn't crash
838 // unexpectedly) and then kills it.
839 void KillAssertRunning(base::Subprocess* child) {
840 ASSERT_EQ(child->Poll(), base::Subprocess::kRunning)
841 << "Target process not running. CHECK CRASH LOGS.";
842 PERFETTO_LOG("Shutting down profile target.");
843 child->KillAndWaitForTermination();
844 }
845
846 TEST_P(HeapprofdEndToEnd, Disabled) {
847 constexpr size_t kAllocSize = 1024;
848
849 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
850 const uint64_t pid = static_cast<uint64_t>(child.pid());
851
852 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
853 cfg->set_sampling_interval_bytes(1);
854 cfg->add_pid(pid);
855 cfg->add_heaps("invalid");
856 ContinuousDump(cfg);
857 });
858
859 auto helper = Trace(trace_config);
860 WRITE_TRACE(helper->full_trace());
861 PrintStats(helper.get());
862 KillAssertRunning(&child);
863
864 ValidateNoSamples(helper.get(), pid);
865 }
866
867 TEST_P(HeapprofdEndToEnd, Smoke) {
868 constexpr size_t kAllocSize = 1024;
869 constexpr size_t kSamplingInterval = 1;
870
871 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
872 const uint64_t pid = static_cast<uint64_t>(child.pid());
873
874 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
875 cfg->set_sampling_interval_bytes(kSamplingInterval);
876 cfg->add_pid(pid);
877 cfg->add_heaps(allocator_name());
878 ContinuousDump(cfg);
879 });
880
881 auto helper = Trace(trace_config);
882 WRITE_TRACE(helper->full_trace());
883 PrintStats(helper.get());
884 KillAssertRunning(&child);
885
886 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
887 ValidateOnlyPID(helper.get(), pid);
888 ValidateSampleSizes(helper.get(), pid, kAllocSize);
889 }
890
891 TEST_P(HeapprofdEndToEnd, TwoAllocators) {
892 constexpr size_t kCustomAllocSize = 1024;
893 constexpr size_t kAllocSize = 7;
894 constexpr size_t kSamplingInterval = 1;
895
896 base::Subprocess child =
897 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
898 const uint64_t pid = static_cast<uint64_t>(child.pid());
899
900 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
901 cfg->set_sampling_interval_bytes(kSamplingInterval);
902 cfg->add_pid(pid);
903 cfg->add_heaps(allocator_name());
904 cfg->add_heaps("secondary");
905 ContinuousDump(cfg);
906 });
907
908 auto helper = Trace(trace_config);
909 WRITE_TRACE(helper->full_trace());
910 PrintStats(helper.get());
911 KillAssertRunning(&child);
912
913 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
914 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
915 ValidateOnlyPID(helper.get(), pid);
916 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
917 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
918 }
919
920 TEST_P(HeapprofdEndToEnd, TwoAllocatorsAll) {
921 constexpr size_t kCustomAllocSize = 1024;
922 constexpr size_t kAllocSize = 7;
923 constexpr size_t kSamplingInterval = 1;
924
925 base::Subprocess child =
926 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
927 const uint64_t pid = static_cast<uint64_t>(child.pid());
928
929 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
930 cfg->set_sampling_interval_bytes(kSamplingInterval);
931 cfg->add_pid(pid);
932 cfg->set_all_heaps(true);
933 ContinuousDump(cfg);
934 });
935
936 auto helper = Trace(trace_config);
937 WRITE_TRACE(helper->full_trace());
938 PrintStats(helper.get());
939 KillAssertRunning(&child);
940
941 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
942 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
943 ValidateOnlyPID(helper.get(), pid);
944 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
945 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
946 }
947
948 TEST_P(HeapprofdEndToEnd, AccurateCustomReportAllocation) {
949 if (allocator_mode() != AllocatorMode::kCustom)
950 GTEST_SKIP();
951
952 base::Subprocess child({"/proc/self/exe"});
953 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
954 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
955 StartAndWaitForHandshake(&child);
956
957 const uint64_t pid = static_cast<uint64_t>(child.pid());
958
959 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
960 cfg->set_sampling_interval_bytes(1);
961 cfg->add_pid(pid);
962 cfg->add_heaps("test");
963 });
964
965 auto helper = Trace(trace_config);
966 WRITE_TRACE(helper->full_trace());
967 PrintStats(helper.get());
968 KillAssertRunning(&child);
969
970 auto flamegraph = GetFlamegraph(&helper->tp());
971 EXPECT_THAT(flamegraph,
972 Contains(AllOf(
973 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
974 Field(&FlamegraphNode::cumulative_size, Eq(15)),
975 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
976
977 ValidateOnlyPID(helper.get(), pid);
978
979 size_t total_alloc = 0;
980 size_t total_freed = 0;
981 for (const protos::gen::TracePacket& packet : helper->trace()) {
982 for (const auto& dump : packet.profile_packet().process_dumps()) {
983 for (const auto& sample : dump.samples()) {
984 total_alloc += sample.self_allocated();
985 total_freed += sample.self_freed();
986 }
987 }
988 }
989 EXPECT_EQ(total_alloc, 40u);
990 EXPECT_EQ(total_freed, 25u);
991 }
992
993 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
994 #define MAYBE_AccurateCustomReportAllocationWithVfork \
995 AccurateCustomReportAllocationWithVfork
996 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
997 AccurateCustomReportAllocationWithVforkThread
998 #else
999 #define MAYBE_AccurateCustomReportAllocationWithVfork \
1000 DISABLED_AccurateCustomReportAllocationWithVfork
1001 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
1002 DISABLED_AccurateCustomReportAllocationWithVforkThread
1003 #endif
1004
1005 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVfork) {
1006 if (allocator_mode() != AllocatorMode::kCustom)
1007 GTEST_SKIP();
1008
1009 base::Subprocess child({"/proc/self/exe"});
1010 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1011 child.args.env.push_back(
1012 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK=1");
1013 StartAndWaitForHandshake(&child);
1014
1015 const uint64_t pid = static_cast<uint64_t>(child.pid());
1016
1017 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1018 cfg->set_sampling_interval_bytes(1);
1019 cfg->add_pid(pid);
1020 cfg->add_heaps("test");
1021 });
1022
1023 auto helper = Trace(trace_config);
1024 WRITE_TRACE(helper->full_trace());
1025 PrintStats(helper.get());
1026 KillAssertRunning(&child);
1027
1028 auto flamegraph = GetFlamegraph(&helper->tp());
1029 EXPECT_THAT(flamegraph,
1030 Contains(AllOf(
1031 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1032 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1033 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1034
1035 ValidateOnlyPID(helper.get(), pid);
1036
1037 size_t total_alloc = 0;
1038 size_t total_freed = 0;
1039 for (const protos::gen::TracePacket& packet : helper->trace()) {
1040 for (const auto& dump : packet.profile_packet().process_dumps()) {
1041 EXPECT_FALSE(dump.disconnected());
1042 for (const auto& sample : dump.samples()) {
1043 total_alloc += sample.self_allocated();
1044 total_freed += sample.self_freed();
1045 }
1046 }
1047 }
1048 EXPECT_EQ(total_alloc, 40u);
1049 EXPECT_EQ(total_freed, 25u);
1050 }
1051
1052 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVforkThread) {
1053 if (allocator_mode() != AllocatorMode::kCustom)
1054 GTEST_SKIP();
1055
1056 base::Subprocess child({"/proc/self/exe"});
1057 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1058 child.args.env.push_back(
1059 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD=1");
1060 StartAndWaitForHandshake(&child);
1061
1062 const uint64_t pid = static_cast<uint64_t>(child.pid());
1063
1064 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1065 cfg->set_sampling_interval_bytes(1);
1066 cfg->add_pid(pid);
1067 cfg->add_heaps("test");
1068 });
1069
1070 auto helper = Trace(trace_config);
1071 WRITE_TRACE(helper->full_trace());
1072 PrintStats(helper.get());
1073 KillAssertRunning(&child);
1074
1075 auto flamegraph = GetFlamegraph(&helper->tp());
1076 EXPECT_THAT(flamegraph,
1077 Contains(AllOf(
1078 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1079 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1080 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1081
1082 ValidateOnlyPID(helper.get(), pid);
1083
1084 size_t total_alloc = 0;
1085 size_t total_freed = 0;
1086 for (const protos::gen::TracePacket& packet : helper->trace()) {
1087 for (const auto& dump : packet.profile_packet().process_dumps()) {
1088 EXPECT_FALSE(dump.disconnected());
1089 for (const auto& sample : dump.samples()) {
1090 total_alloc += sample.self_allocated();
1091 total_freed += sample.self_freed();
1092 }
1093 }
1094 }
1095 EXPECT_EQ(total_alloc, 40u);
1096 EXPECT_EQ(total_freed, 25u);
1097 }
1098
1099 TEST_P(HeapprofdEndToEnd, AccurateCustomReportSample) {
1100 if (allocator_mode() != AllocatorMode::kCustom)
1101 GTEST_SKIP();
1102
1103 base::Subprocess child({"/proc/self/exe"});
1104 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1105 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE=1");
1106 StartAndWaitForHandshake(&child);
1107
1108 const uint64_t pid = static_cast<uint64_t>(child.pid());
1109
1110 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1111 cfg->set_sampling_interval_bytes(1000000);
1112 cfg->add_pid(pid);
1113 cfg->add_heaps("test");
1114 });
1115
1116 auto helper = Trace(trace_config);
1117 WRITE_TRACE(helper->full_trace());
1118 PrintStats(helper.get());
1119 KillAssertRunning(&child);
1120
1121 ValidateOnlyPID(helper.get(), pid);
1122
1123 size_t total_alloc = 0;
1124 size_t total_freed = 0;
1125 for (const protos::gen::TracePacket& packet : helper->trace()) {
1126 for (const auto& dump : packet.profile_packet().process_dumps()) {
1127 for (const auto& sample : dump.samples()) {
1128 total_alloc += sample.self_allocated();
1129 total_freed += sample.self_freed();
1130 }
1131 }
1132 }
1133 EXPECT_EQ(total_alloc, 40u);
1134 EXPECT_EQ(total_freed, 25u);
1135 }
1136
1137 TEST_P(HeapprofdEndToEnd, AccurateDumpAtMaxCustom) {
1138 if (allocator_mode() != AllocatorMode::kCustom)
1139 GTEST_SKIP();
1140
1141 base::Subprocess child({"/proc/self/exe"});
1142 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1143 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
1144 StartAndWaitForHandshake(&child);
1145
1146 const uint64_t pid = static_cast<uint64_t>(child.pid());
1147
1148 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1149 cfg->set_sampling_interval_bytes(1);
1150 cfg->add_pid(pid);
1151 cfg->add_heaps("test");
1152 cfg->set_dump_at_max(true);
1153 });
1154
1155 auto helper = Trace(trace_config);
1156 WRITE_TRACE(helper->full_trace());
1157 PrintStats(helper.get());
1158 KillAssertRunning(&child);
1159
1160 ValidateOnlyPID(helper.get(), pid);
1161
1162 size_t total_alloc = 0;
1163 size_t total_count = 0;
1164 for (const protos::gen::TracePacket& packet : helper->trace()) {
1165 for (const auto& dump : packet.profile_packet().process_dumps()) {
1166 for (const auto& sample : dump.samples()) {
1167 total_alloc += sample.self_max();
1168 total_count += sample.self_max_count();
1169 }
1170 }
1171 }
1172 EXPECT_EQ(total_alloc, 30u);
1173 EXPECT_EQ(total_count, 2u);
1174 }
1175
1176 TEST_P(HeapprofdEndToEnd, CustomLifetime) {
1177 if (allocator_mode() != AllocatorMode::kCustom)
1178 GTEST_SKIP();
1179
1180 int disabled_pipe[2];
1181 PERFETTO_CHECK(pipe(disabled_pipe) == 0); // NOLINT(android-cloexec-pipe)
1182
1183 int disabled_pipe_rd = disabled_pipe[0];
1184 int disabled_pipe_wr = disabled_pipe[1];
1185
1186 base::Subprocess child({"/proc/self/exe"});
1187 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1188 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0=1000000");
1189 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1=" +
1190 std::to_string(disabled_pipe_wr));
1191 child.args.preserve_fds.push_back(disabled_pipe_wr);
1192 StartAndWaitForHandshake(&child);
1193 close(disabled_pipe_wr);
1194
1195 const uint64_t pid = static_cast<uint64_t>(child.pid());
1196
1197 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1198 cfg->set_sampling_interval_bytes(1000000);
1199 cfg->add_pid(pid);
1200 cfg->add_heaps("test");
1201 cfg->add_heaps("othertest");
1202 });
1203
1204 auto helper = Trace(trace_config);
1205 WRITE_TRACE(helper->full_trace());
1206 PrintStats(helper.get());
1207 // Give client some time to notice the disconnect.
1208 sleep(2);
1209 KillAssertRunning(&child);
1210
1211 char x;
1212 EXPECT_EQ(base::Read(disabled_pipe_rd, &x, sizeof(x)), 1);
1213 close(disabled_pipe_rd);
1214 }
1215
1216 TEST_P(HeapprofdEndToEnd, TwoProcesses) {
1217 constexpr size_t kAllocSize = 1024;
1218 constexpr size_t kAllocSize2 = 7;
1219 constexpr size_t kSamplingInterval = 1;
1220
1221 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1222 base::Subprocess child2 = ForkContinuousAlloc(allocator_mode(), kAllocSize2);
1223 const uint64_t pid = static_cast<uint64_t>(child.pid());
1224 const auto pid2 = child2.pid();
1225
1226 TraceConfig trace_config =
1227 MakeTraceConfig([this, pid, pid2](HeapprofdConfig* cfg) {
1228 cfg->set_sampling_interval_bytes(kSamplingInterval);
1229 cfg->add_pid(pid);
1230 cfg->add_pid(static_cast<uint64_t>(pid2));
1231 cfg->add_heaps(allocator_name());
1232 });
1233
1234 auto helper = Trace(trace_config);
1235 WRITE_TRACE(helper->full_trace());
1236 PrintStats(helper.get());
1237
1238 KillAssertRunning(&child);
1239 KillAssertRunning(&child2);
1240
1241 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1242 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1243 ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid2),
1244 allocator_name(), kSamplingInterval);
1245 ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid2), kAllocSize2);
1246 }
1247
1248 TEST_P(HeapprofdEndToEnd, FinalFlush) {
1249 constexpr size_t kAllocSize = 1024;
1250 constexpr size_t kSamplingInterval = 1;
1251
1252 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1253 const uint64_t pid = static_cast<uint64_t>(child.pid());
1254 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1255 cfg->set_sampling_interval_bytes(kSamplingInterval);
1256 cfg->add_pid(pid);
1257 cfg->add_heaps(allocator_name());
1258 });
1259
1260 auto helper = Trace(trace_config);
1261 WRITE_TRACE(helper->full_trace());
1262 PrintStats(helper.get());
1263 KillAssertRunning(&child);
1264
1265 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1266 ValidateOnlyPID(helper.get(), pid);
1267 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1268 }
1269
1270 TEST_P(HeapprofdEndToEnd, NativeStartup) {
1271 if (test_mode() == TestMode::kStatic)
1272 GTEST_SKIP();
1273
1274 auto helper = GetHelper(&task_runner);
1275
1276 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1277 cfg->set_sampling_interval_bytes(1);
1278 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1279 cfg->add_heaps(allocator_name());
1280 });
1281 trace_config.set_duration_ms(5000);
1282
1283 helper->StartTracing(trace_config);
1284
1285 // Wait to guarantee that the process forked below is hooked by the profiler
1286 // by virtue of the startup check, and not by virtue of being seen as a
1287   // running process. This sleep prevents the test from reaching the
1288   // fork()+exec() too soon, i.e. before the heap profiling daemon has
1289   // received the trace config.
1290 sleep(1);
1291
1292 base::Subprocess child({"/proc/self/exe"});
1293 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1294 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1295 allocator_name());
1296 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1297 std::to_string(kStartupAllocSize));
1298 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1299 std::string("0"));
1300 StartAndWaitForHandshake(&child);
1301
1302 ReadAndWait(helper.get());
1303 WRITE_TRACE(helper->full_trace());
1304
1305 KillAssertRunning(&child);
1306
1307 const auto& packets = helper->trace();
1308 ASSERT_GT(packets.size(), 0u);
1309 size_t profile_packets = 0;
1310 size_t samples = 0;
1311 uint64_t total_allocated = 0;
1312 uint64_t total_freed = 0;
1313 for (const protos::gen::TracePacket& packet : packets) {
1314 if (packet.has_profile_packet() &&
1315 !packet.profile_packet().process_dumps().empty()) {
1316 const auto& dumps = packet.profile_packet().process_dumps();
1317 ASSERT_EQ(dumps.size(), 1u);
1318 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1319 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1320 profile_packets++;
1321 for (const auto& sample : dump.samples()) {
1322 samples++;
1323 total_allocated += sample.self_allocated();
1324 total_freed += sample.self_freed();
1325 }
1326 }
1327 }
1328 EXPECT_EQ(profile_packets, 1u);
1329 EXPECT_GT(samples, 0u);
1330 EXPECT_GT(total_allocated, 0u);
1331 EXPECT_GT(total_freed, 0u);
1332 }
1333
1334 TEST_P(HeapprofdEndToEnd, NativeStartupDenormalizedCmdline) {
1335 if (test_mode() == TestMode::kStatic)
1336 GTEST_SKIP();
1337
1338 auto helper = GetHelper(&task_runner);
1339
1340 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1341 cfg->set_sampling_interval_bytes(1);
1342 cfg->add_process_cmdline("[email protected]");
1343 cfg->add_heaps(allocator_name());
1344 });
1345 trace_config.set_duration_ms(5000);
1346
1347 helper->StartTracing(trace_config);
1348
1349 // Wait to guarantee that the process forked below is hooked by the profiler
1350 // by virtue of the startup check, and not by virtue of being seen as a
1351   // running process. This sleep prevents the test from reaching the
1352   // fork()+exec() too soon, i.e. before the heap profiling daemon has
1353   // received the trace config.
1354 sleep(1);
1355
1356 base::Subprocess child({"/proc/self/exe"});
1357 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1358 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1359 allocator_name());
1360 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1361 std::to_string(kStartupAllocSize));
1362 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1363 std::string("0"));
1364
1365 StartAndWaitForHandshake(&child);
1366
1367 ReadAndWait(helper.get());
1368 WRITE_TRACE(helper->full_trace());
1369
1370 KillAssertRunning(&child);
1371
1372 const auto& packets = helper->trace();
1373 ASSERT_GT(packets.size(), 0u);
1374 size_t profile_packets = 0;
1375 size_t samples = 0;
1376 uint64_t total_allocated = 0;
1377 uint64_t total_freed = 0;
1378 for (const protos::gen::TracePacket& packet : packets) {
1379 if (packet.has_profile_packet() &&
1380 !packet.profile_packet().process_dumps().empty()) {
1381 const auto& dumps = packet.profile_packet().process_dumps();
1382 ASSERT_EQ(dumps.size(), 1u);
1383 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1384 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1385 profile_packets++;
1386 for (const auto& sample : dump.samples()) {
1387 samples++;
1388 total_allocated += sample.self_allocated();
1389 total_freed += sample.self_freed();
1390 }
1391 }
1392 }
1393 EXPECT_EQ(profile_packets, 1u);
1394 EXPECT_GT(samples, 0u);
1395 EXPECT_GT(total_allocated, 0u);
1396 EXPECT_GT(total_freed, 0u);
1397 }
1398
1399 TEST_P(HeapprofdEndToEnd, DiscoverByName) {
1400 auto helper = GetHelper(&task_runner);
1401
1402 base::Subprocess child({"/proc/self/exe"});
1403 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1404 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1405 allocator_name());
1406 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1407 std::to_string(kStartupAllocSize));
1408 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1409 std::string("0"));
1410
1411 StartAndWaitForHandshake(&child);
1412
1413   // Wait to make sure the process is fully initialized, so we do not
1414   // accidentally match it by the startup logic.
1415 sleep(1);
1416
1417 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1418 cfg->set_sampling_interval_bytes(1);
1419 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1420 cfg->add_heaps(allocator_name());
1421 });
1422 trace_config.set_duration_ms(5000);
1423
1424 helper->StartTracing(trace_config);
1425 ReadAndWait(helper.get());
1426 WRITE_TRACE(helper->full_trace());
1427
1428 KillAssertRunning(&child);
1429
1430 const auto& packets = helper->trace();
1431 ASSERT_GT(packets.size(), 0u);
1432 size_t profile_packets = 0;
1433 size_t samples = 0;
1434 uint64_t total_allocated = 0;
1435 uint64_t total_freed = 0;
1436 for (const protos::gen::TracePacket& packet : packets) {
1437 if (packet.has_profile_packet() &&
1438 !packet.profile_packet().process_dumps().empty()) {
1439 const auto& dumps = packet.profile_packet().process_dumps();
1440 ASSERT_EQ(dumps.size(), 1u);
1441 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1442 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1443 profile_packets++;
1444 for (const auto& sample : dump.samples()) {
1445 samples++;
1446 total_allocated += sample.self_allocated();
1447 total_freed += sample.self_freed();
1448 }
1449 }
1450 }
1451 EXPECT_EQ(profile_packets, 1u);
1452 EXPECT_GT(samples, 0u);
1453 EXPECT_GT(total_allocated, 0u);
1454 EXPECT_GT(total_freed, 0u);
1455 }
1456
1457 TEST_P(HeapprofdEndToEnd, DiscoverByNameDenormalizedCmdline) {
1458 auto helper = GetHelper(&task_runner);
1459
1460 // Make sure the forked process does not get reparented to init.
1461 base::Subprocess child({"/proc/self/exe"});
1462 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1463 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1464 allocator_name());
1465 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1466 std::to_string(kStartupAllocSize));
1467 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1468 std::string("0"));
1469
1470 StartAndWaitForHandshake(&child);
1471
1472   // Wait to make sure the process is fully initialized, so we do not
1473   // accidentally match it by the startup logic.
1474 sleep(1);
1475
1476 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1477 cfg->set_sampling_interval_bytes(1);
1478 cfg->add_process_cmdline("[email protected]");
1479 cfg->add_heaps(allocator_name());
1480 });
1481 trace_config.set_duration_ms(5000);
1482
1483 helper->StartTracing(trace_config);
1484 ReadAndWait(helper.get());
1485 WRITE_TRACE(helper->full_trace());
1486
1487 KillAssertRunning(&child);
1488
1489 const auto& packets = helper->trace();
1490 ASSERT_GT(packets.size(), 0u);
1491 size_t profile_packets = 0;
1492 size_t samples = 0;
1493 uint64_t total_allocated = 0;
1494 uint64_t total_freed = 0;
1495 for (const protos::gen::TracePacket& packet : packets) {
1496 if (packet.has_profile_packet() &&
1497 !packet.profile_packet().process_dumps().empty()) {
1498 const auto& dumps = packet.profile_packet().process_dumps();
1499 ASSERT_EQ(dumps.size(), 1u);
1500 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1501 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1502 profile_packets++;
1503 for (const auto& sample : dump.samples()) {
1504 samples++;
1505 total_allocated += sample.self_allocated();
1506 total_freed += sample.self_freed();
1507 }
1508 }
1509 }
1510 EXPECT_EQ(profile_packets, 1u);
1511 EXPECT_GT(samples, 0u);
1512 EXPECT_GT(total_allocated, 0u);
1513 EXPECT_GT(total_freed, 0u);
1514 }
1515
1516 TEST_P(HeapprofdEndToEnd, ReInit) {
1517 constexpr size_t kSamplingInterval = 1;
1518
1519 // We cannot use base::Pipe because that assumes we want CLOEXEC.
1520 // We do NOT want CLOEXEC as this gets used by the RunReInit in the child.
1521 int signal_pipe[2];
1522 int ack_pipe[2];
1523
1524 PERFETTO_CHECK(pipe(signal_pipe) == 0); // NOLINT(android-cloexec-pipe)
1525 PERFETTO_CHECK(pipe(ack_pipe) == 0); // NOLINT(android-cloexec-pipe)
1526
1527 int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
1528 PERFETTO_CHECK(cur_flags >= 0);
1529 PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1530 cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
1531 PERFETTO_CHECK(cur_flags >= 0);
1532 PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1533
1534 int signal_pipe_rd = signal_pipe[0];
1535 int signal_pipe_wr = signal_pipe[1];
1536 int ack_pipe_rd = ack_pipe[0];
1537 int ack_pipe_wr = ack_pipe[1];
1538
1539 base::Subprocess child({"/proc/self/exe"});
1540 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1541 child.args.preserve_fds.push_back(signal_pipe_rd);
1542 child.args.preserve_fds.push_back(ack_pipe_wr);
1543 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
1544 allocator_name());
1545 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
1546 std::to_string(signal_pipe_rd));
1547 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
1548 std::to_string(ack_pipe_wr));
1549 StartAndWaitForHandshake(&child);
1550
1551 const uint64_t pid = static_cast<uint64_t>(child.pid());
1552
  close(signal_pipe_rd);
  close(ack_pipe_wr);

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });

  auto helper = Trace(trace_config);
  WRITE_TRACE(helper->full_trace());

  PrintStats(helper.get());
  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);

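  // Signal the child to re-initialize the heap profiling client and switch
  // from kFirstIterationBytes to kSecondIterationBytes allocations, then wait
  // for its acknowledgement on the ack pipe.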
  PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
  close(signal_pipe_wr);
  char buf[1];
  ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
  close(ack_pipe_rd);

  // A brief sleep to allow the client to notice that the profiling session is
  // to be torn down (as it rejects concurrent sessions).
  usleep(500 * kMsToUs);

  PERFETTO_LOG("HeapprofdEndToEnd::Reinit: Starting second");

  // We must keep the original helper alive because it owns the service thread.
  std::unique_ptr<TraceProcessorTestHelper> helper2 =
      std::unique_ptr<TraceProcessorTestHelper>(
          new TraceProcessorTestHelper(&task_runner));

  helper2->ConnectConsumer();
  helper2->WaitForConsumerConnect();
  helper2->StartTracing(trace_config);
  ReadAndWait(helper2.get());
  WRITE_TRACE(helper2->trace());

  PrintStats(helper2.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper2.get(), pid);
  ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
}

TEST_P(HeapprofdEndToEnd, ReInitAfterInvalid) {
  constexpr size_t kSamplingInterval = 1;

  // We cannot use base::Pipe because that assumes we want CLOEXEC.
  // We do NOT want CLOEXEC, as these fds are used by RunReInit in the child.
  int signal_pipe[2];
  int ack_pipe[2];

  PERFETTO_CHECK(pipe(signal_pipe) == 0);  // NOLINT(android-cloexec-pipe)
  PERFETTO_CHECK(pipe(ack_pipe) == 0);     // NOLINT(android-cloexec-pipe)

  int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
  cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);

  int signal_pipe_rd = signal_pipe[0];
  int signal_pipe_wr = signal_pipe[1];
  int ack_pipe_rd = ack_pipe[0];
  int ack_pipe_wr = ack_pipe[1];

  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.preserve_fds.push_back(signal_pipe_rd);
  child.args.preserve_fds.push_back(ack_pipe_wr);
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
                           std::to_string(signal_pipe_rd));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
                           std::to_string(ack_pipe_wr));
  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());

  close(signal_pipe_rd);
  close(ack_pipe_wr);

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });

  auto helper = Trace(trace_config);
  WRITE_TRACE(helper->full_trace());

  PrintStats(helper.get());
  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);

  PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
  close(signal_pipe_wr);
  char buf[1];
  ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
  close(ack_pipe_rd);

  // A brief sleep to allow the client to notice that the profiling session is
  // to be torn down (as it rejects concurrent sessions).
  usleep(500 * kMsToUs);

  PERFETTO_LOG("HeapprofdEndToEnd::ReinitAfterInvalid: Starting second");

  // We must keep the original helper alive because it owns the service thread.
  std::unique_ptr<TraceProcessorTestHelper> helper2 =
      std::unique_ptr<TraceProcessorTestHelper>(
          new TraceProcessorTestHelper(&task_runner));

  helper2->ConnectConsumer();
  helper2->WaitForConsumerConnect();
  helper2->StartTracing(trace_config);
  ReadAndWait(helper2.get());

  WRITE_TRACE(helper2->trace());

  PrintStats(helper2.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper2.get(), pid);
  ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
}

TEST_P(HeapprofdEndToEnd, ConcurrentSession) {
  constexpr size_t kAllocSize = 1024;
  constexpr size_t kSamplingInterval = 1;

  base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
  const uint64_t pid = static_cast<uint64_t>(child.pid());

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
    ContinuousDump(cfg);
  });
  trace_config.set_duration_ms(5000);

  auto helper = GetHelper(&task_runner);
  helper->StartTracing(trace_config);
  sleep(1);

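  // Start a second session targeting the same process while the first one is
  // still active. heapprofd rejects the concurrent session for this process,
  // which is asserted below via ValidateRejectedConcurrent.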
  PERFETTO_LOG("Starting concurrent.");
  std::unique_ptr<TraceProcessorTestHelper> helper_concurrent(
      new TraceProcessorTestHelper(&task_runner));
  helper_concurrent->ConnectConsumer();
  helper_concurrent->WaitForConsumerConnect();
  helper_concurrent->StartTracing(trace_config);

  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());
  PrintStats(helper.get());

  ReadAndWait(helper_concurrent.get());
  WRITE_TRACE(helper_concurrent->trace());
  PrintStats(helper_concurrent.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kAllocSize);
  ValidateRejectedConcurrent(helper.get(), pid, false);

  ValidateOnlyPID(helper_concurrent.get(), pid);
  ValidateRejectedConcurrent(helper_concurrent.get(), pid, true);
}

TEST_P(HeapprofdEndToEnd, NativeProfilingActiveAtProcessExit) {
  constexpr uint64_t kTestAllocSize = 128;
  base::Pipe start_pipe = base::Pipe::Create(base::Pipe::kBothBlock);
  int start_pipe_wr = *start_pipe.wr;

  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
                           std::to_string(kTestAllocSize));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
                           std::to_string(0));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
                           std::to_string(200));
  child.args.preserve_fds.push_back(start_pipe_wr);
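  // The child's entrypoint writes a single byte before running anything else,
  // so that the parent can tell the child has been scheduled at least once
  // before it starts tracing.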
  child.args.posix_entrypoint_for_testing = [start_pipe_wr] {
    PERFETTO_CHECK(PERFETTO_EINTR(write(start_pipe_wr, "1", 1)) == 1);
    PERFETTO_CHECK(close(start_pipe_wr) == 0 || errno == EINTR);
  };

  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());
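  // Drop the parent's copy of the write end, so the read below is satisfied
  // only by the child (or sees EOF if the child dies early).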
  start_pipe.wr.reset();

  // Set up the tracing helper (without starting profiling yet).
  auto helper = GetHelper(&task_runner);

  // Wait for child to have been scheduled at least once.
  char buf[1] = {};
  ASSERT_EQ(PERFETTO_EINTR(read(*start_pipe.rd, buf, sizeof(buf))), 1);
  start_pipe.rd.reset();

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(1);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });
  trace_config.set_duration_ms(5000);

  // Trace until child exits.
  helper->StartTracing(trace_config);

  // Wait for the child and assert that it exited successfully.
  EXPECT_TRUE(child.Wait(30000));
  EXPECT_EQ(child.status(), base::Subprocess::kTerminated);
  EXPECT_EQ(child.returncode(), 0);

  // Assert that we did profile the process.
  helper->FlushAndWait(2000);
  helper->DisableTracing();
  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());

  const auto& packets = helper->trace();
  ASSERT_GT(packets.size(), 0u);
  size_t profile_packets = 0;
  size_t samples = 0;
  uint64_t total_allocated = 0;
  for (const protos::gen::TracePacket& packet : packets) {
    if (packet.has_profile_packet() &&
        !packet.profile_packet().process_dumps().empty()) {
      const auto& dumps = packet.profile_packet().process_dumps();
      ASSERT_EQ(dumps.size(), 1u);
      const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
      EXPECT_EQ(dump.pid(), pid);
      profile_packets++;
      for (const auto& sample : dump.samples()) {
        samples++;
        total_allocated += sample.self_allocated();
      }
    }
  }
  EXPECT_EQ(profile_packets, 1u);
  EXPECT_GT(samples, 0u);
  EXPECT_GT(total_allocated, 0u);
}

// On in-tree Android, we use the system heapprofd in fork or central mode.
// For Linux and out-of-tree Android, we statically include a copy of
// heapprofd and use that. The statically included copy does not support
// intercepting malloc.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#if !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
#error "Need to start daemons for Linux test."
#endif

INSTANTIATE_TEST_SUITE_P(Run,
                         HeapprofdEndToEnd,
                         Values(std::make_tuple(TestMode::kStatic,
                                                AllocatorMode::kCustom)),
                         TestSuffix);
#elif !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
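// In-tree Android: the system heapprofd daemon is already running, so central
// mode can be exercised with both the malloc and custom-allocator heaps.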
INSTANTIATE_TEST_SUITE_P(
    Run,
    HeapprofdEndToEnd,
    Values(std::make_tuple(TestMode::kCentral, AllocatorMode::kMalloc),
           std::make_tuple(TestMode::kCentral, AllocatorMode::kCustom)),
    TestSuffix);
#endif

}  // namespace
}  // namespace profiling
}  // namespace perfetto