/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RecordReadThread.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "event_type.h"
#include "get_test_data.h"
#include "record.h"
#include "record_equal_test.h"
#include "record_file.h"

using ::testing::_;
using ::testing::Eq;
using ::testing::Return;
using ::testing::Truly;

using namespace simpleperf;

// @CddTest = 6.1/C-0-2
class RecordBufferTest : public ::testing::Test {
 protected:
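  // Writes a record consisting of just a perf_event_header with the given type and size into buffer_.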
  void PushRecord(uint32_t type, size_t size) {
    char* p = buffer_->AllocWriteSpace(size);
    ASSERT_NE(p, nullptr);
    perf_event_header header;
    header.type = type;
    header.size = size;
    memcpy(p, &header, sizeof(header));
    buffer_->FinishWrite();
  }

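  // Reads the next record from buffer_ and checks that its header matches the expected type and size.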
  void PopRecord(uint32_t type, uint32_t size) {
    char* p = buffer_->GetCurrentRecord();
    ASSERT_NE(p, nullptr);
    perf_event_header header;
    memcpy(&header, p, sizeof(header));
    ASSERT_EQ(header.type, type);
    ASSERT_EQ(header.size, size);
    buffer_->MoveToNextRecord();
  }

  std::unique_ptr<RecordBuffer> buffer_;
};

// @CddTest = 6.1/C-0-2
TEST_F(RecordBufferTest, fifo) {
  for (size_t loop = 0; loop < 10; ++loop) {
    buffer_.reset(new RecordBuffer(sizeof(perf_event_header) * 10));
    size_t record_size = sizeof(perf_event_header) + loop;
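    // Conservative bound on how many records can be pending at once, leaving enough slack that
    // AllocWriteSpace() is not expected to fail while write_id runs ahead of read_id.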
    size_t max_records_in_buffer = (buffer_->size() - 2 * record_size + 1) / record_size;
    uint32_t write_id = 0;
    uint32_t read_id = 0;
    while (read_id < 100) {
      while (write_id < 100 && write_id - read_id < max_records_in_buffer) {
        ASSERT_NO_FATAL_FAILURE(PushRecord(write_id++, record_size));
      }
      ASSERT_NO_FATAL_FAILURE(PopRecord(read_id++, record_size));
    }
  }
}

// @CddTest = 6.1/C-0-2
TEST(RecordParser, smoke) {
  std::unique_ptr<RecordFileReader> reader =
      RecordFileReader::CreateInstance(GetTestData(PERF_DATA_NO_UNWIND));
  ASSERT_TRUE(reader);
  RecordParser parser(reader->AttrSection()[0].attr);
  auto process_record = [&](std::unique_ptr<Record> record) {
    if (record->type() == PERF_RECORD_MMAP || record->type() == PERF_RECORD_COMM ||
        record->type() == PERF_RECORD_FORK || record->type() == PERF_RECORD_SAMPLE) {
      perf_event_header header;
      memcpy(&header, record->Binary(), sizeof(header));
      auto read_record_fn = [&](size_t pos, size_t size, void* dest) {
        memcpy(dest, record->Binary() + pos, size);
      };
      size_t pos = parser.GetTimePos(header);
      ASSERT_NE(0u, pos);
      uint64_t time;
      read_record_fn(pos, sizeof(time), &time);
      ASSERT_EQ(record->Timestamp(), time);
      if (record->type() == PERF_RECORD_SAMPLE) {
        auto sr = static_cast<SampleRecord*>(record.get());
        pos = parser.GetStackSizePos(read_record_fn);
        ASSERT_NE(0u, pos);
        uint64_t stack_size;
        read_record_fn(pos, sizeof(stack_size), &stack_size);
        ASSERT_EQ(sr->stack_user_data.size, stack_size);

        // Test pid pos in sample records.
        pos = parser.GetPidPosInSampleRecord();
        uint32_t pid;
        read_record_fn(pos, sizeof(pid), &pid);
        ASSERT_EQ(sr->tid_data.pid, pid);
      }
    }
  };
  ASSERT_TRUE(reader->ReadDataSection([&](std::unique_ptr<Record> record) {
    process_record(std::move(record));
    return !HasFatalFailure();
  }));
}

// @CddTest = 6.1/C-0-2
TEST(RecordParser, GetStackSizePos_with_PerfSampleReadType) {
  const EventType* type = FindEventTypeByName("cpu-clock");
  ASSERT_TRUE(type != nullptr);
  perf_event_attr event_attr = CreateDefaultPerfEventAttr(*type);
  event_attr.sample_type = PERF_SAMPLE_READ | PERF_SAMPLE_STACK_USER;
  event_attr.read_format =
      PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
  uint64_t nr = 10;
  RecordParser parser(event_attr);
  size_t pos =
      parser.GetStackSizePos([&](size_t, size_t size, void* dest) { memcpy(dest, &nr, size); });
  ASSERT_EQ(pos, sizeof(perf_event_header) + 4 * sizeof(uint64_t));

  event_attr.read_format |= PERF_FORMAT_GROUP;
  RecordParser parser2(event_attr);
  pos = parser2.GetStackSizePos([&](size_t, size_t size, void* dest) { memcpy(dest, &nr, size); });
  ASSERT_EQ(pos, sizeof(perf_event_header) + (nr * 2 + 3) * sizeof(uint64_t));
}

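// An EventFd whose kernel interactions are mocked out with gmock; mmap data is served from a
// caller-provided in-memory buffer instead of a real perf ring buffer.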
struct MockEventFd : public EventFd {
  MockEventFd(const perf_event_attr& attr, int cpu, char* buffer, size_t buffer_size,
              bool mock_aux_buffer)
      : EventFd(attr, -1, "", 0, cpu) {
    mmap_data_buffer_ = buffer;
    mmap_data_buffer_size_ = buffer_size;
    if (mock_aux_buffer) {
      aux_buffer_size_ = 1;  // Make HasAuxBuffer() return true.
    }
  }

  MOCK_METHOD2(CreateMappedBuffer, bool(size_t, bool));
  MOCK_METHOD0(DestroyMappedBuffer, void());
  MOCK_METHOD2(StartPolling, bool(IOEventLoop&, const std::function<bool()>&));
  MOCK_METHOD0(StopPolling, bool());
  MOCK_METHOD1(GetAvailableMmapDataSize, size_t(size_t&));
  MOCK_METHOD1(DiscardMmapData, void(size_t));

  MOCK_METHOD2(CreateAuxBuffer, bool(size_t, bool));
  MOCK_METHOD0(DestroyAuxBuffer, void());
  MOCK_METHOD4(GetAvailableAuxData, uint64_t(char**, size_t*, char**, size_t*));
  MOCK_METHOD1(DiscardAuxData, void(size_t));
};

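// Returns a perf_event_attr for the cpu-clock event, used as a template for building fake records.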
static perf_event_attr CreateFakeEventAttr() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  CHECK(type != nullptr);
  return CreateDefaultPerfEventAttr(*type);
}

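// Builds |record_count| fake SampleRecords whose user-stack payload is |stack_size| bytes, of
// which |dyn_stack_size| bytes are counted as valid data.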
static std::vector<std::unique_ptr<Record>> CreateFakeRecords(const perf_event_attr& attr,
                                                              size_t record_count,
                                                              size_t stack_size,
                                                              size_t dyn_stack_size) {
  std::vector<std::unique_ptr<Record>> records;
  for (size_t i = 0; i < record_count; ++i) {
    SampleRecord* r = new SampleRecord(attr, i, i + 1, i + 2, i + 3, i + 4, i + 5, i + 6, {}, {},
                                       std::vector<char>(stack_size), dyn_stack_size);
    records.emplace_back(r);
  }
  return records;
}

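// Rounds |value| up to the smallest power of two >= value; the fake kernel buffers use
// power-of-two sizes like real perf ring buffers.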
static size_t AlignToPowerOfTwo(size_t value) {
  size_t result = 1;
  while (result < value) {
    result <<= 1;
  }
  return result;
}

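// Returns a predicate for gmock's Truly() that always matches and, as a side effect, stores
// |value| in the size_t output argument of the mocked call (e.g. the data position reported by
// GetAvailableMmapDataSize()).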
static inline std::function<bool(size_t&)> SetArg(size_t value) {
  return [value](size_t& arg) {
    arg = value;
    return true;
  };
}

// @CddTest = 6.1/C-0-2
TEST(KernelRecordReader, smoke) {
  // 1. Create fake records.
  perf_event_attr attr = CreateFakeEventAttr();
  std::vector<std::unique_ptr<Record>> records = CreateFakeRecords(attr, 10, 0, 0);
  // 2. Create a buffer whose size is a power of two.
  size_t data_size = records.size() * records[0]->size();
  std::vector<char> buffer(AlignToPowerOfTwo(data_size));
  // 3. Copy record data into the buffer. Since a record in a kernel buffer can be wrapped around
  // to the beginning of the buffer, create that case with the first record.
  size_t data_pos = buffer.size() - 4;
  memcpy(&buffer[data_pos], records[0]->Binary(), 4);
  memcpy(&buffer[0], records[0]->Binary() + 4, records[0]->size() - 4);
  size_t pos = records[0]->size() - 4;
  for (size_t i = 1; i < records.size(); ++i) {
    memcpy(&buffer[pos], records[i]->Binary(), records[i]->size());
    pos += records[i]->size();
  }
  // 4. Read records using KernelRecordReader.
  MockEventFd event_fd(attr, 0, buffer.data(), buffer.size(), false);

  EXPECT_CALL(event_fd, GetAvailableMmapDataSize(Truly(SetArg(data_pos))))
      .Times(1)
      .WillOnce(Return(data_size));
  EXPECT_CALL(event_fd, DiscardMmapData(Eq(data_size))).Times(1);
  KernelRecordReader reader(&event_fd);
  RecordParser parser(attr);
  ASSERT_TRUE(reader.GetDataFromKernelBuffer());
  for (size_t i = 0; i < records.size(); ++i) {
    ASSERT_TRUE(reader.MoveToNextRecord(parser));
    ASSERT_EQ(reader.RecordHeader().type, records[i]->type());
    ASSERT_EQ(reader.RecordHeader().size, records[i]->size());
    ASSERT_EQ(reader.RecordTime(), records[i]->Timestamp());
    std::vector<char> data(reader.RecordHeader().size);
    reader.ReadRecord(0, data.size(), &data[0]);
    ASSERT_EQ(0, memcmp(&data[0], records[i]->Binary(), records[i]->size()));
  }
  ASSERT_FALSE(reader.MoveToNextRecord(parser));
}

// @CddTest = 6.1/C-0-2
class RecordReadThreadTest : public ::testing::Test {
 protected:
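  // Splits records_ round-robin across |event_fd_count| MockEventFds backed by in-memory buffers,
  // and sets up the gmock expectations for one AddEventFds()/SyncKernelBuffer()/RemoveEventFds()
  // cycle on each of them.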
  std::vector<EventFd*> CreateFakeEventFds(const perf_event_attr& attr, size_t event_fd_count) {
    size_t records_per_fd = records_.size() / event_fd_count;
    buffers_.clear();
    buffers_.resize(event_fd_count);
    for (size_t i = 0; i < records_.size(); ++i) {
      std::vector<char>& buffer = buffers_[i % event_fd_count];
      buffer.insert(buffer.end(), records_[i]->Binary(),
                    records_[i]->Binary() + records_[i]->size());
    }
    size_t data_size = records_per_fd * records_[0]->size();
    size_t buffer_size = AlignToPowerOfTwo(data_size);
    for (auto& buffer : buffers_) {
      buffer.resize(buffer_size);
    }
    event_fds_.resize(event_fd_count);
    for (size_t i = 0; i < event_fd_count; ++i) {
      event_fds_[i].reset(new MockEventFd(attr, i, buffers_[i].data(), buffer_size, false));
      EXPECT_CALL(*event_fds_[i], CreateMappedBuffer(_, _)).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], StartPolling(_, _)).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], GetAvailableMmapDataSize(Truly(SetArg(0))))
          .Times(1)
          .WillOnce(Return(data_size));
      EXPECT_CALL(*event_fds_[i], DiscardMmapData(Eq(data_size))).Times(1);
      EXPECT_CALL(*event_fds_[i], StopPolling()).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], DestroyMappedBuffer()).Times(1);
      EXPECT_CALL(*event_fds_[i], DestroyAuxBuffer()).Times(1);
    }
    std::vector<EventFd*> result;
    for (auto& fd : event_fds_) {
      result.push_back(fd.get());
    }
    return result;
  }

  std::vector<std::unique_ptr<Record>> records_;
  std::vector<std::vector<char>> buffers_;
  std::vector<std::unique_ptr<MockEventFd>> event_fds_;
};

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, handle_cmds) {
  perf_event_attr attr = CreateFakeEventAttr();
  records_ = CreateFakeRecords(attr, 2, 0, 0);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 2);
  RecordReadThread thread(128 * 1024, event_fds[0]->attr(), 1, 1, 0);
  IOEventLoop loop;
  bool has_notify = false;
  auto callback = [&]() {
    has_notify = true;
    return loop.ExitLoop();
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, callback));
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(loop.RunLoop());
  ASSERT_TRUE(has_notify);
  ASSERT_TRUE(thread.GetRecord());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  ASSERT_TRUE(thread.StopReadThread());
}

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, read_records) {
  perf_event_attr attr = CreateFakeEventAttr();
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  size_t record_index;
  auto callback = [&]() {
    while (true) {
      std::unique_ptr<Record> r = thread.GetRecord();
      if (!r) {
        break;
      }
      std::unique_ptr<Record>& expected = records_[record_index++];
      if (r->size() != expected->size() ||
          memcmp(r->Binary(), expected->Binary(), r->size()) != 0) {
        return false;
      }
    }
    return loop.ExitLoop();
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, callback));
  for (size_t event_fd_count = 1; event_fd_count < 10; ++event_fd_count) {
    records_ = CreateFakeRecords(attr, event_fd_count * 10, 0, 0);
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, event_fd_count);
    record_index = 0;
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(loop.RunLoop());
    ASSERT_EQ(record_index, records_.size());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  }
}

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, process_sample_record) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  attr.sample_stack_user = 64 * 1024;
  size_t record_buffer_size = 128 * 1024;
  RecordReadThread thread(record_buffer_size, attr, 1, 1, 0);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));

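  // Feeds records_ through one mocked EventFd and stores the first record produced by the read
  // thread (if any) in |r|.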
  auto read_record = [&](std::unique_ptr<Record>& r) {
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
    r = thread.GetRecord();
  };

  // When the free space in the record buffer is above the low level, only invalid stack data in
  // sample records is removed.
  thread.SetBufferLevels(0, 0);
  records_ = CreateFakeRecords(attr, 1, 8192, 8192);
  std::unique_ptr<Record> r;
  read_record(r);
  ASSERT_TRUE(r);
  SampleRecord* sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 8192u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 8192u);
  records_ = CreateFakeRecords(attr, 1, 8192, 4096);
  read_record(r);
  ASSERT_TRUE(r);
  sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 4096u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 4096u);

  // When the free space in the record buffer is below the low level but above the critical level,
  // only 1K of stack data is kept in each sample record.
  thread.SetBufferLevels(record_buffer_size, 0);
  read_record(r);
  ASSERT_TRUE(r);
  sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 1024u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 1024u);

  // When the free space in the record buffer is below the critical level, sample records are
  // dropped.
  thread.SetBufferLevels(record_buffer_size, record_buffer_size);
  read_record(r);
  ASSERT_FALSE(r);
  ASSERT_EQ(thread.GetStat().userspace_lost_samples, 1u);
  ASSERT_EQ(thread.GetStat().userspace_lost_non_samples, 0u);
  ASSERT_EQ(thread.GetStat().userspace_truncated_stack_samples, 1u);
}

// Test that the data notification persists until the RecordBuffer is empty, so all records can be
// read even when reading only one record at a time.
// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, has_data_notification_until_buffer_empty) {
  perf_event_attr attr = CreateFakeEventAttr();
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  size_t record_index = 0;
  auto read_one_record = [&]() {
    std::unique_ptr<Record> r = thread.GetRecord();
    if (!r) {
      return loop.ExitLoop();
    }
    std::unique_ptr<Record>& expected = records_[record_index++];
    if (r->size() != expected->size() || memcmp(r->Binary(), expected->Binary(), r->size()) != 0) {
      return false;
    }
    return true;
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, read_one_record));
  records_ = CreateFakeRecords(attr, 2, 0, 0);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(loop.RunLoop());
  ASSERT_EQ(record_index, records_.size());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
}

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, no_truncated_samples) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  attr.sample_stack_user = 64 * 1024;
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0, false);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  const size_t total_samples = 100;
  records_ = CreateFakeRecords(attr, total_samples, 8 * 1024, 8 * 1024);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  size_t received_samples = 0;
  while (thread.GetRecord()) {
    received_samples++;
  }
  ASSERT_GT(received_samples, 0u);
  ASSERT_GT(thread.GetStat().userspace_lost_samples, 0u);
  ASSERT_EQ(thread.GetStat().userspace_lost_samples, total_samples - received_samples);
  ASSERT_EQ(thread.GetStat().userspace_truncated_stack_samples, 0u);
}

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, exclude_perf) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  size_t stack_size = 1024;
  attr.sample_stack_user = stack_size;
  records_.emplace_back(new SampleRecord(attr, 0, 1, getpid(), 3, 4, 5, 6, {}, {},
                                         std::vector<char>(stack_size), stack_size));
  records_.emplace_back(new SampleRecord(attr, 0, 1, getpid() + 1, 3, 4, 5, 6, {}, {},
                                         std::vector<char>(stack_size), stack_size));

  auto read_records = [&](RecordReadThread& thread, std::vector<std::unique_ptr<Record>>& records) {
    records.clear();
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
    while (auto r = thread.GetRecord()) {
      records.emplace_back(std::move(r));
    }
  };

  // By default, no samples are excluded.
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  std::vector<std::unique_ptr<Record>> received_records;
  read_records(thread, received_records);
  ASSERT_EQ(received_records.size(), 2);
  CheckRecordEqual(*received_records[0], *records_[0]);
  CheckRecordEqual(*received_records[1], *records_[1]);

  // With exclude_perf set, the first sample (from the current process) is excluded.
  RecordReadThread thread2(128 * 1024, attr, 1, 1, 0, true, true);
  ASSERT_TRUE(thread2.RegisterDataCallback(loop, []() { return true; }));
  read_records(thread2, received_records);
  ASSERT_EQ(received_records.size(), 1);
  CheckRecordEqual(*received_records[0], *records_[1]);
}

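// Describes one chunk of fake AUX data: up to two buffers filled with character |c|, optional
// zero padding used for 8-byte alignment, and whether the chunk is expected to be lost because it
// does not fit in the record buffer.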
struct FakeAuxData {
  std::vector<char> buf1;
  std::vector<char> buf2;
  std::vector<char> pad;
  bool lost;

  FakeAuxData(size_t buf1_size, size_t buf2_size, char c, size_t pad_size, bool lost)
      : buf1(buf1_size, c), buf2(buf2_size, c), pad(pad_size, 0), lost(lost) {}
};

// @CddTest = 6.1/C-0-2
TEST_F(RecordReadThreadTest, read_aux_data) {
  ScopedEventTypes scoped_types("cs-etm,0,0");
  const EventType* type = FindEventTypeByName("cs-etm");
  ASSERT_TRUE(type != nullptr);
  std::vector<FakeAuxData> aux_data;
  aux_data.emplace_back(40, 0, '0', 0, false);   // one buffer
  aux_data.emplace_back(40, 40, '1', 0, false);  // two buffers
  aux_data.emplace_back(36, 0, '2', 4, false);   // one buffer needing padding to 8-byte alignment
  // one buffer too big to fit in the record buffer, failing the free-size check
  aux_data.emplace_back(1024, 0, '3', 0, true);
  // one buffer too big to fit in the record buffer, failing at AllocWriteSpace()
  aux_data.emplace_back(800, 0, '4', 0, true);
  size_t test_index = 0;

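  // Side-effecting Truly() matchers that hand the current FakeAuxData buffers to the code under
  // test through GetAvailableAuxData()'s output parameters.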
  auto SetBuf1 = [&](char** buf1) {
    *buf1 = aux_data[test_index].buf1.data();
    return true;
  };
  auto SetSize1 = [&](size_t* size1) {
    *size1 = aux_data[test_index].buf1.size();
    return true;
  };
  auto SetBuf2 = [&](char** buf2) {
    *buf2 = aux_data[test_index].buf2.data();
    return true;
  };
  auto SetSize2 = [&](size_t* size2) {
    *size2 = aux_data[test_index].buf2.size();
    return true;
  };
  auto CheckDiscardSize = [&](size_t size) {
    return size == aux_data[test_index].buf1.size() + aux_data[test_index].buf2.size();
  };

  const size_t AUX_BUFFER_SIZE = 4096;

  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  MockEventFd fd(attr, 0, nullptr, 1, true);
  EXPECT_CALL(fd, CreateMappedBuffer(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, CreateAuxBuffer(Eq(AUX_BUFFER_SIZE), _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, StartPolling(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, GetAvailableMmapDataSize(_)).Times(aux_data.size()).WillRepeatedly(Return(0));
  EXPECT_CALL(fd,
              GetAvailableAuxData(Truly(SetBuf1), Truly(SetSize1), Truly(SetBuf2), Truly(SetSize2)))
      .Times(aux_data.size());
  EXPECT_CALL(fd, DiscardAuxData(Truly(CheckDiscardSize))).Times(aux_data.size());
  EXPECT_CALL(fd, StopPolling()).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, DestroyMappedBuffer()).Times(1);
  EXPECT_CALL(fd, DestroyAuxBuffer()).Times(1);

  RecordReadThread thread(1024, attr, 1, 1, AUX_BUFFER_SIZE);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  ASSERT_TRUE(thread.AddEventFds({&fd}));
  for (; test_index < aux_data.size(); ++test_index) {
    ASSERT_TRUE(thread.SyncKernelBuffer());
    std::unique_ptr<Record> r = thread.GetRecord();
    if (aux_data[test_index].lost) {
      ASSERT_TRUE(r == nullptr);
      continue;
    }
    ASSERT_TRUE(r);
    ASSERT_EQ(r->type(), PERF_RECORD_AUXTRACE);
    auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
    auto& expected = aux_data[test_index];
    ASSERT_EQ(auxtrace->data->aux_size,
              expected.buf1.size() + expected.buf2.size() + expected.pad.size());
    const char* p = auxtrace->location.addr;
    ASSERT_TRUE(p != nullptr);
    if (!expected.buf1.empty()) {
      ASSERT_EQ(memcmp(p, expected.buf1.data(), expected.buf1.size()), 0);
      p += expected.buf1.size();
    }
    if (!expected.buf2.empty()) {
      ASSERT_EQ(memcmp(p, expected.buf2.data(), expected.buf2.size()), 0);
      p += expected.buf2.size();
    }
    if (!expected.pad.empty()) {
      ASSERT_EQ(memcmp(p, expected.pad.data(), expected.pad.size()), 0);
    }
  }
  ASSERT_TRUE(thread.GetRecord() == nullptr);
  ASSERT_TRUE(thread.RemoveEventFds({&fd}));
  size_t aux_data_size = 0;
  size_t lost_aux_data_size = 0;
  for (auto& aux : aux_data) {
    if (aux.lost) {
      lost_aux_data_size += aux.buf1.size() + aux.buf2.size();
    } else {
      aux_data_size += aux.buf1.size() + aux.buf2.size();
    }
  }
  ASSERT_EQ(aux_data_size, thread.GetStat().aux_data_size);
  ASSERT_EQ(lost_aux_data_size, thread.GetStat().lost_aux_data_size);
}
586