/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <stack>
#include <string>
#include <unordered_map>
#include <vector>

#include <android-base/file.h>
#include <android-base/strings.h>
#include <benchmark/benchmark.h>

#include <memory_trace/MemoryTrace.h>

#include "File.h"
#include "Utils.h"

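// Holds a decoded trace: the recorded allocation entries plus an array of
// pointer slots used to track live allocations while the trace is replayed.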
struct TraceDataType {
  memory_trace::Entry* entries = nullptr;
  size_t num_entries = 0;
  void** ptrs = nullptr;
  size_t num_ptrs = 0;
};

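// Return a pointer-slot index, reusing a previously freed slot if one is
// available and otherwise growing the slot count.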
static size_t GetIndex(std::stack<size_t>& free_indices, size_t* max_index) {
  if (free_indices.empty()) {
    return (*max_index)++;
  }
  size_t index = free_indices.top();
  free_indices.pop();
  return index;
}

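// Free any allocations that are still live at the end of a trace replay so
// the next iteration starts from a clean slate.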
static void FreePtrs(TraceDataType* trace_data) {
  for (size_t i = 0; i < trace_data->num_ptrs; i++) {
    void* ptr = trace_data->ptrs[i];
    if (ptr != nullptr) {
      free(ptr);
      trace_data->ptrs[i] = nullptr;
    }
  }
}

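// Release the mmaped pointer-slot array and the decoded entries of a
// previously cached trace.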
static void FreeTraceData(TraceDataType* trace_data) {
  if (trace_data->ptrs == nullptr) {
    return;
  }

  munmap(trace_data->ptrs, sizeof(void*) * trace_data->num_ptrs);
  FreeEntries(trace_data->entries, trace_data->num_entries);
}

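// Decode a trace file into entries and rewrite the recorded pointer values
// into indices into a shared pointer-slot array. The result for the most
// recently used file is cached so the same trace is not decoded twice.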
static void GetTraceData(const std::string& filename, TraceDataType* trace_data) {
  // Only keep last trace encountered cached.
  static std::string cached_filename;
  static TraceDataType cached_trace_data;
  if (cached_filename == filename) {
    *trace_data = cached_trace_data;
    return;
  } else {
    FreeTraceData(&cached_trace_data);
  }

  cached_filename = filename;
  GetUnwindInfo(filename.c_str(), &trace_data->entries, &trace_data->num_entries);

  // This loop will convert the ptr field into an index into the ptrs array.
  // Creating this index allows the trace run to quickly store or retrieve the
  // allocation.
  // For free, the ptr field will be index + one, where a zero represents
  // a free(nullptr) call.
  // For realloc, the old_pointer field will be index + one, where a zero
  // represents a realloc(nullptr, XX).
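  // For example, the raw sequence
  //   malloc -> 0x1000, free(0x1000), malloc -> 0x2000
  // becomes
  //   malloc entry.ptr = 0, free entry.ptr = 1 (index 0 + 1, slot 0 recycled),
  //   malloc entry.ptr = 0 (reusing the freed slot).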
  trace_data->num_ptrs = 0;
  std::stack<size_t> free_indices;
  std::unordered_map<uint64_t, size_t> ptr_to_index;
  for (size_t i = 0; i < trace_data->num_entries; i++) {
    memory_trace::Entry* entry = &trace_data->entries[i];
    switch (entry->type) {
      case memory_trace::MALLOC:
      case memory_trace::CALLOC:
      case memory_trace::MEMALIGN: {
        size_t idx = GetIndex(free_indices, &trace_data->num_ptrs);
        ptr_to_index[entry->ptr] = idx;
        entry->ptr = idx;
        break;
      }
      case memory_trace::REALLOC: {
        if (entry->u.old_ptr != 0) {
          auto idx_entry = ptr_to_index.find(entry->u.old_ptr);
          if (idx_entry == ptr_to_index.end()) {
            errx(1, "File Error: Failed to find realloc pointer %" PRIx64, entry->u.old_ptr);
          }
          size_t old_pointer_idx = idx_entry->second;
          free_indices.push(old_pointer_idx);
          ptr_to_index.erase(idx_entry);
          entry->u.old_ptr = old_pointer_idx + 1;
        }
        size_t idx = GetIndex(free_indices, &trace_data->num_ptrs);
        ptr_to_index[entry->ptr] = idx;
        entry->ptr = idx;
        break;
      }
      case memory_trace::FREE:
        if (entry->ptr != 0) {
          auto idx_entry = ptr_to_index.find(entry->ptr);
          if (idx_entry == ptr_to_index.end()) {
            errx(1, "File Error: Unable to find free pointer %" PRIx64, entry->ptr);
          }
          free_indices.push(idx_entry->second);
          entry->ptr = idx_entry->second + 1;
          ptr_to_index.erase(idx_entry);
        }
        break;
      case memory_trace::THREAD_DONE:
        break;
    }
  }
  void* map = mmap(nullptr, sizeof(void*) * trace_data->num_ptrs, PROT_READ | PROT_WRITE,
                   MAP_ANON | MAP_PRIVATE, -1, 0);
  if (map == MAP_FAILED) {
    err(1, "mmap failed");
  }
  trace_data->ptrs = reinterpret_cast<void**>(map);

  cached_trace_data = *trace_data;
}

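// Replay every entry of the trace, accumulating the time spent in the
// allocator calls (and in making the allocations resident) and reporting it
// as the manual iteration time.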
static void RunTrace(benchmark::State& state, TraceDataType* trace_data) {
  int pagesize = getpagesize();
  uint64_t total_ns = 0;
  uint64_t start_ns;
  void** ptrs = trace_data->ptrs;
  for (size_t i = 0; i < trace_data->num_entries; i++) {
    void* ptr;
    const memory_trace::Entry& entry = trace_data->entries[i];
    switch (entry.type) {
      case memory_trace::MALLOC:
        start_ns = Nanotime();
        ptr = malloc(entry.size);
        if (ptr == nullptr) {
          errx(1, "malloc returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: malloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case memory_trace::CALLOC:
        start_ns = Nanotime();
        ptr = calloc(entry.u.n_elements, entry.size);
        if (ptr == nullptr) {
          errx(1, "calloc returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: calloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case memory_trace::MEMALIGN:
        start_ns = Nanotime();
        ptr = memalign(entry.u.align, entry.size);
        if (ptr == nullptr) {
          errx(1, "memalign returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: memalign pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case memory_trace::REALLOC:
        start_ns = Nanotime();
        if (entry.u.old_ptr == 0) {
          ptr = realloc(nullptr, entry.size);
        } else {
          ptr = realloc(ptrs[entry.u.old_ptr - 1], entry.size);
          ptrs[entry.u.old_ptr - 1] = nullptr;
        }
        if (entry.size > 0) {
          if (ptr == nullptr) {
            errx(1, "realloc returned nullptr");
          }
          MakeAllocationResident(ptr, entry.size, pagesize);
        }
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: realloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case memory_trace::FREE:
        if (entry.ptr != 0) {
          ptr = ptrs[entry.ptr - 1];
          ptrs[entry.ptr - 1] = nullptr;
        } else {
          ptr = nullptr;
        }
        start_ns = Nanotime();
        free(ptr);
        total_ns += Nanotime() - start_ns;
        break;

      case memory_trace::THREAD_DONE:
        break;
    }
  }
  state.SetIterationTime(total_ns / double(1000000000.0));

  FreePtrs(trace_data);
}

// Run a trace as if all of the allocations occurred in a single thread.
// This is not completely realistic, but it is a possible worst case that
// could happen in an app.
static void BenchmarkTrace(benchmark::State& state, const char* filename,
                           [[maybe_unused]] bool enable_decay_time) {
#if defined(__BIONIC__)
  if (enable_decay_time) {
    mallopt(M_DECAY_TIME, 1);
  } else {
    mallopt(M_DECAY_TIME, 0);
  }
#endif
  std::string full_filename(android::base::GetExecutableDirectory() + "/traces/" + filename);

  TraceDataType trace_data;
  GetTraceData(full_filename, &trace_data);

  for (auto _ : state) {
    RunTrace(state, &trace_data);
  }

  // Don't free the trace_data, it is cached. The last set of trace data
  // will be leaked away.
}

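// Options shared by every trace benchmark: manual timing (only the allocator
// calls measured in RunTrace count), microsecond units, at least 15 seconds
// of iterations per repetition, four repetitions, and only the aggregate
// results reported.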
#define BENCH_OPTIONS                 \
  UseManualTime()                     \
      ->Unit(benchmark::kMicrosecond) \
      ->MinTime(15.0)                 \
      ->Repetitions(4)                \
      ->ReportAggregatesOnly(true)

static void BM_angry_birds2_default(benchmark::State& state) {
  BenchmarkTrace(state, "angry_birds2.zip", true);
}
BENCHMARK(BM_angry_birds2_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_angry_birds2_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "angry_birds2.zip", false);
}
BENCHMARK(BM_angry_birds2_no_decay)->BENCH_OPTIONS;
#endif

static void BM_camera_default(benchmark::State& state) {
  BenchmarkTrace(state, "camera.zip", true);
}
BENCHMARK(BM_camera_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_camera_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "camera.zip", false);
}
BENCHMARK(BM_camera_no_decay)->BENCH_OPTIONS;
#endif

static void BM_candy_crush_saga_default(benchmark::State& state) {
  BenchmarkTrace(state, "candy_crush_saga.zip", true);
}
BENCHMARK(BM_candy_crush_saga_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_candy_crush_saga_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "candy_crush_saga.zip", false);
}
BENCHMARK(BM_candy_crush_saga_no_decay)->BENCH_OPTIONS;
#endif

void BM_gmail_default(benchmark::State& state) {
  BenchmarkTrace(state, "gmail.zip", true);
}
BENCHMARK(BM_gmail_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_gmail_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "gmail.zip", false);
}
BENCHMARK(BM_gmail_no_decay)->BENCH_OPTIONS;
#endif

void BM_maps_default(benchmark::State& state) {
  BenchmarkTrace(state, "maps.zip", true);
}
BENCHMARK(BM_maps_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_maps_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "maps.zip", false);
}
BENCHMARK(BM_maps_no_decay)->BENCH_OPTIONS;
#endif

void BM_photos_default(benchmark::State& state) {
  BenchmarkTrace(state, "photos.zip", true);
}
BENCHMARK(BM_photos_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_photos_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "photos.zip", false);
}
BENCHMARK(BM_photos_no_decay)->BENCH_OPTIONS;
#endif

void BM_pubg_default(benchmark::State& state) {
  BenchmarkTrace(state, "pubg.zip", true);
}
BENCHMARK(BM_pubg_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_pubg_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "pubg.zip", false);
}
BENCHMARK(BM_pubg_no_decay)->BENCH_OPTIONS;
#endif

void BM_surfaceflinger_default(benchmark::State& state) {
  BenchmarkTrace(state, "surfaceflinger.zip", true);
}
BENCHMARK(BM_surfaceflinger_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_surfaceflinger_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "surfaceflinger.zip", false);
}
BENCHMARK(BM_surfaceflinger_no_decay)->BENCH_OPTIONS;
#endif

void BM_system_server_default(benchmark::State& state) {
  BenchmarkTrace(state, "system_server.zip", true);
}
BENCHMARK(BM_system_server_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_system_server_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "system_server.zip", false);
}
BENCHMARK(BM_system_server_no_decay)->BENCH_OPTIONS;
#endif

void BM_systemui_default(benchmark::State& state) {
  BenchmarkTrace(state, "systemui.zip", true);
}
BENCHMARK(BM_systemui_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_systemui_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "systemui.zip", false);
}
BENCHMARK(BM_systemui_no_decay)->BENCH_OPTIONS;
#endif

void BM_youtube_default(benchmark::State& state) {
  BenchmarkTrace(state, "youtube.zip", true);
}
BENCHMARK(BM_youtube_default)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_youtube_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "youtube.zip", false);
}
BENCHMARK(BM_youtube_no_decay)->BENCH_OPTIONS;
#endif

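// Strip out the custom --cpu=<cpu> option, which pins the benchmark to a
// single CPU, before handing the remaining arguments to the benchmark
// library.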
int main(int argc, char** argv) {
  std::vector<char*> args;
  args.push_back(argv[0]);

  // Look for the --cpu=XX option.
  for (int i = 1; i < argc; i++) {
    if (strncmp(argv[i], "--cpu=", 6) == 0) {
      char* endptr;
      int cpu = strtol(&argv[i][6], &endptr, 10);
      if (argv[i][6] == '\0' || endptr == nullptr || *endptr != '\0') {
        printf("Invalid format of --cpu option, '%s' must be an integer value.\n", argv[i] + 6);
        return 1;
      }
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      CPU_SET(cpu, &cpuset);
      if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
        if (errno == EINVAL) {
          printf("Invalid cpu %d\n", cpu);
          return 1;
        }
        perror("sched_setaffinity failed");
        return 1;
      }
      printf("Locking to cpu %d\n", cpu);
    } else {
      args.push_back(argv[i]);
    }
  }

  argc = args.size();
  ::benchmark::Initialize(&argc, args.data());
  if (::benchmark::ReportUnrecognizedArguments(argc, args.data())) return 1;
  ::benchmark::RunSpecifiedBenchmarks();
}
449