1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "profile_saver.h"
18
19 #include <fcntl.h>
20 #include <sys/resource.h>
21 #include <sys/stat.h>
22 #include <sys/types.h>
23 #include <unistd.h>
24
25 #include <algorithm>
26 #include <string>
27 #include <utility>
28
29 #include "android-base/file.h"
30 #include "android-base/strings.h"
31 #include "app_info.h"
32 #include "art_method-inl.h"
33 #include "base/compiler_filter.h"
34 #include "base/logging.h" // For VLOG.
35 #include "base/pointer_size.h"
36 #include "base/safe_map.h"
37 #include "base/scoped_arena_containers.h"
38 #include "base/stl_util.h"
39 #include "base/systrace.h"
40 #include "base/time_utils.h"
41 #include "base/unix_file/fd_file.h"
42 #include "class_table-inl.h"
43 #include "dex/dex_file_loader.h"
44 #include "dex_reference_collection.h"
45 #include "gc/collector_type.h"
46 #include "gc/gc_cause.h"
47 #include "jit/jit.h"
48 #include "jit/profiling_info.h"
49 #include "oat/oat_file_manager.h"
50 #include "profile/profile_compilation_info.h"
51 #include "scoped_thread_state_change-inl.h"
52
53 namespace art HIDDEN {
54
using Hotness = ProfileCompilationInfo::MethodHotness;

// Singleton instance; reads and writes are guarded by Locks::profiler_lock_.
ProfileSaver* ProfileSaver::instance_ = nullptr;
// pthread handle of the saver thread, used to adjust its scheduling priority.
pthread_t ProfileSaver::profiler_pthread_ = 0U;

static_assert(ProfileCompilationInfo::kIndividualInlineCacheSize ==
              InlineCache::kIndividualCacheSize,
              "InlineCache and ProfileCompilationInfo do not agree on kIndividualCacheSize");

// At what priority to schedule the saver threads. 9 is the lowest foreground priority on device.
static constexpr int kProfileSaverPthreadPriority = 9;
66
SetProfileSaverThreadPriority(pthread_t thread,int priority)67 static void SetProfileSaverThreadPriority(pthread_t thread, int priority) {
68 #if defined(ART_TARGET_ANDROID)
69 int result = setpriority(PRIO_PROCESS, pthread_gettid_np(thread), priority);
70 if (result != 0) {
71 LOG(ERROR) << "Failed to setpriority to :" << priority;
72 }
73 #else
74 UNUSED(thread);
75 UNUSED(priority);
76 #endif
77 }
78
// Returns the default scheduling priority for a newly created thread.
// On host builds, simply returns 0.
static int GetDefaultThreadPriority() {
#if defined(ART_TARGET_ANDROID)
  pthread_attr_t attr;
  sched_param param;
  pthread_attr_init(&attr);
  // Fix: the second argument was garbled ("&para;m"); it must be `&param`.
  pthread_attr_getschedparam(&attr, &param);
  // POSIX requires destroying an initialized attribute object to avoid leaks.
  pthread_attr_destroy(&attr);
  return param.sched_priority;
#else
  return 0;
#endif
}
90
// Constructs the saver with all statistics counters zeroed. The saver thread
// itself is started elsewhere; this only records options and the code cache.
ProfileSaver::ProfileSaver(const ProfileSaverOptions& options, jit::JitCodeCache* jit_code_cache)
    : jit_code_cache_(jit_code_cache),
      shutting_down_(false),
      last_time_ns_saver_woke_up_(0),
      jit_activity_notifications_(0),
      wait_lock_("ProfileSaver wait lock"),
      period_condition_("ProfileSaver period condition", wait_lock_),
      total_bytes_written_(0),
      total_number_of_writes_(0),
      total_number_of_code_cache_queries_(0),
      total_number_of_skipped_writes_(0),
      total_number_of_failed_writes_(0),
      total_ms_of_sleep_(0),
      total_ns_of_work_(0),
      total_number_of_hot_spikes_(0),
      total_number_of_wake_ups_(0),
      options_(options) {
  // The saver must never be constructed when profile saving is disabled.
  DCHECK(options_.IsEnabled());
}
110
~ProfileSaver()111 ProfileSaver::~ProfileSaver() {
112 for (auto& it : profile_cache_) {
113 delete it.second;
114 }
115 }
116
NotifyStartupCompleted()117 void ProfileSaver::NotifyStartupCompleted() {
118 Thread* self = Thread::Current();
119 MutexLock mu(self, *Locks::profiler_lock_);
120 if (instance_ == nullptr || instance_->shutting_down_) {
121 return;
122 }
123 MutexLock mu2(self, instance_->wait_lock_);
124 instance_->period_condition_.Signal(self);
125 }
126
// Main loop of the profile saver thread: wait for startup, then alternate
// between sleeping (woken by JIT notifications or timeouts) and writing
// profiling info to disk via ProcessProfilingInfo().
void ProfileSaver::Run() {
  Thread* self = Thread::Current();

  // For thread annotalysis, the setup is more complicated than it should be. Run needs to start
  // under mutex, but should drop it.
  Locks::profiler_lock_->ExclusiveUnlock(self);

  // Fetch the resolved classes for the app images after waiting for Startup
  // completion notification.
  const uint64_t thread_start_time = NanoTime();

  // Wait for startup to complete with a timeout at StartupCompletedTask.
  // Note that we may be woken up by JIT notifications.
  // We need to wait for startup to complete to make sure we have
  // the resolved classes and methods.
  while (!Runtime::Current()->GetStartupCompleted() && !ShuttingDown(self)) {
    MutexLock mu(self, wait_lock_);
    // Make sure to sleep again until startup is completed.
    period_condition_.Wait(self);
  }

  // Mark collected classes/methods as startup.
  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);

  bool is_min_first_save_set =
      options_.GetMinFirstSaveMs() != ProfileSaverOptions::kMinFirstSaveMsNotSet;
  // Force an early first save only when no profile data has ever been written.
  bool force_first_save = is_min_first_save_set && IsFirstSave();

  // When we save without waiting for JIT notifications we use a simple
  // exponential back off policy bounded by max_wait_without_jit.
  uint32_t max_wait_without_jit = options_.GetMinSavePeriodMs() * 16;
  uint64_t cur_wait_without_jit = options_.GetMinSavePeriodMs();

  // Loop for the profiled methods.
  while (!ShuttingDown(self)) {
    // In case of force_first_save we need to count from the start of the thread.
    uint64_t sleep_start = force_first_save ? thread_start_time : NanoTime();
    uint64_t sleep_time = 0;
    {
      MutexLock mu(self, wait_lock_);
      if (options_.GetWaitForJitNotificationsToSave()) {
        period_condition_.Wait(self);
      } else {
        // No JIT notifications expected; use exponential back-off between saves.
        period_condition_.TimedWait(self, cur_wait_without_jit, 0);
        if (cur_wait_without_jit < max_wait_without_jit) {
          cur_wait_without_jit *= 2;
        }
      }
      sleep_time = NanoTime() - sleep_start;
    }
    // Check if the thread was woken up for shutdown.
    if (ShuttingDown(self)) {
      break;
    }
    total_number_of_wake_ups_++;
    // We might have been woken up by a huge number of notifications to guarantee saving.
    // If we didn't meet the minimum saving period go back to sleep (only if missed by
    // a reasonable margin).
    uint64_t min_save_period_ns = MsToNs(force_first_save ? options_.GetMinFirstSaveMs() :
                                                            options_.GetMinSavePeriodMs());
    while (min_save_period_ns * 0.9 > sleep_time) {
      {
        MutexLock mu(self, wait_lock_);
        // Sleep for the remainder of the minimum period.
        period_condition_.TimedWait(self, NsToMs(min_save_period_ns - sleep_time), 0);
        sleep_time = NanoTime() - sleep_start;
      }
      // Check if the thread was woken up for shutdown.
      if (ShuttingDown(self)) {
        break;
      }
      total_number_of_wake_ups_++;
    }
    total_ms_of_sleep_ += NsToMs(NanoTime() - sleep_start);

    if (ShuttingDown(self)) {
      break;
    }

    uint16_t number_of_new_methods = 0;
    uint64_t start_work = NanoTime();
    bool profile_saved_to_disk = ProcessProfilingInfo(
        /*force_save=*/ false,
        &number_of_new_methods);

    // Reset the flag, so we can continue on the normal schedule.
    force_first_save = false;

    // Update the notification counter based on result. Note that there might be contention on this
    // but we don't care about to be 100% precise.
    if (!profile_saved_to_disk) {
      // If we didn't save to disk it may be because we didn't have enough new methods.
      // Set the jit activity notifications to number_of_new_methods so we can wake up earlier
      // if needed.
      jit_activity_notifications_ = number_of_new_methods;
    }
    total_ns_of_work_ += NanoTime() - start_work;
  }
}
225
226 // Checks if the profile file is empty.
227 // Return true if the size of the profile file is 0 or if there were errors when
228 // trying to open the file.
IsProfileEmpty(const std::string & location)229 static bool IsProfileEmpty(const std::string& location) {
230 if (location.empty()) {
231 return true;
232 }
233
234 struct stat stat_buffer;
235 if (stat(location.c_str(), &stat_buffer) != 0) {
236 if (VLOG_IS_ON(profiler)) {
237 PLOG(WARNING) << "Failed to stat profile location for IsFirstUse: " << location;
238 }
239 return true;
240 }
241
242 VLOG(profiler) << "Profile " << location << " size=" << stat_buffer.st_size;
243 return stat_buffer.st_size == 0;
244 }
245
IsFirstSave()246 bool ProfileSaver::IsFirstSave() {
247 Thread* self = Thread::Current();
248 SafeMap<std::string, std::pair<std::string, AppInfo::CodeType>> tracked_locations;
249 {
250 // Make a copy so that we don't hold the lock while doing I/O.
251 MutexLock mu(self, *Locks::profiler_lock_);
252 tracked_locations = tracked_profiles_;
253 }
254
255 for (const auto& it : tracked_locations) {
256 if (ShuttingDown(self)) {
257 return false;
258 }
259 const std::string& cur_profile = it.first;
260 const std::string& ref_profile = it.second.first;
261
262 // Check if any profile is non empty. If so, then this is not the first save.
263 if (!IsProfileEmpty(cur_profile) || !IsProfileEmpty(ref_profile)) {
264 return false;
265 }
266 }
267
268 // All locations are empty. Assume this is the first use.
269 VLOG(profiler) << "All profile locations are empty. This is considered to be first save";
270 return true;
271 }
272
NotifyJitActivity()273 void ProfileSaver::NotifyJitActivity() {
274 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
275 if (instance_ == nullptr || instance_->shutting_down_) {
276 return;
277 }
278 instance_->NotifyJitActivityInternal();
279 }
280
WakeUpSaver()281 void ProfileSaver::WakeUpSaver() {
282 jit_activity_notifications_ = 0;
283 last_time_ns_saver_woke_up_ = NanoTime();
284 period_condition_.Signal(Thread::Current());
285 }
286
NotifyJitActivityInternal()287 void ProfileSaver::NotifyJitActivityInternal() {
288 // Unlikely to overflow but if it happens,
289 // we would have waken up the saver long before that.
290 jit_activity_notifications_++;
291 // Note that we are not as precise as we could be here but we don't want to wake the saver
292 // every time we see a hot method.
293 if (jit_activity_notifications_ > options_.GetMinNotificationBeforeWake()) {
294 MutexLock wait_mutex(Thread::Current(), wait_lock_);
295 if ((NanoTime() - last_time_ns_saver_woke_up_) > MsToNs(options_.GetMinSavePeriodMs())) {
296 WakeUpSaver();
297 } else if (jit_activity_notifications_ > options_.GetMaxNotificationBeforeWake()) {
298 // Make sure to wake up the saver if we see a spike in the number of notifications.
299 // This is a precaution to avoid losing a big number of methods in case
300 // this is a spike with no jit after.
301 total_number_of_hot_spikes_++;
302 WakeUpSaver();
303 }
304 }
305 }
306
// RAII helper that raises the given thread to the default priority for the
// duration of the scope, then restores the saver's usual low priority.
class ProfileSaver::ScopedDefaultPriority {
 public:
  explicit ScopedDefaultPriority(pthread_t thread) : thread_(thread) {
    SetProfileSaverThreadPriority(thread_, GetDefaultThreadPriority());
  }

  // Drop back to the profile saver's dedicated (low) priority on scope exit.
  ~ScopedDefaultPriority() {
    SetProfileSaverThreadPriority(thread_, kProfileSaverPthreadPriority);
  }

 private:
  const pthread_t thread_;
};
320
// Helper that walks the class tables of all class loaders under the mutator
// lock (CollectClasses), records per-dex-file class/method snapshots into
// arena storage, and later writes them into a ProfileCompilationInfo
// (UpdateProfile) without holding the mutator lock.
class ProfileSaver::GetClassesAndMethodsHelper {
 public:
  GetClassesAndMethodsHelper(bool startup,
                             const ProfileSaverOptions& options,
                             const ProfileCompilationInfo::ProfileSampleAnnotation& annotation)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : startup_(startup),
        profile_boot_class_path_(options.GetProfileBootClassPath()),
        extra_flags_(GetExtraMethodHotnessFlags(options)),
        annotation_(annotation),
        arena_stack_(Runtime::Current()->GetArenaPool()),
        allocator_(&arena_stack_),
        class_loaders_(std::nullopt),
        dex_file_records_map_(allocator_.Adapter(kArenaAllocProfile)),
        number_of_hot_methods_(0u),
        number_of_sampled_methods_(0u) {
    std::fill_n(max_primitive_array_dimensions_.data(), max_primitive_array_dimensions_.size(), 0u);
  }

  ~GetClassesAndMethodsHelper() REQUIRES_SHARED(Locks::mutator_lock_) {
    // The `class_loaders_` member destructor needs the mutator lock.
    // We need to destroy arena-allocated dex file records.
    for (const auto& entry : dex_file_records_map_) {
      delete entry.second;
    }
  }

  void CollectClasses(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void UpdateProfile(const std::set<std::string>& locations, ProfileCompilationInfo* profile_info);

  // Number of methods recorded as hot by the last UpdateProfile() call.
  size_t GetNumberOfHotMethods() const {
    return number_of_hot_methods_;
  }

  // Number of methods recorded as sampled by the last UpdateProfile() call.
  size_t GetNumberOfSampledMethods() const {
    return number_of_sampled_methods_;
  }

 private:
  // Root visitor that forwards each non-null class loader to CollectInternal().
  class CollectInternalVisitor {
   public:
    explicit CollectInternalVisitor(GetClassesAndMethodsHelper* helper)
        : helper_(helper) {}

    void VisitRootIfNonNull(StackReference<mirror::Object>* ref)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!ref->IsNull()) {
        helper_->CollectInternal</*kBootClassLoader=*/ false>(ref->AsMirrorPtr()->AsClassLoader());
      }
    }

   private:
    GetClassesAndMethodsHelper* helper_;
  };

  // Snapshot of one class taken while holding the mutator lock.
  struct ClassRecord {
    dex::TypeIndex type_index;
    uint16_t array_dimension;
    uint32_t copied_methods_start;
    LengthPrefixedArray<ArtMethod>* methods;
  };

  // Per-dex-file collection of class records and copied methods, allocated
  // from the helper's arena.
  struct DexFileRecords : public DeletableArenaObject<kArenaAllocProfile> {
    explicit DexFileRecords(ScopedArenaAllocator* allocator)
        : class_records(allocator->Adapter(kArenaAllocProfile)),
          copied_methods(allocator->Adapter(kArenaAllocProfile)) {
      class_records.reserve(kInitialClassRecordsReservation);
    }

    static constexpr size_t kInitialClassRecordsReservation = 512;

    ScopedArenaVector<ClassRecord> class_records;
    ScopedArenaVector<ArtMethod*> copied_methods;
  };

  using DexFileRecordsMap = ScopedArenaHashMap<const DexFile*, DexFileRecords*>;

  ALWAYS_INLINE static bool ShouldCollectClasses(bool startup) {
    // We only record classes for the startup case. This may change in the future.
    return startup;
  }

  // Collect classes and methods from one class loader.
  template <bool kBootClassLoader>
  void CollectInternal(ObjPtr<mirror::ClassLoader> class_loader) NO_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  const bool startup_;
  const bool profile_boot_class_path_;
  const uint32_t extra_flags_;
  const ProfileCompilationInfo::ProfileSampleAnnotation annotation_;
  ArenaStack arena_stack_;
  ScopedArenaAllocator allocator_;
  // Holds collected class loaders alive so their classes cannot be unloaded
  // while method pointers in `dex_file_records_map_` are in use.
  std::optional<VariableSizedHandleScope> class_loaders_;
  DexFileRecordsMap dex_file_records_map_;

  static_assert(Primitive::kPrimLast == Primitive::kPrimVoid);  // There are no arrays of void.
  // Maximum observed array dimension per primitive type (boot class path only).
  std::array<uint8_t, static_cast<size_t>(Primitive::kPrimLast)> max_primitive_array_dimensions_;

  size_t number_of_hot_methods_;
  size_t number_of_sampled_methods_;
};
423
// Walks one class loader's class table and records, per dex file, the type
// index, array dimension, and methods array of each resolved class defined by
// that loader. Primitive array dimensions are tracked separately.
template <bool kBootClassLoader>
void ProfileSaver::GetClassesAndMethodsHelper::CollectInternal(
    ObjPtr<mirror::ClassLoader> class_loader) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  DCHECK_EQ(kBootClassLoader, class_loader == nullptr);

  // If the class loader has not loaded any classes, it may have a null table.
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ClassTable* const table =
      class_linker->ClassTableForClassLoader(kBootClassLoader ? nullptr : class_loader);
  if (table == nullptr) {
    return;
  }

  // Move members to local variables to allow the compiler to optimize this properly.
  const bool startup = startup_;
  table->Visit([&](ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kBootClassLoader ? (!klass->IsBootStrapClassLoaded())
                         : (klass->GetClassLoader() != class_loader)) {
      // To avoid processing a class more than once, we process each class only
      // when we encounter it in the defining class loader's class table.
      // This class has a different defining class loader, skip it.
      return true;
    }

    uint16_t dim = 0u;
    ObjPtr<mirror::Class> k = klass;
    if (klass->IsArrayClass()) {
      DCHECK_EQ(klass->NumMethods(), 0u);  // No methods to collect.
      if (!ShouldCollectClasses(startup)) {
        return true;
      }
      // Strip array dimensions down to the element class, counting them.
      do {
        DCHECK(k->IsResolved());  // Array classes are always resolved.
        ++dim;
        // At the time of array class creation, the element type is already either
        // resolved or erroneous unresolved and either shall remain an invariant.
        // Similarly, the access flag indicating a proxy class is an invariant.
        // Read barrier is unnecessary for reading a chain of constant references
        // in order to read primitive fields to check such invariants, or to read
        // other constant primitive fields (dex file, primitive type) below.
        k = k->GetComponentType<kDefaultVerifyFlags, kWithoutReadBarrier>();
      } while (k->IsArrayClass());

      DCHECK(kBootClassLoader || !k->IsPrimitive());
      if (kBootClassLoader && UNLIKELY(k->IsPrimitive())) {
        // Primitive arrays have no dex file entry; record only the max dimension.
        size_t index = enum_cast<size_t>(k->GetPrimitiveType());
        DCHECK_LT(index, max_primitive_array_dimensions_.size());
        if (dim > max_primitive_array_dimensions_[index]) {
          // Enforce an upper limit of 255 for primitive array dimensions.
          max_primitive_array_dimensions_[index] =
              std::min<size_t>(dim, std::numeric_limits<uint8_t>::max());
        }
        return true;
      }

      // Attribute the array class to the defining dex file of the element class.
      DCHECK_EQ(klass->GetCopiedMethodsStartOffset(), 0u);
      DCHECK(klass->GetMethodsPtr() == nullptr);
    } else {
      // Non-array class. There is no need to collect primitive types.
      DCHECK(kBootClassLoader || !k->IsPrimitive());
      if (kBootClassLoader && UNLIKELY(klass->IsPrimitive())) {
        DCHECK(profile_boot_class_path_);
        DCHECK_EQ(klass->NumMethods(), 0u);  // No methods to collect.
        return true;
      }
    }

    // Skip unresolved classes and proxies; they cannot be profiled usefully.
    if (!k->IsResolved() || k->IsProxyClass()) {
      return true;
    }

    const DexFile& dex_file = k->GetDexFile();
    dex::TypeIndex type_index = k->GetDexTypeIndex();
    uint32_t copied_methods_start = klass->GetCopiedMethodsStartOffset();
    LengthPrefixedArray<ArtMethod>* methods = klass->GetMethodsPtr();
    if (methods != nullptr) {
      CHECK_LE(copied_methods_start, methods->size()) << k->PrettyClass();
    }

    // Find or lazily create the per-dex-file record collection.
    DexFileRecords* dex_file_records;
    auto it = dex_file_records_map_.find(&dex_file);
    if (it != dex_file_records_map_.end()) {
      dex_file_records = it->second;
    } else {
      dex_file_records = new (&allocator_) DexFileRecords(&allocator_);
      dex_file_records_map_.insert(std::make_pair(&dex_file, dex_file_records));
    }
    dex_file_records->class_records.push_back(
        ClassRecord{type_index, dim, copied_methods_start, methods});
    return true;
  });
}
518
// Collects classes and methods from all class loaders (and optionally the
// boot class loader) while holding the mutator lock, then attributes copied
// methods to the dex files that define them.
void ProfileSaver::GetClassesAndMethodsHelper::CollectClasses(Thread* self) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  // Collect class loaders into a `VariableSizedHandleScope` to prevent contention
  // problems on the class_linker_classes_lock. Hold those class loaders in
  // a member variable to keep them alive and prevent unloading their classes,
  // so that methods referenced in collected `DexFileRecords` remain valid.
  class_loaders_.emplace(self);
  Runtime::Current()->GetClassLinker()->GetClassLoaders(self, &class_loaders_.value());

  // Collect classes and their method array pointers.
  if (profile_boot_class_path_) {
    // Collect classes from the boot class loader since visit classloaders doesn't visit it.
    CollectInternal</*kBootClassLoader=*/ true>(/*class_loader=*/ nullptr);
  }
  {
    CollectInternalVisitor visitor(this);
    class_loaders_->VisitRoots(visitor);
  }

  // Attribute copied methods to defining dex files while holding the mutator lock.
  for (const auto& entry : dex_file_records_map_) {
    const DexFile* dex_file = entry.first;
    DexFileRecords* dex_file_records = entry.second;

    for (const ClassRecord& class_record : dex_file_records->class_records) {
      LengthPrefixedArray<ArtMethod>* methods = class_record.methods;
      if (methods == nullptr) {
        continue;
      }
      // Copied methods occupy the tail of the methods array, starting at
      // `copied_methods_start`.
      const size_t methods_size = methods->size();
      CHECK_LE(class_record.copied_methods_start, methods_size)
          << dex_file->PrettyType(class_record.type_index);
      for (size_t index = class_record.copied_methods_start; index != methods_size; ++index) {
        // Note: Using `ArtMethod` array with implicit `kRuntimePointerSize`.
        ArtMethod& method = methods->At(index);
        CHECK(method.IsCopied()) << dex_file->PrettyType(class_record.type_index);
        CHECK(!method.IsNative()) << dex_file->PrettyType(class_record.type_index);
        if (method.IsInvokable()) {
          const DexFile* method_dex_file = method.GetDexFile();
          DexFileRecords* method_dex_file_records = dex_file_records;
          if (method_dex_file != dex_file) {
            auto it = dex_file_records_map_.find(method_dex_file);
            if (it == dex_file_records_map_.end()) {
              // We have not seen any class in the dex file that defines the interface with this
              // copied method. This can happen if the interface is in the boot class path and
              // we are not profiling boot class path; or when we first visit classes for the
              // interface's defining class loader before it has any resolved classes and then
              // the interface is resolved and an implementing class is defined in a child class
              // loader before we visit that child class loader's classes.
              continue;
            }
            method_dex_file_records = it->second;
          }
          method_dex_file_records->copied_methods.push_back(&method);
        }
      }
    }
  }
}
579
// Writes the collected classes and methods into `profile_info`, restricted to
// dex files whose base location appears in `locations`. Also updates the
// hot/sampled method counters read by GetNumberOf{Hot,Sampled}Methods().
void ProfileSaver::GetClassesAndMethodsHelper::UpdateProfile(const std::set<std::string>& locations,
                                                             ProfileCompilationInfo* profile_info) {
  // Move members to local variables to allow the compiler to optimize this properly.
  const bool startup = startup_;
  const uint32_t base_flags =
      (startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup) | extra_flags_;

  // Collect the number of hot and sampled methods.
  size_t number_of_hot_methods = 0u;
  size_t number_of_sampled_methods = 0u;

  uint16_t initial_value = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  // Classify a method as hot, sampled, or neither (flag value 0).
  auto get_method_flags = [&](ArtMethod& method) {
    // Mark methods as hot if they are marked as such (warm for the runtime
    // means hot for the profile).
    if (method.PreviouslyWarm()) {
      ++number_of_hot_methods;
      return enum_cast<ProfileCompilationInfo::MethodHotness::Flag>(base_flags | Hotness::kFlagHot);
    } else if (method.CounterHasChanged(initial_value)) {
      ++number_of_sampled_methods;
      return enum_cast<ProfileCompilationInfo::MethodHotness::Flag>(base_flags);
    } else {
      return enum_cast<ProfileCompilationInfo::MethodHotness::Flag>(0u);
    }
  };

  // Use a single string for array descriptors to avoid too many reallocations.
  std::string array_class_descriptor;

  // Process classes and methods.
  for (const auto& entry : dex_file_records_map_) {
    const DexFile* dex_file = entry.first;
    const DexFileRecords* dex_file_records = entry.second;

    // Check if this is a profiled dex file.
    const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
    if (locations.find(base_location) == locations.end()) {
      continue;
    }

    // Get the profile index.
    ProfileCompilationInfo::ProfileIndexType profile_index =
        profile_info->FindOrAddDexFile(*dex_file, annotation_);
    if (profile_index == ProfileCompilationInfo::MaxProfileIndex()) {
      // Error adding dex file to the `profile_info`.
      continue;
    }

    for (const ClassRecord& class_record : dex_file_records->class_records) {
      if (class_record.array_dimension != 0u) {
        DCHECK(ShouldCollectClasses(startup));
        DCHECK(class_record.methods == nullptr);  // No methods to process.
        // Rebuild the array descriptor, e.g. "[[I", from dimension + element type.
        array_class_descriptor.assign(class_record.array_dimension, '[');
        array_class_descriptor += dex_file->GetTypeDescriptorView(class_record.type_index);
        dex::TypeIndex type_index =
            profile_info->FindOrCreateTypeIndex(*dex_file, array_class_descriptor);
        if (type_index.IsValid()) {
          profile_info->AddClass(profile_index, type_index);
        }
      } else {
        // Non-array class.
        if (ShouldCollectClasses(startup)) {
          profile_info->AddClass(profile_index, class_record.type_index);
        }
        // Only declared methods are processed here; copied methods were
        // attributed to their defining dex files in CollectClasses().
        const size_t num_declared_methods = class_record.copied_methods_start;
        LengthPrefixedArray<ArtMethod>* methods = class_record.methods;
        for (size_t index = 0; index != num_declared_methods; ++index) {
          // Note: Using `ArtMethod` array with implicit `kRuntimePointerSize`.
          ArtMethod& method = methods->At(index);
          DCHECK(!method.IsCopied());
          // We do not record native methods. Once we AOT-compile the app,
          // all native methods shall have their JNI stubs compiled.
          if (method.IsInvokable() && !method.IsNative()) {
            ProfileCompilationInfo::MethodHotness::Flag flags = get_method_flags(method);
            if (flags != 0u) {
              profile_info->AddMethod(profile_index, method.GetDexMethodIndex(), flags);
            }
          }
        }
      }
    }

    for (ArtMethod* method : dex_file_records->copied_methods) {
      DCHECK(method->IsCopied());
      DCHECK(method->IsInvokable());
      DCHECK(!method->IsNative());
      ProfileCompilationInfo::MethodHotness::Flag flags = get_method_flags(*method);
      if (flags != 0u) {
        profile_info->AddMethod(profile_index, method->GetDexMethodIndex(), flags);
      }
    }
  }

  if (profile_boot_class_path_) {
    // Attribute primitive arrays to the first dex file in the boot class path (should
    // be core-oj). We collect primitive array types to know the needed dimensions.
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    DCHECK(!class_linker->GetBootClassPath().empty());
    const DexFile* dex_file = class_linker->GetBootClassPath().front();
    ProfileCompilationInfo::ProfileIndexType profile_index =
        profile_info->FindOrAddDexFile(*dex_file, annotation_);
    if (profile_index != ProfileCompilationInfo::MaxProfileIndex()) {
      for (size_t i = 0; i != max_primitive_array_dimensions_.size(); ++i) {
        size_t max_dim = max_primitive_array_dimensions_[i];
        // Insert descriptors for all dimensions up to `max_dim`.
        for (size_t dim = 1; dim <= max_dim; ++dim) {
          array_class_descriptor.assign(dim, '[');
          array_class_descriptor += Primitive::Descriptor(enum_cast<Primitive::Type>(i));
          dex::TypeIndex type_index =
              profile_info->FindOrCreateTypeIndex(*dex_file, array_class_descriptor);
          if (type_index.IsValid()) {
            profile_info->AddClass(profile_index, type_index);
          }
        }
      }
    } else {
      // Error adding dex file to the `profile_info`.
    }
  } else {
    DCHECK(std::all_of(max_primitive_array_dimensions_.begin(),
                       max_primitive_array_dimensions_.end(),
                       [](uint8_t dim) { return dim == 0u; }));
  }

  // Store the number of hot and sampled methods.
  number_of_hot_methods_ = number_of_hot_methods;
  number_of_sampled_methods_ = number_of_sampled_methods;
}
708
// Collects currently-loaded classes and methods (under the mutator lock) and
// merges them into the in-memory per-file profile cache. `startup` selects
// whether methods are flagged as startup or post-startup.
void ProfileSaver::FetchAndCacheResolvedClassesAndMethods(bool startup) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const uint64_t start_time = NanoTime();

  // Resolve any new registered locations.
  ResolveTrackedLocations();

  Thread* const self = Thread::Current();
  pthread_t profiler_pthread;
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_pthread = profiler_pthread_;
  }

  size_t number_of_hot_methods = 0u;
  size_t number_of_sampled_methods = 0u;
  {
    // Restore profile saver thread priority while holding the mutator lock. This helps
    // prevent priority inversions blocking the GC for long periods of time.
    // Only restore default priority if we are the profile saver thread. Other threads
    // that call this are threads calling Stop and the signal catcher (for SIGUSR1).
    std::optional<ScopedDefaultPriority> sdp = std::nullopt;
    if (pthread_self() == profiler_pthread) {
      sdp.emplace(profiler_pthread);
    }

    ScopedObjectAccess soa(self);
    GetClassesAndMethodsHelper helper(startup, options_, GetProfileSampleAnnotation());
    helper.CollectClasses(self);

    // Release the mutator lock. We shall need to re-acquire the lock for a moment to
    // destroy the `VariableSizedHandleScope` inside the `helper` which shall be
    // conveniently handled by destroying `sts`, then `helper` and then `soa`.
    ScopedThreadSuspension sts(self, ThreadState::kNative);
    // Get back to the previous thread priority. We shall not increase the priority
    // for the short time we need to re-acquire mutator lock for `helper` destructor.
    sdp.reset();

    MutexLock mu(self, *Locks::profiler_lock_);
    for (const auto& it : tracked_dex_base_locations_) {
      const std::string& filename = it.first;
      // Lazily create the cached in-memory profile for this file.
      auto info_it = profile_cache_.find(filename);
      if (info_it == profile_cache_.end()) {
        info_it = profile_cache_.Put(
            filename,
            new ProfileCompilationInfo(
                Runtime::Current()->GetArenaPool(), options_.GetProfileBootClassPath()));
      }
      ProfileCompilationInfo* cached_info = info_it->second;

      const std::set<std::string>& locations = it.second;
      VLOG(profiler) << "Locations for " << it.first << " " << android::base::Join(locations, ':');
      helper.UpdateProfile(locations, cached_info);

      // Update statistics. Note that a method shall be counted for each
      // tracked location that covers the dex file where it is defined.
      number_of_hot_methods += helper.GetNumberOfHotMethods();
      number_of_sampled_methods += helper.GetNumberOfSampledMethods();
    }
  }
  VLOG(profiler) << "Profile saver recorded " << number_of_hot_methods
                 << " hot methods and " << number_of_sampled_methods
                 << " sampled methods in " << PrettyDuration(NanoTime() - start_time);
}
773
// Processes the profiling info collected by the JIT and writes it to the
// tracked profile files.
//
// For each tracked profile file this:
//   1. queries the JIT code cache for profiled methods belonging to the
//      file's dex locations,
//   2. loads the existing on-disk profile,
//   3. adds the new method data plus any profile info cached during startup,
//   4. saves the file when enough new methods/classes accumulated, or
//      unconditionally when `force_save` is set (`artd` relies on forced
//      saves always writing).
//
// `number_of_new_methods`, if non-null, receives the largest per-file delta
// of newly recorded methods. Returns true if at least one profile file was
// actually written to disk.
bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  // Resolve any new registered locations.
  ResolveTrackedLocations();

  std::vector<std::pair<std::string, std::set<std::string>>> tracked_locations;
  SafeMap<std::string, AppInfo::CodeType> profile_to_code_type;
  {
    // Make a copy so that we don't hold the lock while doing I/O.
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    tracked_locations.assign(tracked_dex_base_locations_.begin(),
                             tracked_dex_base_locations_.end());
    for (const auto& [key, value] : tracked_profiles_) {
      profile_to_code_type.Put(key, value.second);
    }
  }

  // Put "primary.prof" at the end. `artd` relies on the fact that "primary.prof" is the last one to
  // write when it waits for a profile save to be done.
  std::sort(tracked_locations.begin(),
            tracked_locations.end(),
            [&](const auto& pair1, const auto& pair2) {
              return profile_to_code_type.Get(pair1.first) != AppInfo::CodeType::kPrimaryApk &&
                     profile_to_code_type.Get(pair2.first) == AppInfo::CodeType::kPrimaryApk;
            });

  bool profile_file_saved = false;
  if (number_of_new_methods != nullptr) {
    *number_of_new_methods = 0;
  }

  // Merge the post-startup resolved classes/methods into the in-memory cache.
  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);

  for (const auto& it : tracked_locations) {
    if (!force_save && ShuttingDown(Thread::Current())) {
      // The ProfileSaver is in shutdown mode, meaning a stop request was made and
      // we need to exit cleanly (by waiting for the saver thread to finish). Unless
      // we have a request for a forced save, do not do any processing so that we
      // speed up the exit.
      return true;
    }
    const std::string& filename = it.first;
    const std::set<std::string>& locations = it.second;
    VLOG(profiler) << "Tracked filename " << filename << " locations "
                   << android::base::Join(locations, ":");

    // Query the JIT code cache for profiled methods covering these locations.
    std::vector<ProfileMethodInfo> profile_methods;
    {
      ScopedObjectAccess soa(Thread::Current());
      jit_code_cache_->GetProfiledMethods(
          locations, profile_methods, options_.GetInlineCacheThreshold());
      total_number_of_code_cache_queries_++;
    }
    {
      ProfileCompilationInfo info(Runtime::Current()->GetArenaPool(),
                                  /*for_boot_image=*/options_.GetProfileBootClassPath());
      // Load the existing profile before saving.
      // If the file is updated between `Load` and `Save`, the update will be lost. This is
      // acceptable. The main reason is that the lost entries will eventually come back if the user
      // keeps using the same methods, or they won't be needed if the user doesn't use the same
      // methods again.
      if (!info.Load(filename, /*clear_if_invalid=*/true)) {
        LOG(WARNING) << "Could not forcefully load profile " << filename;
        continue;
      }

      // Remember the pre-merge counts so we can compute deltas below.
      uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
      uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
      VLOG(profiler) << "last_save_number_of_methods=" << last_save_number_of_methods
                     << " last_save_number_of_classes=" << last_save_number_of_classes
                     << " number of profiled methods=" << profile_methods.size();

      // Try to add the method data. Note this may fail if the profile loaded from disk contains
      // outdated data (e.g. the previous profiled dex files might have been updated).
      // If this happens we clear the profile data and force the save to ensure the file is cleared.
      if (!info.AddMethods(
              profile_methods,
              AnnotateSampleFlags(Hotness::kFlagHot | Hotness::kFlagPostStartup),
              GetProfileSampleAnnotation())) {
        LOG(WARNING) << "Could not add methods to the existing profiler. "
                     << "Clearing the profile data.";
        info.ClearData();
        force_save = true;
      }

      {
        MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
        // Merge in any profile info cached during startup for this file.
        auto profile_cache_it = profile_cache_.find(filename);
        if (profile_cache_it != profile_cache_.end()) {
          if (!info.MergeWith(*(profile_cache_it->second))) {
            LOG(WARNING) << "Could not merge the profile. Clearing the profile data.";
            info.ClearData();
            force_save = true;
          }
        } else if (VLOG_IS_ON(profiler)) {
          LOG(INFO) << "Failed to find cached profile for " << filename;
          for (auto&& pair : profile_cache_) {
            LOG(INFO) << "Cached profile " << pair.first;
          }
        }

        int64_t delta_number_of_methods =
            info.GetNumberOfMethods() - last_save_number_of_methods;
        int64_t delta_number_of_classes =
            info.GetNumberOfResolvedClasses() - last_save_number_of_classes;

        // Always write on a forced save. `artd` relies on the fact that profiles are always
        // written when it waits for a forced profile save to be done.
        if (!force_save &&
            delta_number_of_methods < options_.GetMinMethodsToSave() &&
            delta_number_of_classes < options_.GetMinClassesToSave()) {
          VLOG(profiler) << "Not enough information to save to: " << filename
                         << " Number of methods: " << delta_number_of_methods
                         << " Number of classes: " << delta_number_of_classes;
          total_number_of_skipped_writes_++;
          continue;
        }

        if (number_of_new_methods != nullptr) {
          // NOTE(review): delta_number_of_methods can be negative (after ClearData)
          // or exceed uint16_t range; the cast below wraps in those cases —
          // confirm callers tolerate this.
          *number_of_new_methods =
              std::max(static_cast<uint16_t>(delta_number_of_methods),
                       *number_of_new_methods);
        }
        uint64_t bytes_written;
        // Force the save. In case the profile data is corrupted or the profile
        // has the wrong version this will "fix" the file to the correct format.
        if (info.Save(filename, &bytes_written, force_save)) {
          // We managed to save the profile. Clear the cache stored during startup.
          if (profile_cache_it != profile_cache_.end()) {
            ProfileCompilationInfo *cached_info = profile_cache_it->second;
            profile_cache_.erase(profile_cache_it);
            delete cached_info;
          }
          if (bytes_written > 0) {
            total_number_of_writes_++;
            total_bytes_written_ += bytes_written;
            profile_file_saved = true;
          } else {
            // At this point we could still have avoided the write.
            // We load and merge the data from the file lazily at its first ever
            // save attempt. So, whatever we are trying to save could already be
            // in the file.
            total_number_of_skipped_writes_++;
          }
        } else {
          LOG(WARNING) << "Could not save profiling info to " << filename;
          total_number_of_failed_writes_++;
        }
      }
    }
  }

  // Trim the maps to madvise the pages used for profile info.
  // It is unlikely we will need them again in the near future.
  Runtime::Current()->GetArenaPool()->TrimMaps();

  return profile_file_saved;
}
933
// Entry point of the profile saver pthread (see Start()). Attaches the thread
// to the runtime as a daemon, runs the saver loop, then detaches.
void* ProfileSaver::RunProfileSaverThread(void* arg) {
  Runtime* runtime = Runtime::Current();

  bool attached = runtime->AttachCurrentThread("Profile Saver",
                                               /*as_daemon=*/true,
                                               runtime->GetSystemThreadGroup(),
                                               /*create_peer=*/true);
  if (!attached) {
    // Attaching can only fail while the runtime is shutting down; exit quietly.
    CHECK(runtime->IsShuttingDown(Thread::Current()));
    return nullptr;
  }

  {
    // NOTE(review): the lock is acquired here with no visible unlock in this
    // scope — Run() is presumably responsible for releasing profiler_lock_
    // (TODO: confirm against the RELEASE annotation on Run() in the header).
    Locks::profiler_lock_->ExclusiveLock(Thread::Current());
    CHECK_EQ(reinterpret_cast<ProfileSaver*>(arg), instance_);
    instance_->Run();
  }

  runtime->DetachCurrentThread();
  VLOG(profiler) << "Profile saver shutdown";
  return nullptr;
}
956
ShouldProfileLocation(const std::string & location,bool profile_aot_code)957 static bool ShouldProfileLocation(const std::string& location, bool profile_aot_code) {
958 if (profile_aot_code) {
959 // If we have to profile all the code, irrespective of its compilation state, return true
960 // right away.
961 return true;
962 }
963
964 OatFileManager& oat_manager = Runtime::Current()->GetOatFileManager();
965 const OatFile* oat_file = oat_manager.FindOpenedOatFileFromDexLocation(location);
966 if (oat_file == nullptr) {
967 // This can happen if we fallback to run code directly from the APK.
968 // Profile it with the hope that the background dexopt will get us back into
969 // a good state.
970 VLOG(profiler) << "Asked to profile a location without an oat file:" << location;
971 return true;
972 }
973 CompilerFilter::Filter filter = oat_file->GetCompilerFilter();
974 if ((filter == CompilerFilter::kSpeed) || (filter == CompilerFilter::kEverything)) {
975 VLOG(profiler)
976 << "Skip profiling oat file because it's already speed|everything compiled: "
977 << location << " oat location: " << oat_file->GetLocation();
978 return false;
979 }
980 return true;
981 }
982
// Starts the profile saver, or registers additional code paths with an
// already-running instance.
//
// `output_filename` names the profile file the given `code_paths` are
// recorded into; `ref_profile_filename` is the associated reference profile
// and `code_type` classifies the code (e.g. kPrimaryApk). May be called
// multiple times (e.g. via Runtime::registerAppInfo for secondary dexes);
// only the first effective call creates the saver thread.
void ProfileSaver::Start(const ProfileSaverOptions& options,
                         const std::string& output_filename,
                         jit::JitCodeCache* jit_code_cache,
                         const std::vector<std::string>& code_paths,
                         const std::string& ref_profile_filename,
                         AppInfo::CodeType code_type) {
  Runtime* const runtime = Runtime::Current();
  DCHECK(options.IsEnabled());
  DCHECK(runtime->GetJit() != nullptr);
  DCHECK(!output_filename.empty());
  DCHECK(jit_code_cache != nullptr);

  // Filter out locations that are already fully compiled (unless profiling
  // AOT code was requested).
  std::vector<std::string> code_paths_to_profile;
  for (const std::string& location : code_paths) {
    if (ShouldProfileLocation(location, options.GetProfileAOTCode())) {
      VLOG(profiler) << "Code path to profile " << location;
      code_paths_to_profile.push_back(location);
    }
  }

  MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
  // Support getting profile samples for the boot class path. This will be used to generate the boot
  // image profile. The intention is to use this code to generate the boot image but not use it in
  // production. b/37966211
  if (options.GetProfileBootClassPath()) {
    std::set<std::string> code_paths_keys;
    for (const std::string& location : code_paths) {
      // Use the profile base key for checking file uniqueness (as it is constructed solely based
      // on the location and ignores other metadata like origin package).
      code_paths_keys.insert(ProfileCompilationInfo::GetProfileDexFileBaseKey(location));
    }
    for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
      // Don't check ShouldProfileLocation since the boot class path may be speed compiled.
      const std::string& location = dex_file->GetLocation();
      const std::string key = ProfileCompilationInfo::GetProfileDexFileBaseKey(location);
      VLOG(profiler) << "Registering boot dex file " << location;
      if (code_paths_keys.find(key) != code_paths_keys.end()) {
        LOG(WARNING) << "Boot class path location key conflicts with code path " << location;
      } else if (instance_ == nullptr) {
        // Only add the boot class path once since Start may be called multiple times for secondary
        // dexes.
        // We still do the collision check above. This handles any secondary dexes that conflict
        // with the boot class path dex files.
        code_paths_to_profile.push_back(location);
      }
    }
  }
  if (code_paths_to_profile.empty()) {
    VLOG(profiler) << "No code paths should be profiled.";
    return;
  }

  if (instance_ != nullptr) {
    // If we already have an instance, make sure it uses the same jit_code_cache.
    // This may be called multiple times via Runtime::registerAppInfo (e.g. for
    // apps which share the same runtime).
    DCHECK_EQ(instance_->jit_code_cache_, jit_code_cache);
    // Add the code_paths to the tracked locations.
    instance_->AddTrackedLocations(
        output_filename, code_paths_to_profile, ref_profile_filename, code_type);
    return;
  }

  VLOG(profiler) << "Starting profile saver using output file: " << output_filename
                 << ". Tracking: " << android::base::Join(code_paths_to_profile, ':')
                 << ". With reference profile: " << ref_profile_filename;

  // First call: create the singleton, register locations, and spawn the
  // saver thread.
  instance_ = new ProfileSaver(options, jit_code_cache);
  instance_->AddTrackedLocations(
      output_filename, code_paths_to_profile, ref_profile_filename, code_type);

  // Create a new thread which does the saving.
  CHECK_PTHREAD_CALL(
      pthread_create,
      (&profiler_pthread_, nullptr, &RunProfileSaverThread, reinterpret_cast<void*>(instance_)),
      "Profile saver thread");

  SetProfileSaverThreadPriority(profiler_pthread_, kProfileSaverPthreadPriority);
}
1062
// Stops the profile saver: marks the instance as shutting down, wakes the
// saver thread, force-saves all profiling info, joins the thread and deletes
// the instance. Must be called after Start() and at most once; `dump_info`
// additionally logs the saver statistics.
void ProfileSaver::Stop(bool dump_info) {
  ProfileSaver* profile_saver = nullptr;
  pthread_t profiler_pthread = 0U;

  {
    MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
    VLOG(profiler) << "Stopping profile saver thread";
    profile_saver = instance_;
    profiler_pthread = profiler_pthread_;
    if (instance_ == nullptr) {
      DCHECK(false) << "Tried to stop a profile saver which was not started";
      return;
    }
    if (instance_->shutting_down_) {
      DCHECK(false) << "Tried to stop the profile saver twice";
      return;
    }
    instance_->shutting_down_ = true;
  }

  {
    // Wake up the saver thread if it is sleeping to allow for a clean exit.
    MutexLock wait_mutex(Thread::Current(), profile_saver->wait_lock_);
    profile_saver->period_condition_.Signal(Thread::Current());
  }

  // Force save everything before destroying the thread since we want profiler_pthread_ to remain
  // valid.
  profile_saver->ProcessProfilingInfo(/*force_save=*/ true, /*number_of_new_methods=*/ nullptr);

  // Wait for the saver thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");

  {
    MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
    if (dump_info) {
      instance_->DumpInfo(LOG_STREAM(INFO));
    }
    instance_ = nullptr;
    profiler_pthread_ = 0U;
  }
  delete profile_saver;
}
1106
ShuttingDown(Thread * self)1107 bool ProfileSaver::ShuttingDown(Thread* self) {
1108 MutexLock mu(self, *Locks::profiler_lock_);
1109 return shutting_down_;
1110 }
1111
IsStarted()1112 bool ProfileSaver::IsStarted() {
1113 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
1114 return instance_ != nullptr;
1115 }
1116
AddTrackedLocationsToMap(const std::string & output_filename,const std::vector<std::string> & code_paths,SafeMap<std::string,std::set<std::string>> * map)1117 static void AddTrackedLocationsToMap(const std::string& output_filename,
1118 const std::vector<std::string>& code_paths,
1119 SafeMap<std::string, std::set<std::string>>* map) {
1120 std::vector<std::string> code_paths_and_filenames;
1121 // The dex locations are sometimes set to the filename instead of the full path.
1122 // So make sure we have both "locations" when tracking what needs to be profiled.
1123 // - apps + system server have filenames
1124 // - boot classpath elements have full paths
1125
1126 // TODO(calin, ngeoffray, vmarko) This is an workaround for using filanames as
1127 // dex locations - needed to prebuilt with a partial boot image
1128 // (commit: c4a924d8c74241057d957d360bf31cd5cd0e4f9c).
1129 // We should find a better way which allows us to do the tracking based on full paths.
1130 for (const std::string& path : code_paths) {
1131 size_t last_sep_index = path.find_last_of('/');
1132 if (path.empty() || last_sep_index == path.size() - 1) {
1133 // Should not happen, but anyone can register code paths so better be prepared and ignore
1134 // such locations.
1135 continue;
1136 }
1137 std::string filename = last_sep_index == std::string::npos
1138 ? path
1139 : path.substr(last_sep_index + 1);
1140
1141 code_paths_and_filenames.push_back(path);
1142 code_paths_and_filenames.push_back(filename);
1143 }
1144
1145 auto it = map->FindOrAdd(output_filename);
1146 it->second.insert(code_paths_and_filenames.begin(), code_paths_and_filenames.end());
1147 }
1148
AddTrackedLocations(const std::string & output_filename,const std::vector<std::string> & code_paths,const std::string & ref_profile_filename,AppInfo::CodeType code_type)1149 void ProfileSaver::AddTrackedLocations(const std::string& output_filename,
1150 const std::vector<std::string>& code_paths,
1151 const std::string& ref_profile_filename,
1152 AppInfo::CodeType code_type) {
1153 // Register the output profile and its reference profile.
1154 auto it = tracked_profiles_.find(output_filename);
1155 if (it == tracked_profiles_.end()) {
1156 tracked_profiles_.Put(output_filename, std::make_pair(ref_profile_filename, code_type));
1157 }
1158
1159 // Add the code paths to the list of tracked location.
1160 AddTrackedLocationsToMap(output_filename, code_paths, &tracked_dex_base_locations_);
1161 // The code paths may contain symlinks which could fool the profiler.
1162 // If the dex file is compiled with an absolute location but loaded with symlink
1163 // the profiler could skip the dex due to location mismatch.
1164 // To avoid this, we add the code paths to the temporary cache of 'to_be_resolved'
1165 // locations. When the profiler thread executes we will resolve the paths to their
1166 // real paths.
1167 // Note that we delay taking the realpath to avoid spending more time than needed
1168 // when registering location (as it is done during app launch).
1169 AddTrackedLocationsToMap(output_filename,
1170 code_paths,
1171 &tracked_dex_base_locations_to_be_resolved_);
1172 }
1173
DumpInstanceInfo(std::ostream & os)1174 void ProfileSaver::DumpInstanceInfo(std::ostream& os) {
1175 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
1176 if (instance_ != nullptr) {
1177 instance_->DumpInfo(os);
1178 }
1179 }
1180
DumpInfo(std::ostream & os)1181 void ProfileSaver::DumpInfo(std::ostream& os) {
1182 os << "ProfileSaver total_bytes_written=" << total_bytes_written_ << '\n'
1183 << "ProfileSaver total_number_of_writes=" << total_number_of_writes_ << '\n'
1184 << "ProfileSaver total_number_of_code_cache_queries="
1185 << total_number_of_code_cache_queries_ << '\n'
1186 << "ProfileSaver total_number_of_skipped_writes=" << total_number_of_skipped_writes_ << '\n'
1187 << "ProfileSaver total_number_of_failed_writes=" << total_number_of_failed_writes_ << '\n'
1188 << "ProfileSaver total_ms_of_sleep=" << total_ms_of_sleep_ << '\n'
1189 << "ProfileSaver total_ms_of_work=" << NsToMs(total_ns_of_work_) << '\n'
1190 << "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n'
1191 << "ProfileSaver total_number_of_wake_ups=" << total_number_of_wake_ups_ << '\n';
1192 }
1193
1194
ForceProcessProfiles()1195 void ProfileSaver::ForceProcessProfiles() {
1196 ProfileSaver* saver = nullptr;
1197 {
1198 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
1199 saver = instance_;
1200 }
1201 // TODO(calin): this is not actually thread safe as the instance_ may have been deleted,
1202 // but we only use this in testing when we now this won't happen.
1203 // Refactor the way we handle the instance so that we don't end up in this situation.
1204 if (saver != nullptr) {
1205 saver->ProcessProfilingInfo(/*force_save=*/ true, /*number_of_new_methods=*/ nullptr);
1206 }
1207 }
1208
ResolveTrackedLocations()1209 void ProfileSaver::ResolveTrackedLocations() {
1210 SafeMap<std::string, std::set<std::string>> locations_to_be_resolved;
1211 {
1212 // Make a copy so that we don't hold the lock while doing I/O.
1213 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
1214 locations_to_be_resolved = tracked_dex_base_locations_to_be_resolved_;
1215 tracked_dex_base_locations_to_be_resolved_.clear();
1216 }
1217
1218 // Resolve the locations.
1219 SafeMap<std::string, std::vector<std::string>> resolved_locations_map;
1220 for (const auto& it : locations_to_be_resolved) {
1221 const std::string& filename = it.first;
1222 const std::set<std::string>& locations = it.second;
1223 auto resolved_locations_it = resolved_locations_map.Put(filename, std::vector<std::string>());
1224 resolved_locations_it->second.reserve(locations.size());
1225
1226 for (const auto& location : locations) {
1227 UniqueCPtr<const char[]> location_real(realpath(location.c_str(), nullptr));
1228 // Note that it's ok if we cannot get the real path.
1229 if (location_real != nullptr) {
1230 resolved_locations_it->second.emplace_back(location_real.get());
1231 }
1232 }
1233 }
1234
1235 // Add the resolved locations to the tracked collection.
1236 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
1237 for (const auto& it : resolved_locations_map) {
1238 AddTrackedLocationsToMap(it.first, it.second, &tracked_dex_base_locations_);
1239 }
1240 }
1241
GetProfileSampleAnnotation()1242 ProfileCompilationInfo::ProfileSampleAnnotation ProfileSaver::GetProfileSampleAnnotation() {
1243 // Ideally, this would be cached in the ProfileSaver class, when we start the thread.
1244 // However the profile is initialized before the process package name is set and fixing this
1245 // would require unnecessary complex synchronizations.
1246 std::string package_name = Runtime::Current()->GetProcessPackageName();
1247 if (package_name.empty()) {
1248 package_name = "unknown";
1249 }
1250 // We only use annotation for the boot image profiles. Regular apps do not use the extra
1251 // metadata and as such there is no need to pay the cost (storage and computational)
1252 // that comes with the annotations.
1253 return options_.GetProfileBootClassPath()
1254 ? ProfileCompilationInfo::ProfileSampleAnnotation(package_name)
1255 : ProfileCompilationInfo::ProfileSampleAnnotation::kNone;
1256 }
1257
GetExtraMethodHotnessFlags(const ProfileSaverOptions & options)1258 uint32_t ProfileSaver::GetExtraMethodHotnessFlags(const ProfileSaverOptions& options) {
1259 // We only add the extra flags for the boot image profile because individual apps do not use
1260 // this information.
1261 if (options.GetProfileBootClassPath()) {
1262 return Is64BitInstructionSet(Runtime::Current()->GetInstructionSet())
1263 ? Hotness::kFlag64bit
1264 : Hotness::kFlag32bit;
1265 } else {
1266 return 0u;
1267 }
1268 }
1269
AnnotateSampleFlags(uint32_t flags)1270 Hotness::Flag ProfileSaver::AnnotateSampleFlags(uint32_t flags) {
1271 uint32_t extra_flags = GetExtraMethodHotnessFlags(options_);
1272 return static_cast<Hotness::Flag>(flags | extra_flags);
1273 }
1274
1275 } // namespace art
1276