/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INSTRUMENTATION_H_
#define ART_RUNTIME_INSTRUMENTATION_H_

#include <stdint.h>

#include <functional>
#include <list>
#include <memory>
#include <optional>
#include <queue>
#include <unordered_set>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/pointer_size.h"
#include "base/safe_map.h"
#include "gc_root.h"
#include "jvalue.h"
#include "offsets.h"

namespace art HIDDEN {
namespace mirror {
class Class;
class Object;
class Throwable;
}  // namespace mirror
class ArtField;
class ArtMethod;
class Context;
template <typename T> class Handle;
template <typename T> class MutableHandle;
struct NthCallerVisitor;
union JValue;
class OatQuickMethodHeader;
class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;

namespace instrumentation {

// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts
// the application's performance.
static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;

// An optional frame is either Some(const ShadowFrame& current_frame) or None, depending on
// whether the method being exited has a shadow frame associated with the current stack frame.
// In cases where there is no shadow frame associated with this stack frame, this will be None.
using OptionalFrame = std::optional<std::reference_wrapper<const ShadowFrame>>;
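
// A hedged sketch of consuming an OptionalFrame from a MethodExited override (the local names
// are illustrative, not part of this API):
//
//   void MethodExited(Thread* thread, ArtMethod* method, OptionalFrame frame,
//                     JValue& return_value) override REQUIRES_SHARED(Locks::mutator_lock_) {
//     if (frame.has_value()) {
//       const ShadowFrame& shadow_frame = frame->get();  // interpreter-managed frame
//       // ... inspect shadow_frame ...
//     }
//   }
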
// Instrumentation event listener API. Registered listeners will get the appropriate call back
// for the events they are listening for. The call backs supply the thread, method and dex_pc the
// event occurred upon. The thread may or may not be Thread::Current().
struct InstrumentationListener {
  InstrumentationListener() {}
  virtual ~InstrumentationListener() {}

  // Call-back for when a method is entered.
  virtual void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            MutableHandle<mirror::Object>& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when a method is exited. The implementor should either handler-ize the return
  // value (if appropriate) or use the alternate MethodExited callback instead if they need to
  // go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is popped due to an exception throw. A method will either cause
  // a MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  virtual void MethodUnwind(Thread* thread,
                            ArtMethod* method,
                            uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when the dex pc moves in a method.
  virtual void DexPcMoved(Thread* thread,
                          Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we read from a field.
  virtual void FieldRead(Thread* thread,
                         Handle<mirror::Object> this_object,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            Handle<mirror::Object> field_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when we write into a field.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is thrown.
  virtual void ExceptionThrown(Thread* thread,
                               Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is caught/handled by java code.
  virtual void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we execute a branch.
  virtual void Branch(Thread* thread,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack
  // by either return or exceptions. Normally instrumentation listeners should ensure that there
  // are shadow frames by deoptimizing stacks.
  virtual void WatchedFramePop([[maybe_unused]] Thread* thread,
                               [[maybe_unused]] const ShadowFrame& frame)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};

class Instrumentation;
// A helper to send instrumentation events while popping the stack in a safe way.
class InstrumentationStackPopper {
 public:
  explicit InstrumentationStackPopper(Thread* self);
  ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);

  // Increase the number of frames being popped up to `stack_pointer`. Return true if the
  // frames were popped without any exceptions, false otherwise. The exception that caused
  // the pop is `exception`.
  bool PopFramesTo(uintptr_t stack_pointer, /*in-out*/MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  Instrumentation* instrumentation_;
  // The stack pointer limit for frames to pop.
  uintptr_t pop_until_;
};
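
// A hedged sketch of a concrete listener (MethodEntryLogger is hypothetical). Note that all
// pure-virtual call-backs must be overridden, even if most bodies are empty:
//
//   class MethodEntryLogger final : public InstrumentationListener {
//    public:
//     void MethodEntered(Thread* thread, ArtMethod* method) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       // ... record the entry, e.g. bump a per-method counter ...
//     }
//     // ... empty overrides for the remaining pure-virtual call-backs elided ...
//   };
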
// Instrumentation is a catch-all for when extra information is required from the runtime. The
// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs
// to method entry and exit; it may also force execution to be switched to the interpreter and
// trigger deoptimization.
class Instrumentation {
 public:
  enum InstrumentationEvent {
    kMethodEntered = 0x1,
    kMethodExited = 0x2,
    kMethodUnwind = 0x4,
    kDexPcMoved = 0x8,
    kFieldRead = 0x10,
    kFieldWritten = 0x20,
    kExceptionThrown = 0x40,
    kBranch = 0x80,
    kWatchedFramePop = 0x200,
    kExceptionHandled = 0x400,
  };

  enum class InstrumentationLevel {
    kInstrumentNothing,             // execute without instrumentation
    kInstrumentWithEntryExitHooks,  // execute with entry/exit hooks
    kInstrumentWithInterpreter      // execute with interpreter
  };

  static constexpr uint8_t kFastTraceListeners = 0b01;
  static constexpr uint8_t kSlowMethodEntryExitListeners = 0b10;

  Instrumentation();

  static constexpr MemberOffset RunExitHooksOffset() {
    // Assert that run_exit_hooks_ is 8 bits wide. If the size changes, update the compare
    // instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(run_exit_hooks_) == 1, "run_exit_hooks_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, run_exit_hooks_));
  }

  static constexpr MemberOffset HaveMethodEntryListenersOffset() {
    // Assert that have_method_entry_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_entry_listeners_) == 1,
                  "have_method_entry_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_entry_listeners_));
  }

  static constexpr MemberOffset HaveMethodExitListenersOffset() {
    // Assert that have_method_exit_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_exit_listeners_) == 1,
                  "have_method_exit_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_exit_listeners_));
  }

  // Add a listener to be notified of the masked-together set of instrumentation events. This
  // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
  // for saying you should have suspended all threads (installing stubs while threads are running
  // will break).
  EXPORT void AddListener(InstrumentationListener* listener,
                          uint32_t events,
                          bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Removes listeners for the specified events.
  EXPORT void RemoveListener(InstrumentationListener* listener,
                             uint32_t events,
                             bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
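
  // A hedged usage sketch: `events` is a mask of OR-ed InstrumentationEvent bits, and removal
  // should use the same listener pointer and mask (the listener variable is illustrative):
  //
  //   uint32_t events = Instrumentation::kMethodEntered | Instrumentation::kMethodExited;
  //   instrumentation->AddListener(&listener, events);
  //   // ... later, with the same mask ...
  //   instrumentation->RemoveListener(&listener, events);
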
  // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
  // try_switch_to_non_debuggable specifies if we can switch the runtime back to non-debuggable.
  // When a debugger is attached to a non-debuggable app, we switch the runtime to debuggable and
  // when we are detaching the debugger we move back to non-debuggable. If we are disabling
  // deoptimization for other reasons (ex: removing the last breakpoint) while the debugger is
  // still connected, we pass false to stay debuggable. Switching runtimes is expensive, so we
  // only want to switch when we know debug features aren't needed anymore.
  EXPORT void DisableDeoptimization(const char* key, bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // Enables entry exit hooks support. This is called in preparation for debug requests that
  // require calling method entry / exit hooks.
  EXPORT void EnableEntryExitHooks(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  bool AreAllMethodsDeoptimized() const {
    return InterpreterStubsInstalled();
  }
  bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Executes everything with interpreter.
  EXPORT void DeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Executes everything with compiled code (or interpreter if there is no code). May visit class
  // linker classes through ConfigureStubs.
  EXPORT void UndeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
  // method (except a class initializer) set to the resolution trampoline will be deoptimized
  // only once its declaring class is initialized.
  EXPORT void Deoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
  // (except a class initializer) set to the resolution trampoline will be updated only once its
  // declaring class is initialized.
  EXPORT void Undeoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Indicates whether the method has been deoptimized so it is executed with the interpreter.
  EXPORT bool IsDeoptimized(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Indicates whether any method needs to be deoptimized. This is used to avoid walking the
  // stack to determine if a deoptimization is required.
  bool IsDeoptimizedMethodsEmpty() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
  EXPORT void EnableMethodTracing(
      const char* key,
      InstrumentationListener* listener,
      bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
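
  // A hedged usage sketch: tracing clients pair EnableMethodTracing with DisableMethodTracing
  // below under a stable key (the key string and listener are illustrative):
  //
  //   instrumentation->EnableMethodTracing("my-tracer", &listener, /*needs_interpreter=*/false);
  //   // ... trace ...
  //   instrumentation->DisableMethodTracing("my-tracer");
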
  // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
  EXPORT void DisableMethodTracing(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void InstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void UninstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);

  // Returns a string representation of the given entry point.
  static std::string EntryPointString(const void* code);

  // Initialize the entrypoint of the method. `aot_code` is the AOT code.
  EXPORT void InitializeMethodsCode(ArtMethod* method, const void* aot_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a method respecting any installed stubs.
  void UpdateMethodsCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a native method to a JITed stub.
  void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute for an invoke, including from the JIT.
  EXPORT const void* GetCodeForInvoke(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute considering the current instrumentation level.
  // If interpreter stubs are installed, return the interpreter bridge. If the entry exit stubs
  // are installed, return an instrumentation entry point. Otherwise, return the code that
  // can be executed, including from the JIT.
  const void* GetMaybeInstrumentedCodeForInvoke(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ForceInterpretOnly() {
    forced_interpret_only_ = true;
  }

  bool EntryExitStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithEntryExitHooks ||
           instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  bool InterpreterStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }
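
  // The two predicates above form a hierarchy over InstrumentationLevel; a hedged summary:
  //
  //   kInstrumentNothing            -> EntryExitStubsInstalled() == false
  //   kInstrumentWithEntryExitHooks -> EntryExitStubsInstalled() == true
  //   kInstrumentWithInterpreter    -> EntryExitStubsInstalled() == true,
  //                                    InterpreterStubsInstalled() == true
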
  // Called by ArtMethod::Invoke to determine dispatch mechanism.
  bool InterpretOnly() const {
    return forced_interpret_only_ || InterpreterStubsInstalled();
  }
  bool InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsForcedInterpretOnly() const {
    return forced_interpret_only_;
  }

  bool RunExitHooks() const {
    return run_exit_hooks_;
  }

  bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ != 0;
  }

  bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ != 0;
  }

  bool HasFastMethodEntryListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ == kFastTraceListeners;
  }

  bool HasFastMethodExitListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ == kFastTraceListeners;
  }

  bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_unwind_listeners_;
  }

  bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_;
  }

  bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_;
  }

  bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_write_listeners_;
  }

  bool HasExceptionThrownListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_thrown_listeners_;
  }

  bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_branch_listeners_;
  }

  bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_watched_frame_pop_listeners_;
  }

  bool HasExceptionHandledListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_handled_listeners_;
  }
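
  // The Has*Listeners() accessors above are cheap flag reads intended as fast-path guards at
  // event dispatch sites; a hedged sketch of the pattern (mirrored by the *Event helpers below):
  //
  //   if (UNLIKELY(instrumentation->HasDexPcListeners())) {
  //     // slow path: notify the registered listeners
  //   }
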
  // Returns whether dex pc events need to be reported for the specified method.
  // These events are reported when DexPCListeners are installed and at least one of the
  // following conditions hold:
  // 1. The method is deoptimized. This is done when there is a breakpoint on the method.
  // 2. When the thread is deoptimized. This is used when single stepping a single thread.
  // 3. When interpreter stubs are installed. In this case no additional information is
  //    maintained about which methods need dex pc move events. This is usually used for
  //    features which need them for several methods across threads or need expensive
  //    processing. So it is OK to not further optimize this case.
  // DexPCListeners are installed when there is a breakpoint on any method / single stepping
  // on any thread. They are removed when the last breakpoint is removed. See AddListener and
  // RemoveListener for more details.
  bool NeedsDexPcEvents(ArtMethod* method, Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  bool NeedsSlowInterpreterForListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_ ||
           have_field_write_listeners_ ||
           have_watched_frame_pop_listeners_ ||
           have_exception_handled_listeners_;
  }

  // Inform listeners that a method has been entered. Listeners may be installed into executing
  // code, so method enter events can be delivered for methods already on the stack.
  void MethodEnterEvent(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodEntryListeners())) {
      MethodEnterEventImpl(thread, method);
    }
  }

  // Inform listeners that a method has been exited.
  template<typename T>
  void MethodExitEvent(Thread* thread,
                       ArtMethod* method,
                       OptionalFrame frame,
                       T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodExitListeners())) {
      MethodExitEventImpl(thread, method, frame, return_value);
    }
  }

  // Inform listeners that a method has been exited due to an exception.
  void MethodUnwindEvent(Thread* thread,
                         ArtMethod* method,
                         uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that the dex pc has moved (only supported by the interpreter).
  void DexPcMovedEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasDexPcListeners())) {
      DexPcMovedEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a branch has been taken (only supported by the interpreter).
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasBranchListeners())) {
      BranchImpl(thread, method, dex_pc, offset);
    }
  }

  // Inform listeners that we read a field (only supported by the interpreter).
  void FieldReadEvent(Thread* thread,
                      ObjPtr<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldReadListeners())) {
      FieldReadEventImpl(thread, this_object, method, dex_pc, field);
    }
  }
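
  // A note on MethodExitEvent above: T is deduced as either JValue or
  // MutableHandle<mirror::Object>, matching the two InstrumentationListener::MethodExited
  // overloads. A hedged call sketch from an interpreter-style caller:
  //
  //   JValue result = ...;  // the value being returned (illustrative)
  //   instrumentation->MethodExitEvent(self, method, /*frame=*/std::nullopt, result);
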
  // Inform listeners that we write a field (only supported by the interpreter).
  void FieldWriteEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
                       ArtField* field,
                       const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldWriteListeners())) {
      FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
    }
  }

  // Inform listeners that a watched frame has been popped (only supported by the interpreter).
  void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasWatchedFramePopListeners())) {
      WatchedFramePopImpl(thread, frame);
    }
  }

  // Inform listeners that an exception was thrown.
  void ExceptionThrownEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that an exception has been handled. This is not sent for native code or
  // for exceptions which reach the end of the thread's stack.
  void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result,
                        uint64_t* fpr_result) REQUIRES_SHARED(Locks::mutator_lock_);
  bool PushDeoptContextIfNeeded(Thread* self,
                                DeoptimizationMethodType deopt_type,
                                bool is_ref,
                                const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);

  // Deoptimize upon pending exception or if the caller requires it. Returns a long jump context
  // if a deoptimization is needed and taken.
  std::unique_ptr<Context> DeoptimizeIfNeeded(Thread* self,
                                              ArtMethod** sp,
                                              DeoptimizationMethodType type,
                                              JValue result,
                                              bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the caller of the runtime method requires a deoptimization. This checks
  // both if the method requires a deopt and if this particular frame needs a deopt because of
  // a class redefinition.
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp, size_t frame_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the specified method requires a deoptimization. This doesn't account for
  // whether a stack frame involving this method requires a deoptimization.
  bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DeoptimizationMethodType GetDeoptimizationMethodType(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call back for configure stubs.
  void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  void InstallStubsForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void UpdateEntrypointsForDebuggable() REQUIRES(art::Locks::mutator_lock_);
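
  // A hedged sketch of how a method-exit hook might tie the helpers above together (the locals
  // and control flow are illustrative, not the actual runtime implementation):
  //
  //   bool is_ref;
  //   JValue ret = instrumentation->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
  //   if (instrumentation->ShouldDeoptimizeCaller(self, sp)) {
  //     std::unique_ptr<Context> ctx =
  //         instrumentation->DeoptimizeIfNeeded(self, sp, deopt_type, ret, is_ref);
  //     // ... long jump using ctx if non-null ...
  //   }
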
  // Install instrumentation exit stub on every method of the stack of the given thread.
  // This is used by:
  // - the debugger to cause a deoptimization of all the frames in the thread's stack (for
  //   example, after updating local variables)
  // - to call method entry / exit hooks for tracing. For this we instrument the stack frame to
  //   run entry / exit hooks, but we don't need to deoptimize.
  // force_deopt indicates whether the frames need to deoptimize or not.
  EXPORT void InstrumentThreadStack(Thread* thread, bool force_deopt)
      REQUIRES(Locks::mutator_lock_);
  void InstrumentAllThreadStacks(bool force_deopt) REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_);

  // Force all currently running frames to be deoptimized back to interpreter. This should only
  // be used in cases where basically all compiled code has been invalidated.
  EXPORT void DeoptimizeAllThreadFrames() REQUIRES(art::Locks::mutator_lock_);

  static size_t ComputeFrameId(Thread* self,
                               size_t frame_depth,
                               size_t inlined_frames_before_frame)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Does not hold lock, used to check if someone changed from not instrumented to instrumented
  // during a GC suspend point.
  bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return alloc_entrypoints_instrumented_;
  }

  bool ProcessMethodUnwindCallbacks(Thread* self,
                                    std::queue<ArtMethod*>& methods,
                                    MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT InstrumentationLevel GetCurrentInstrumentationLevel() const;

  bool MethodSupportsExitEvents(ArtMethod* method, const OatQuickMethodHeader* header)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Update the current instrumentation_level_.
  void UpdateInstrumentationLevel(InstrumentationLevel level);

  // Does the job of installing or removing instrumentation code within methods.
  // In order to support multiple clients using instrumentation at the same time,
  // the caller must pass a unique key (a string) identifying it so we remember which
  // instrumentation level it needs. Therefore the current instrumentation level
  // becomes the highest instrumentation level required by a client.
  void ConfigureStubs(const char* key,
                      InstrumentationLevel desired_instrumentation_level,
                      bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
  void UpdateStubs(bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // If there are no pending deoptimizations, restores the stack to the normal state by updating
  // the return pcs to actual return addresses from the instrumentation stack and clears the
  // instrumentation stack.
  void MaybeRestoreInstrumentationStack() REQUIRES(Locks::mutator_lock_);

  // Switches the runtime state to non-java-debuggable if entry / exit hooks are no longer
  // required and the runtime did not start off as java-debuggable.
  void MaybeSwitchRuntimeDebugState(Thread* self)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);
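
  // A hedged illustration of the keyed-level model implemented by ConfigureStubs: each client
  // is identified by a unique key, and the effective level is the maximum over all requests
  // (the keys below are illustrative):
  //
  //   ConfigureStubs("tracer", InstrumentationLevel::kInstrumentWithEntryExitHooks, ...);
  //   ConfigureStubs("debugger", InstrumentationLevel::kInstrumentWithInterpreter, ...);
  //   // effective level: kInstrumentWithInterpreter
  //   ConfigureStubs("debugger", InstrumentationLevel::kInstrumentNothing, ...);
  //   // effective level falls back to kInstrumentWithEntryExitHooks
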
  // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
  // exclusive access to the mutator lock, which you can't get if the runtime isn't started.
  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;

  void MethodEnterEventImpl(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void MethodExitEventImpl(Thread* thread,
                           ArtMethod* method,
                           OptionalFrame frame,
                           T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void DexPcMovedEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldReadEventImpl(Thread* thread,
                          ObjPtr<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
                          ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldWriteEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           ArtField* field,
                           const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Read barrier-aware utility functions for accessing deoptimized_methods_.
  bool AddDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  bool IsDeoptimizedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  bool RemoveDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  void UpdateMethodsCodeImpl(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // We need to run method exit hooks for two reasons:
  // 1. When method exit listeners are installed.
  // 2. When we need to check if the caller of this method needs a deoptimization. This is
  //    needed only for deoptimizing the currently active invocations on stack when we
  //    deoptimize a method or invalidate the JITed code when redefining classes. So future
  //    invocations don't need to do this check.
  //
  // For JITed code of non-native methods we already have a stack slot reserved for deoptimizing
  // on demand and we use that stack slot to check if the caller needs a deoptimization. JITed
  // code checks if there are any method exit listeners or if the stack slot is set to determine
  // if method exit hooks need to be executed.
  //
  // For JITed JNI stubs there is no reserved stack slot for this and we just use this variable
  // to check if we need to run method entry / exit hooks. This variable is set when either of
  // the above conditions is true. If we need method exit hooks only for case 2, we would call
  // exit hooks for future invocations where they aren't necessary.
  // QuickToInterpreterBridge and GenericJniStub also use this for the same reasons.
  // If calling entry / exit hooks becomes expensive we could do the same optimization we did
  // for JITed code by having a reserved stack slot.
  bool run_exit_hooks_;
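
  // A hedged pseudocode sketch of the exit checks described above (not the actual generated
  // code; the deopt-slot name is illustrative):
  //
  //   // emitted at method exit by the JIT for non-native methods:
  //   if (have_method_exit_listeners_ != 0 || frame_deopt_slot_is_set) {
  //     CallMethodExitHooks();
  //   }
  //   // JITed JNI stubs instead test this single flag:
  //   if (run_exit_hooks_) {
  //     CallMethodExitHooks();
  //   }
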
  // The required level of instrumentation. This could be one of the following values:
  // kInstrumentNothing: no instrumentation support is needed.
  // kInstrumentWithEntryExitHooks: needs support to call method entry/exit stubs.
  // kInstrumentWithInterpreter: only execute with interpreter.
  Instrumentation::InstrumentationLevel instrumentation_level_;

  // Did the runtime request we only run in the interpreter? i.e. -Xint mode.
  bool forced_interpret_only_;

  // For method entry / exit events, we maintain fast trace listeners in a separate list to make
  // the implementation of fast trace listeners more efficient by JITing the code that handles
  // fast trace events. We use a uint8_t (and not bool) to encode if there are none / fast /
  // slow listeners.
  // Do we have any listeners for method entry events?
  uint8_t have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method exit events?
  uint8_t have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method unwind events?
  bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for dex move events?
  bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field read events?
  bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field write events?
  bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception thrown listeners?
  bool have_exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any frame pop listeners?
  bool have_watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any branch listeners?
  bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception handled listeners?
  bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Contains the instrumentation level required by each client of the instrumentation
  // identified by a string key.
  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
  InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);
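
  // A hedged illustration of the uint8_t encoding used by have_method_entry_listeners_ and
  // have_method_exit_listeners_ above (derived from the kFastTraceListeners /
  // kSlowMethodEntryExitListeners constants and the HasFast*ListenersOnly() accessors):
  //
  //   0b00 -> no listeners
  //   0b01 -> only fast trace listeners (kFastTraceListeners)
  //   0b10 -> only slow listeners (kSlowMethodEntryExitListeners)
  //   0b11 -> both fast and slow listeners
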
  // The event listeners, written to with the mutator_lock_ exclusively held.
  // Mutators must be able to iterate over these lists concurrently, that is, with listeners
  // being added or removed while iterating. The modifying thread holds the exclusive lock,
  // so other threads cannot iterate (i.e. read the data of the list) at the same time, but they
  // do keep iterators that need to remain valid. This is the reason these listeners are
  // std::list and not, for example, std::vector: the existing storage for a std::list does not
  // move. Note that mutators cannot make a copy of these lists before iterating, as the
  // instrumentation listeners can also be deleted concurrently.
  // As a result, these lists are never trimmed. That's acceptable given the low number of
  // listeners we have.
  std::list<InstrumentationListener*> method_entry_slow_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_entry_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_slow_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_thrown_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> watched_frame_pop_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_handled_listeners_
      GUARDED_BY(Locks::mutator_lock_);

  // The set of methods being deoptimized (by the debugger) which must be executed with the
  // interpreter only.
  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(Locks::mutator_lock_);

  // Greater than 0 if quick alloc entry points instrumented.
  size_t quick_alloc_entry_points_instrumentation_counter_;

  // alloc_entrypoints_instrumented_ is only updated with all the threads suspended; this is
  // done to prevent races with the GC, which relies on thread suspension to only see
  // alloc_entrypoints_instrumented_ change during suspend points.
  bool alloc_entrypoints_instrumented_;

  friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
  friend class InstrumentationStackPopper;  // For popping instrumentation frames.
  friend void InstrumentationInstallStack(Thread*, bool);

  DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationEvent rhs);
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationLevel rhs);

}  // namespace instrumentation
}  // namespace art

#endif  // ART_RUNTIME_INSTRUMENTATION_H_