// art/runtime/interpreter/mterp/nterp.cc (AOSP, revision 795d594fd825385562da6b089ea9b2033f3abf5a)
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "nterp.h"

#include "arch/instruction_set.h"
#include "base/quasi_atomic.h"
#include "class_linker-inl.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art HIDDEN {
namespace interpreter {

bool IsNterpSupported() {
  switch (kRuntimeQuickCodeISA) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
    case InstructionSet::kArm64:
      return kReserveMarkingRegister && !kUseTableLookupReadBarrier;
    case InstructionSet::kRiscv64:
      return true;
    case InstructionSet::kX86:
    case InstructionSet::kX86_64:
      return !kUseTableLookupReadBarrier;
    default:
      return false;
  }
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // If the runtime is interpreter only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume an interpreter-only
  // runtime is always running the switch-like interpreter.
  return IsNterpSupported() && !runtime->IsJavaDebuggable() && !instr->EntryExitStubsInstalled() &&
         !instr->InterpretOnly() && !runtime->IsAotCompiler() &&
         !instr->NeedsSlowInterpreterForListeners() &&
         // If an async exception has been thrown, we need to go to the switch
         // interpreter: nterp doesn't know how to deal with these, so we could
         // end up never handling the exception if we are in an infinite loop.
         !runtime->AreAsyncExceptionsThrown() &&
         (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}

// The entrypoint for nterp, which ArtMethods can directly point to.
extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

ArrayRef<const uint8_t> NterpImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

// Another entrypoint, which does a clinit check at entry.
extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpWithClinitEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
}

ArrayRef<const uint8_t> NterpWithClinitImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpWithClinitImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpWithClinitImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
  constexpr size_t width = kNterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  static_assert(kNumPackedOpcodes * width != 0);
  if (interp_size != kNumPackedOpcodes * width) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  method->UpdateCounter(0xf);
}

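// Store a value in the calling thread's interpreter cache, keyed by the dex
// PC of the instruction being interpreted, so that a later execution of that
// instruction can hit the nterp fast path instead of this runtime lookup.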
template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
  self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}

template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note that the return type has already been removed from `shorty`.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
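  // Illustrative example (not in the original source): for shorty "FDF" the
  // loop below reads the first float from fprs[0], the double from the aligned
  // pair fprs[2]/fprs[3], and the second float back-fills fprs[1].
  // `fpr_index` walks single-precision slots, `fpr_double_index` walks aligned
  // pairs, and anything beyond the 16 FP registers is read from `stack_args`.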
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note that the return type has already been removed from `shorty`.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Double should not overlap with float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

static constexpr uint8_t kInvalidInvokeType = 255u;
static_assert(static_cast<uint8_t>(kMaxInvokeType) < kInvalidInvokeType);

static constexpr uint8_t GetOpcodeInvokeType(uint8_t opcode) {
  switch (opcode) {
    case Instruction::INVOKE_DIRECT:
    case Instruction::INVOKE_DIRECT_RANGE:
      return static_cast<uint8_t>(kDirect);
    case Instruction::INVOKE_INTERFACE:
    case Instruction::INVOKE_INTERFACE_RANGE:
      return static_cast<uint8_t>(kInterface);
    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      return static_cast<uint8_t>(kStatic);
    case Instruction::INVOKE_SUPER:
    case Instruction::INVOKE_SUPER_RANGE:
      return static_cast<uint8_t>(kSuper);
    case Instruction::INVOKE_VIRTUAL:
    case Instruction::INVOKE_VIRTUAL_RANGE:
      return static_cast<uint8_t>(kVirtual);

    default:
      return kInvalidInvokeType;
  }
}

static constexpr std::array<uint8_t, 256u> GenerateOpcodeInvokeTypes() {
  std::array<uint8_t, 256u> opcode_invoke_types{};
  for (size_t opcode = 0u; opcode != opcode_invoke_types.size(); ++opcode) {
    opcode_invoke_types[opcode] = GetOpcodeInvokeType(opcode);
  }
  return opcode_invoke_types;
}

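// A 256-entry table mapping each opcode byte to its InvokeType (or
// kInvalidInvokeType), built at compile time so that NterpGetMethod can turn
// an invoke opcode into its InvokeType with a single indexed load.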
static constexpr std::array<uint8_t, 256u> kOpcodeInvokeTypes = GenerateOpcodeInvokeTypes();

LIBART_PROTECTED FLATTEN
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(IsUint<8>(static_cast<std::underlying_type_t<Instruction::Code>>(opcode)));
  uint8_t raw_invoke_type = kOpcodeInvokeTypes[opcode];
  DCHECK_LE(raw_invoke_type, kMaxInvokeType);
  InvokeType invoke_type = static_cast<InvokeType>(raw_invoke_type);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  uint16_t method_index =
      (opcode >= Instruction::INVOKE_VIRTUAL_RANGE) ? inst->VRegB_3rc() : inst->VRegB_35c();

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethodId(method_index, caller)
      : class_linker->ResolveMethodWithChecks(method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  if (invoke_type == kSuper) {
    resolved_method = caller->SkipAccessChecks()
        ? FindSuperMethodToCall</*access_check=*/false>(method_index, resolved_method, caller, self)
        : FindSuperMethodToCall</*access_check=*/true>(method_index, resolved_method, caller, self);
    if (resolved_method == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
  }

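  // The value returned to nterp encodes how to dispatch, as the branches below
  // show: j.l.Object methods resolved through an interface set bit 0 and carry
  // the vtable index in the upper 16 bits; default interface methods set bit 1
  // on the ArtMethod pointer; string constructors return the StringFactory
  // ArtMethod with bit 0 set; plain virtual calls return the vtable index; and
  // everything else returns the ArtMethod pointer unchanged.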
  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->IsStringConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to notify nterp this is a string init method. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it, and we expect a lot more regular calls than string
    // init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

LIBART_PROTECTED
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      const uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ true,
      /*is_put=*/ IsInstructionSPut(opcode),
      resolve_field_type);

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to notify nterp this is a volatile field. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    // For sput-object, try to resolve the field type even if we were not requested to.
    // Only if the field type is successfully resolved can we update the cache. If we
    // fail to resolve the type, we clear the exception to keep interpreter
    // semantics of not throwing when null is stored.
    if (opcode == Instruction::SPUT_OBJECT &&
        resolve_field_type == 0 &&
        resolved_field->ResolveType() == nullptr) {
      DCHECK(self->IsExceptionPending());
      self->ClearException();
    } else {
      UpdateCache(self, dex_pc_ptr, resolved_field);
    }
    return reinterpret_cast<size_t>(resolved_field);
  }
}

LIBART_PROTECTED
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                const uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ false,
      /*is_put=*/ IsInstructionIPut(opcode),
      resolve_field_type);
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache volatile fields, and return the negated offset as a marker
    // that the field is volatile.
    return -resolved_field->GetOffset().Uint32Value();
  }
  // For iput-object, try to resolve the field type even if we were not requested to.
  // Only if the field type is successfully resolved can we update the cache. If we
  // fail to resolve the type, we clear the exception to keep interpreter
  // semantics of not throwing when null is stored.
  if (opcode == Instruction::IPUT_OBJECT &&
      resolve_field_type == 0 &&
      resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  }
  return resolved_field->GetOffset().Uint32Value();
}

extern "C" mirror::Object* NterpGetClass(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(opcode == Instruction::CHECK_CAST ||
         opcode == Instruction::INSTANCE_OF ||
         opcode == Instruction::CONST_CLASS ||
         opcode == Instruction::NEW_ARRAY);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  dex::TypeIndex index = dex::TypeIndex(
      (opcode == Instruction::CHECK_CAST || opcode == Instruction::CONST_CLASS)
          ? inst->VRegB_21c()
          : inst->VRegC_22c());

  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  UpdateCache(self, dex_pc_ptr, c.Ptr());
  return c.Ptr();
}

extern "C" mirror::Object* NterpAllocateObject(Thread* self,
                                               ArtMethod* caller,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  DCHECK_EQ(inst->Opcode(), Instruction::NEW_INSTANCE);
  dex::TypeIndex index = dex::TypeIndex(inst->VRegB_21c());
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
  if (UNLIKELY(c->IsStringClass())) {
    // We don't cache the class for strings as we need to special-case their
    // allocation.
    return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
  } else {
    if (!c->IsFinalizable() && c->IsInstantiable()) {
      // Cache non-finalizable classes for next calls.
      UpdateCache(self, dex_pc_ptr, c.Ptr());
    }
    return AllocObjectFromCode(c, self, allocator_type).Ptr();
  }
}

extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // only used in filled-new-array.
  uint32_t vregC = 0;   // only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // It is important that this method does not suspend: it can be called on
  // method entry, and async deoptimization does not expect any runtime method
  // other than the suspend entrypoint to run before the first instruction of
  // a Java method has executed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  Runtime* runtime = Runtime::Current();
  if (method->IsMemorySharedMethod()) {
    if (!method->IsIntrinsic()) {
      // Intrinsics are special and will be considered hot from the first call.
      DCHECK_EQ(Thread::Current()->GetSharedMethodHotness(), 0u);
      Thread::Current()->ResetSharedMethodHotness();
    }
  } else {
    // Move the counter to the initial threshold in case we have to re-JIT it.
    method->ResetCounter(runtime->GetJITOptions()->GetWarmupThreshold());
    // Mark the method as warm for the profile saver.
    method->SetPreviouslyWarm();
  }
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge, check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->MaybeEnqueueCompilation(method, Thread::Current());
  }
  return nullptr;
}

extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
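  /*
   * Worked example (illustrative): a payload with first_key = 10 and
   * targets = {t0, t1, t2} maps testVal 10, 11, 12 to t0, t1, t2; any other
   * testVal is out of range and returns kInstrLen (3), the width of the
   * packed-switch instruction, so execution falls through to the next
   * instruction.
   */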
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
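  /*
   * Worked example (illustrative): with keys = {-5, 10, 42} and
   * targets = {t0, t1, t2}, testVal 10 is found by the binary search below and
   * returns t1, while testVal 7 matches no key and returns kInstrLen (3), the
   * width of the sparse-switch instruction.
   */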

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" void NterpFree(void* val) {
  free(val);
}

}  // namespace interpreter
}  // namespace art