/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_

#include "android-base/logging.h"
#include "arch/riscv64/registers_riscv64.h"
#include "base/macros.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "intrinsics_list.h"
#include "optimizing/locations.h"
#include "parallel_move_resolver.h"
#include "utils/riscv64/assembler_riscv64.h"

namespace art HIDDEN {
namespace riscv64 {

// InvokeDexCallingConvention registers
static constexpr XRegister kParameterCoreRegisters[] = {A1, A2, A3, A4, A5, A6, A7};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

static constexpr FRegister kParameterFpuRegisters[] = {FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7};
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);

// InvokeRuntimeCallingConvention registers
static constexpr XRegister kRuntimeParameterCoreRegisters[] = {A0, A1, A2, A3, A4, A5, A6, A7};
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

static constexpr FRegister kRuntimeParameterFpuRegisters[] = {
    FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7
};
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

// FCLASS returns a 10-bit classification mask with the two highest bits marking NaNs
// (signaling and quiet). To detect a NaN, we can compare (either BGE or BGEU, the sign
// bit is always clear) the result with the `kFClassNaNMinValue`.
static_assert(kSignalingNaN == 0x100);
static_assert(kQuietNaN == 0x200);
static constexpr int32_t kFClassNaNMinValue = 0x100;
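// A minimal sketch of the check described above (illustrative RISC-V assembly only; the
// register names and the `is_nan` label are placeholders, not code emitted verbatim by
// this code generator):
//     fclass.d  t0, fa0            // t0 = classification mask for fa0
//     li        t1, 0x100          // t1 = kFClassNaNMinValue
//     bge       t0, t1, is_nan     // only sNaN (0x100) and qNaN (0x200) reach this value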

#define UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(V) \
  V(FP16Ceil)                                   \
  V(FP16Compare)                                \
  V(FP16Floor)                                  \
  V(FP16Rint)                                   \
  V(FP16ToFloat)                                \
  V(FP16ToHalf)                                 \
  V(FP16Greater)                                \
  V(FP16GreaterEquals)                          \
  V(FP16Less)                                   \
  V(FP16LessEquals)                             \
  V(FP16Min)                                    \
  V(FP16Max)                                    \
  V(StringStringIndexOf)                        \
  V(StringStringIndexOfAfter)                   \
  V(StringBufferAppend)                         \
  V(StringBufferLength)                         \
  V(StringBufferToString)                       \
  V(StringBuilderAppendObject)                  \
  V(StringBuilderAppendString)                  \
  V(StringBuilderAppendCharSequence)            \
  V(StringBuilderAppendCharArray)               \
  V(StringBuilderAppendBoolean)                 \
  V(StringBuilderAppendChar)                    \
  V(StringBuilderAppendInt)                     \
  V(StringBuilderAppendLong)                    \
  V(StringBuilderAppendFloat)                   \
  V(StringBuilderAppendDouble)                  \
  V(StringBuilderLength)                        \
  V(StringBuilderToString)                      \
  V(CRC32Update)                                \
  V(CRC32UpdateBytes)                           \
  V(CRC32UpdateByteBuffer)                      \
  V(MethodHandleInvokeExact)                    \
  V(MethodHandleInvoke)                         \
  V(UnsafeArrayBaseOffset)                      \
  V(JdkUnsafeArrayBaseOffset)                   \

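// The list above is an X-macro: callers supply the `V` macro to stamp out one item per
// unimplemented intrinsic. Its real consumers live outside this header; the snippet below
// is only a hypothetical illustration of the pattern:
//
//   #define PRINT_UNIMPLEMENTED(Name) LOG(INFO) << "Unimplemented riscv64 intrinsic: " #Name;
//   UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(PRINT_UNIMPLEMENTED)
//   #undef PRINT_UNIMPLEMENTED
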
// Method register on invoke.
static const XRegister kArtMethodRegister = A0;

// Helper functions used by codegen as well as intrinsics.
XRegister InputXRegisterOrZero(Location location);
int32_t ReadBarrierMarkEntrypointOffset(Location ref);

class CodeGeneratorRISCV64;

class InvokeRuntimeCallingConvention : public CallingConvention<XRegister, FRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kRiscv64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<XRegister, FRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kRiscv64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorRISCV64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorRISCV64() {}
  virtual ~InvokeDexCallingConventionVisitorRISCV64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorRISCV64);
};

class CriticalNativeCallingConventionVisitorRiscv64 : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorRiscv64(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorRiscv64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // Register allocator does not support adjusting frame size, so we cannot provide final locations
  // of stack arguments for register allocation. We ask the register allocator for any location and
  // move these arguments to the right place after adjusting the SP when generating the call.
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t fpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorRiscv64);
};

class SlowPathCodeRISCV64 : public SlowPathCode {
 public:
  explicit SlowPathCodeRISCV64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  Riscv64Label* GetEntryLabel() { return &entry_label_; }
  Riscv64Label* GetExitLabel() { return &exit_label_; }

 private:
  Riscv64Label entry_label_;
  Riscv64Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeRISCV64);
};

class ParallelMoveResolverRISCV64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverRISCV64(ArenaAllocator* allocator, CodeGeneratorRISCV64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  void Exchange(int index1, int index2, bool double_slot);

  Riscv64Assembler* GetAssembler() const;

 private:
  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverRISCV64);
};

class FieldAccessCallingConventionRISCV64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionRISCV64() {}

  Location GetObjectLocation() const override {
    return Location::RegisterLocation(A1);
  }
  Location GetFieldIndexLocation() const override {
    return Location::RegisterLocation(A0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::RegisterLocation(A0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const override {
    return is_instance
        ? Location::RegisterLocation(A2)
        : Location::RegisterLocation(A1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::FpuRegisterLocation(FA0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionRISCV64);
};

class LocationsBuilderRISCV64 : public HGraphVisitor {
 public:
  LocationsBuilderRISCV64(HGraph* graph, CodeGeneratorRISCV64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION
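// Each `DECLARE_VISIT_INSTRUCTION` use above declares one visitor override per concrete
// instruction, e.g. `void VisitAdd(HAdd* instr) override;`.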

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id "
               << instruction->GetId() << ")";
  }

 protected:
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction);
  void HandleFieldGet(HInstruction* instruction);

  InvokeDexCallingConventionVisitorRISCV64 parameter_visitor_;

  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderRISCV64);
};

class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorRISCV64(HGraph* graph, CodeGeneratorRISCV64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_RISCV64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() << " (id "
               << instruction->GetId() << ")";
  }

  Riscv64Assembler* GetAssembler() const { return assembler_; }

  void GenerateMemoryBarrier(MemBarrierKind kind);

  void FAdd(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FClass(XRegister rd, FRegister rs1, DataType::Type type);

  void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
  void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);

  // Sequentially consistent store. Used for volatile fields and intrinsics.
  // The `instruction` argument is for recording an implicit null check stack map with the
  // store instruction which may not be the last instruction emitted by `StoreSeqCst()`.
  void StoreSeqCst(Location value,
                   XRegister rs1,
                   int32_t offset,
                   DataType::Type type,
                   HInstruction* instruction = nullptr);

  void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);

 protected:
  void GenerateClassInitializationCheck(SlowPathCodeRISCV64* slow_path, XRegister class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, XRegister temp);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Riscv64Label* true_target,
                             Riscv64Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void GenerateIntLongCondition(IfCondition cond, LocationSummary* locations);
  void GenerateIntLongCondition(IfCondition cond,
                                LocationSummary* locations,
                                XRegister rd,
                                bool to_all_bits);
  void GenerateIntLongCompareAndBranch(IfCondition cond,
                                       LocationSummary* locations,
                                       Riscv64Label* label);
  void GenerateFpCondition(IfCondition cond,
                           bool gt_bias,
                           DataType::Type type,
                           LocationSummary* locations,
                           Riscv64Label* label = nullptr);
  void GenerateFpCondition(IfCondition cond,
                           bool gt_bias,
                           DataType::Type type,
                           LocationSummary* locations,
                           Riscv64Label* label,
                           XRegister rd,
                           bool to_all_bits);
  void GenerateMethodEntryExitHook(HInstruction* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(XRegister adjusted,
                                   XRegister temp,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block);
  void GenTableBasedPackedSwitch(XRegister adjusted,
                                 XRegister temp,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block);
  int32_t VecAddress(LocationSummary* locations,
                     size_t size,
                     /*out*/ XRegister* adjusted_base);

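  // Helper templates that dispatch to the single-precision (`opS`) or double-precision (`opD`)
  // assembler instruction based on `type`; FAdd above and FSub/.../FMvX below are thin wrappers
  // around these.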
  template <typename Reg,
            void (Riscv64Assembler::*opS)(Reg, FRegister, FRegister),
            void (Riscv64Assembler::*opD)(Reg, FRegister, FRegister)>
  void FpBinOp(Reg rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FSub(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FDiv(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMul(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMin(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FMax(FRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FEq(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FLt(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);
  void FLe(XRegister rd, FRegister rs1, FRegister rs2, DataType::Type type);

  template <typename Reg,
            void (Riscv64Assembler::*opS)(Reg, FRegister),
            void (Riscv64Assembler::*opD)(Reg, FRegister)>
  void FpUnOp(Reg rd, FRegister rs1, DataType::Type type);
  void FAbs(FRegister rd, FRegister rs1, DataType::Type type);
  void FNeg(FRegister rd, FRegister rs1, DataType::Type type);
  void FMv(FRegister rd, FRegister rs1, DataType::Type type);
  void FMvX(XRegister rd, FRegister rs1, DataType::Type type);

  Riscv64Assembler* const assembler_;
  CodeGeneratorRISCV64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorRISCV64);
};

class CodeGeneratorRISCV64 : public CodeGenerator {
 public:
  CodeGeneratorRISCV64(HGraph* graph,
                       const CompilerOptions& compiler_options,
                       OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorRISCV64() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;

  void Bind(HBasicBlock* block) override;

  size_t GetWordSize() const override {
    // The "word" for the compiler is the core register size (64-bit for riscv64) while the
    // riscv64 assembler uses "word" for 32-bit values and "double word" for 64-bit values.
    return kRiscv64DoublewordSize;
  }

  bool SupportsPredicatedSIMD() const override {
    // TODO(riscv64): Check the vector extension.
    return false;
  }

  // Get FP register width in bytes for spilling/restoring in the slow paths.
  //
  // Note: In SIMD graphs this should return SIMD register width as all FP and SIMD registers
  // alias and live SIMD registers are forced to be spilled in full size in the slow paths.
  size_t GetSlowPathFPWidth() const override {
    // Default implementation.
    return GetCalleePreservedFPWidth();
  }

  size_t GetCalleePreservedFPWidth() const override {
    return kRiscv64FloatRegSizeInBytes;
  };

  size_t GetSIMDRegisterWidth() const override {
    // TODO(riscv64): Implement SIMD with the Vector extension.
    // Note: HLoopOptimization calls this function even for an ISA without SIMD support.
    return kRiscv64FloatRegSizeInBytes;
  };

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  };

  Riscv64Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Riscv64Label>(block_labels_, block);
  }

  void Initialize() override { block_labels_ = CommonInitializeLabels<Riscv64Label>(); }

  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location destination, Location source, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  Riscv64Assembler* GetAssembler() override { return &assembler_; }
  const Riscv64Assembler& GetAssembler() const override { return assembler_; }

  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }

  InstructionCodeGeneratorRISCV64* GetInstructionVisitor() override {
    return &instruction_visitor_;
  }

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, XRegister klass);

  void SetupBlockedRegisters() const override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  InstructionSet GetInstructionSet() const override { return InstructionSet::kRiscv64; }

  const Riscv64InstructionSetFeatures& GetInstructionSetFeatures() const;

  uint32_t GetPreferredSlotsAlignment() const override {
    return static_cast<uint32_t>(kRiscv64PointerSize);
  }

  void Finalize() override;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }

  bool NeedsTwoRegisters([[maybe_unused]] DataType::Type type) const override { return false; }

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;

  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, ArtMethod* method) override;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.img.rel.ro, .bss, or directly in the boot image.
  //
  // The 20-bit and 12-bit parts of the 32-bit PC-relative offset are patched separately,
  // necessitating two patches/infos. There can be more than two patches/infos if the
  // instruction supplying the high part is shared with e.g. a slow path, while the low
  // part is supplied by separate instructions, e.g.:
  //     auipc r1, high       // patch
  //     lwu   r2, low(r1)    // patch
  //     beqz  r2, slow_path
  //   back:
  //     ...
  //   slow_path:
  //     ...
  //     sw    r2, low(r1)    // patch
  //     j     back
  struct PcRelativePatchInfo : PatchInfo<Riscv64Label> {
    PcRelativePatchInfo(const DexFile* dex_file,
                        uint32_t off_or_idx,
                        const PcRelativePatchInfo* info_high)
        : PatchInfo<Riscv64Label>(dex_file, off_or_idx),
          pc_insn_label(info_high != nullptr ? &info_high->label : &label) {
      DCHECK_IMPLIES(info_high != nullptr, info_high->pc_insn_label == &info_high->label);
    }

    // Pointer to the info for the high part patch or nullptr if this is the high part patch info.
    const Riscv64Label* pc_insn_label;

   private:
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
  };

  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
                                                  const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewAppImageMethodPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageJniEntrypointPatch(
      MethodReference target_method, const PcRelativePatchInfo* info_high = nullptr);

  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewAppImageTypePatch(const DexFile& dex_file,
                                            dex::TypeIndex type_index,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewTypeBssEntryPatch(HLoadClass* load_class,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index,
                                              const PcRelativePatchInfo* info_high = nullptr);

  void EmitPcRelativeAuipcPlaceholder(PcRelativePatchInfo* info_high, XRegister out);
  void EmitPcRelativeAddiPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
  void EmitPcRelativeLwuPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
  void EmitPcRelativeLdPlaceholder(PcRelativePatchInfo* info_low, XRegister rd, XRegister rs1);
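  // Typical pairing of the patch factories and placeholder emitters above, sketched from the
  // high/low scheme described for `PcRelativePatchInfo` (variable names are illustrative only):
  //
  //   PcRelativePatchInfo* info_high = NewBootImageMethodPatch(target_method);
  //   PcRelativePatchInfo* info_low = NewBootImageMethodPatch(target_method, info_high);
  //   EmitPcRelativeAuipcPlaceholder(info_high, temp);      // AUIPC: 20-bit high part, patched.
  //   EmitPcRelativeAddiPlaceholder(info_low, temp, temp);  // ADDI: 12-bit low part, patched.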

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;

  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);
  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const Literal* literal,
                       uint64_t index_in_table) const;
  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                       dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                      dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  void LoadTypeForBootImageIntrinsic(XRegister dest, TypeReference target_type);
  void LoadBootImageRelRoEntry(XRegister dest, uint32_t boot_image_offset);
  void LoadBootImageAddress(XRegister dest, uint32_t boot_image_reference);
  void LoadIntrinsicDeclaringClass(XRegister dest, HInvoke* invoke);
  void LoadClassRootForIntrinsic(XRegister dest, ClassRoot class_root);

  void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
                                  Location temp,
                                  SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(HInvokeVirtual* invoke,
                           Location temp,
                           SlowPathCode* slow_path = nullptr) override;
  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  void GenerateMemoryBarrier(MemBarrierKind kind);

  void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);

  bool CanUseImplicitSuspendCheck() const;


  // Create slow path for a Baker read barrier for a GC root load within `instruction`.
  SlowPathCodeRISCV64* AddGcRootBakerBarrierBarrierSlowPath(
      HInstruction* instruction, Location root, Location temp);

  // Emit marking check for a Baker read barrier for a GC root load within `instruction`.
  void EmitBakerReadBarierMarkingCheck(
      SlowPathCodeRISCV64* slow_path, Location root, Location temp);

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               XRegister obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option,
                               Riscv64Label* label_low = nullptr);

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             XRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             XRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and intrinsics.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 XRegister obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 Location temp,
                                                 bool needs_null_check);

  // Create slow path for a read barrier for a heap reference within `instruction`.
  //
  // This is a helper function for GenerateReadBarrierSlow() that has the same
  // arguments. The creation and adding of the slow path is exposed for intrinsics
  // that cannot use GenerateReadBarrierSlow() from their own slow paths.
  SlowPathCodeRISCV64* AddReadBarrierSlowPath(HInstruction* instruction,
                                              Location out,
                                              Location ref,
                                              Location obj,
                                              uint32_t offset,
                                              Location index);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  // Emit a write barrier if either:
  // A) emit_null_check is false, or
  // B) emit_null_check is true and value is not null.
  void MaybeMarkGCCard(XRegister object, XRegister value, bool emit_null_check);

  // Emit a write barrier unconditionally.
  void MarkGCCard(XRegister object);

  // Crash if the card table is not valid. This check is only emitted for the CC GC. We assert
  // `(!clean || !self->is_gc_marking)`, since the card table should not be set to clean when the CC
  // GC is marking for eliminated write barriers.
  void CheckGCCardIsValid(XRegister object);

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(XRegister reg);

  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(XRegister reg);

  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(XRegister reg);

  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(XRegister reg);

  void SwapLocations(Location loc1, Location loc2, DataType::Type type);

 private:
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
  using StringToLiteralMap =
      ArenaSafeMap<StringReference, Literal*, StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference, Literal*, TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value);
  Literal* DeduplicateUint64Literal(uint64_t value);

  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          const PcRelativePatchInfo* info_high,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  Riscv64Assembler assembler_;
  LocationsBuilderRISCV64 location_builder_;
  InstructionCodeGeneratorRISCV64 instruction_visitor_;
  Riscv64Label frame_entry_label_;

  // Labels for each block that will be compiled.
  Riscv64Label* block_labels_;  // Indexed by block id.

  ParallelMoveResolverRISCV64 move_resolver_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method address or method code
  // address.
  Uint64ToLiteralMap uint64_literals_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kAppImageRelRo.
  ArenaDeque<PcRelativePatchInfo> app_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative public type patch info for kBssEntryPublic.
  ArenaDeque<PcRelativePatchInfo> public_type_bss_entry_patches_;
  // PC-relative package type patch info for kBssEntryPackage.
  ArenaDeque<PcRelativePatchInfo> package_type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative+kCallCriticalNative.
  ArenaDeque<PcRelativePatchInfo> boot_image_jni_entrypoint_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;

  // Patches for string root accesses in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;
};

}  // namespace riscv64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_RISCV64_H_