xref: /aosp_15_r20/art/compiler/utils/arm/assembler_arm_vixl.h (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
18 #define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
19 
20 #include <android-base/logging.h>
21 
22 #include "base/macros.h"
23 #include "constants_arm.h"
24 #include "dwarf/register.h"
25 #include "offsets.h"
26 #include "utils/arm/managed_register_arm.h"
27 #include "utils/assembler.h"
28 
29 // TODO(VIXL): Make VIXL compile cleanly with -Wshadow, -Wdeprecated-declarations.
30 #pragma GCC diagnostic push
31 #pragma GCC diagnostic ignored "-Wshadow"
32 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
33 #include "aarch32/macro-assembler-aarch32.h"
34 #pragma GCC diagnostic pop
35 
36 namespace vixl32 = vixl::aarch32;
37 
38 namespace art HIDDEN {
39 namespace arm {
40 
DWARFReg(vixl32::Register reg)41 inline dwarf::Reg DWARFReg(vixl32::Register reg) {
42   return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
43 }
44 
DWARFReg(vixl32::SRegister reg)45 inline dwarf::Reg DWARFReg(vixl32::SRegister reg) {
46   return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
47 }
48 
// Width/signedness variants for memory load operations; consumed by
// ArmVIXLAssembler::LoadFromOffset() and GetAllowedLoadOffsetBits().
// Enumerator values are spelled out to make the ordering explicit.
enum LoadOperandType {
  kLoadSignedByte = 0,
  kLoadUnsignedByte = 1,
  kLoadSignedHalfword = 2,
  kLoadUnsignedHalfword = 3,
  kLoadWord = 4,
  kLoadWordPair = 5,
  kLoadSWord = 6,
  kLoadDWord = 7
};
59 
// Width variants for memory store operations; consumed by
// ArmVIXLAssembler::StoreToOffset() and GetAllowedStoreOffsetBits().
// Enumerator values are spelled out to make the ordering explicit.
enum StoreOperandType {
  kStoreByte = 0,
  kStoreHalfword = 1,
  kStoreWord = 2,
  kStoreWordPair = 3,
  kStoreSWord = 4,
  kStoreDWord = 5
};
68 
69 class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
70  public:
71   // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
72   // fewer system calls than a larger default capacity.
73   static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;
74 
ArmVIXLMacroAssembler()75   ArmVIXLMacroAssembler()
76       : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}
77 
78   // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
79   // CMP+Bcc are generated by default.
80   // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
81   // then Cbz/Cbnz is generated.
82   // Prefer following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz.
83   // In T32, Cbz/Cbnz instructions have following limitations:
84   // - Far targets, which are over 126 bytes away, are not supported.
85   // - Only low registers can be encoded.
86   // - Backward branches are not supported.
87   void CompareAndBranchIfZero(vixl32::Register rn,
88                               vixl32::Label* label,
89                               bool is_far_target = true);
90   void CompareAndBranchIfNonZero(vixl32::Register rn,
91                                  vixl32::Label* label,
92                                  bool is_far_target = true);
93 
94   // In T32 some of the instructions (add, mov, etc) outside an IT block
95   // have only 32-bit encodings. But there are 16-bit flag setting
96   // versions of these instructions (adds, movs, etc). In most of the
97   // cases in ART we don't care if the instructions keep flags or not;
98   // thus we can benefit from smaller code size.
99   // VIXL will never generate flag setting versions (for example, adds
100   // for Add macro instruction) unless vixl32::DontCare option is
101   // explicitly specified. That's why we introduce wrappers to use
102   // DontCare option by default.
103 #define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
104   void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
105     MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
106   } \
107   using MacroAssembler::func_name
108 
109   WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
110   WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
111   WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
112   WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
113   WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);
114 
115   WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
116   WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
117   WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
118   WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
119   WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);
120 
121   WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
122   WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
123   WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
124   WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);
125 
126 #undef WITH_FLAGS_DONT_CARE_RD_RN_OP
127 
128 #define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
129   void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
130     MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
131   } \
132   using MacroAssembler::func_name
133 
134   WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
135   WITH_FLAGS_DONT_CARE_RD_OP(Mov);
136 
137 #undef WITH_FLAGS_DONT_CARE_RD_OP
138 
139   // The following two functions don't fall into above categories. Overload them separately.
Rrx(vixl32::Register rd,vixl32::Register rn)140   void Rrx(vixl32::Register rd, vixl32::Register rn) {
141     MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
142   }
143   using MacroAssembler::Rrx;
144 
Mul(vixl32::Register rd,vixl32::Register rn,vixl32::Register rm)145   void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
146     MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
147   }
148   using MacroAssembler::Mul;
149 
150   // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
151   // makes the right decision about 16-bit encodings.
Add(vixl32::Register rd,vixl32::Register rn,const vixl32::Operand & operand)152   void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
153     if (rd.Is(rn) && operand.IsPlainRegister()) {
154       MacroAssembler::Add(rd, rn, operand);
155     } else {
156       MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
157     }
158   }
159   using MacroAssembler::Add;
160 
161   // These interfaces try to use 16-bit T2 encoding of B instruction.
162   void B(vixl32::Label* label);
163   // For B(label), we always try to use Narrow encoding, because 16-bit T2 encoding supports
164   // jumping within 2KB range. For B(cond, label), because the supported branch range is 256
165   // bytes; we use the far_target hint to try to use 16-bit T1 encoding for short range jumps.
166   void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);
167 
168   // Use literal for generating double constant if it doesn't fit VMOV encoding.
Vmov(vixl32::DRegister rd,double imm)169   void Vmov(vixl32::DRegister rd, double imm) {
170     if (vixl::VFP::IsImmFP64(imm)) {
171       MacroAssembler::Vmov(rd, imm);
172     } else {
173       MacroAssembler::Vldr(rd, imm);
174     }
175   }
176   using MacroAssembler::Vmov;
177 
178   // TODO(b/281982421): Move the implementation of Mrrc to vixl and remove this implementation.
Mrrc(vixl32::Register r1,vixl32::Register r2,int coproc,int opc1,int crm)179   void Mrrc(vixl32::Register r1, vixl32::Register r2, int coproc, int opc1, int crm) {
180     // See ARM A-profile A32/T32 Instruction set architecture
181     // https://developer.arm.com/documentation/ddi0597/2022-09/Base-Instructions/MRRC--Move-to-two-general-purpose-registers-from-System-register-
182     CHECK(coproc == 15 || coproc == 14);
183     if (IsUsingT32()) {
184       uint32_t inst = (0b111011000101 << 20) |
185                       (r2.GetCode() << 16) |
186                       (r1.GetCode() << 12) |
187                       (coproc << 8) |
188                       (opc1 << 4) |
189                       crm;
190       EmitT32_32(inst);
191     } else {
192       uint32_t inst = (0b000011000101 << 20) |
193                       (r2.GetCode() << 16) |
194                       (r1.GetCode() << 12) |
195                       (coproc << 8) |
196                       (opc1 << 4) |
197                       crm;
198       EmitA32(inst);
199     }
200   }
201 };
202 
// ART assembler front-end for 32-bit ARM, built on top of
// ArmVIXLMacroAssembler. It always selects the Thumb2 (T32) instruction set
// and adds ART-specific helpers for loads/stores, immediate materialization,
// heap-reference poisoning and the marking-register check.
class ArmVIXLAssembler final : public Assembler {
 private:
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* allocator)
      : Assembler(allocator) {
    // Use Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  // Direct access to the underlying VIXL macro assembler.
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() override;

  // Size of generated code.
  size_t CodeSize() const override;
  const uint8_t* CodeBufferBaseAddress() const override;

  // Copy instructions out of assembly buffer into the given region of memory.
  void CopyInstructions(const MemoryRegion& region) override;

  // The art::Label overloads are deliberately unsupported on ARM; use the
  // vixl::aarch32::Label overloads below instead.
  void Bind([[maybe_unused]] Label* label) override {
    UNIMPLEMENTED(FATAL) << "Do not use Bind(Label*) for ARM";
  }
  void Jump([[maybe_unused]] Label* label) override {
    UNIMPLEMENTED(FATAL) << "Do not use Jump(Label*) for ARM";
  }

  void Bind(vixl::aarch32::Label* label) {
    vixl_masm_.Bind(label);
  }
  // Unconditional branch to `label`.
  void Jump(vixl::aarch32::Label* label) {
    vixl_masm_.B(label);
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Emit code checking the status of the Marking Register, and aborting
  // the program if MR does not match the value stored in the art::Thread
  // object.
  //
  // Argument `temp` is used as a temporary register to generate code.
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck and is passed to the BKPT instruction.
  void GenerateMarkingRegisterCheck(vixl32::Register temp, int code = 0);

  // Store `reg` with the width selected by `type` at `base` + `offset`.
  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  // Materialize the 32-bit constant `value` into `dest`.
  void LoadImmediate(vixl32::Register dest, int32_t value);
  // Load counterparts of the Store*ToOffset helpers above.
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  // Bulk transfer of the registers in `regs` to/from the stack starting at
  // `stack_offset`.
  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  // Encodability queries for immediates in data-processing instructions.
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode,
                             uint32_t immediate,
                             vixl::aarch32::FlagsUpdate update_flags = vixl::aarch32::DontCare);
  // Split `offset` into a part to add to the base register and a residual
  // offset encodable with `allowed_offset_bits`; returns whether the split
  // is possible.
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  // Number of offset bits the load/store instruction for `type` supports.
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  // rd = rd + value, and the three-operand form rd = rn + value.
  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  // Variant taking a condition, intended for use inside an IT block.
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  // Create a literal that is placed when first used and whose storage is
  // released only when the literal pool is destroyed (hence the name).
  // Ownership of the allocation is transferred to the pool.
  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
313 
// Thread register declaration. Defined in the corresponding .cc file;
// presumably reserved to hold the current art::Thread* — confirm against the
// runtime's register assignments.
extern const vixl32::Register tr;
// Marking register declaration. Compared against the value stored in the
// art::Thread object by GenerateMarkingRegisterCheck().
extern const vixl32::Register mr;
318 
319 }  // namespace arm
320 }  // namespace art
321 
322 #endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
323