//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64-specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
                     cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
                    " integer instructions"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote "
                      "constant pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the"
                 " linker optimization hints (LOH)"), cl::init(true),
                 cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableRedundantCopyElimination("aarch64-redundant-copy-elim",
                               cl::desc("Enable the redundant copy elimination pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
                   " optimization pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
EnableCondOpt("aarch64-condopt",
              cl::desc("Enable the condition optimizer pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                   cl::desc("Work around Cortex-A53 erratum 835769"),
                   cl::init(false));

static cl::opt<bool>
EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
             cl::desc("Enable optimizations on complex GEPs"),
             cl::init(false));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

static cl::opt<bool>
EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden,
                       cl::desc("Enable the loop data prefetch pass"),
                       cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
}
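
// Note: front ends normally reach this hook through llvm/Support/TargetSelect.h
// (e.g. InitializeAllTargets()) rather than calling it directly. A minimal
// sketch of looking the registered target back up afterwards (illustrative
// only, not part of this file):
//
//   std::string Err;
//   const Target *T =
//       TargetRegistry::lookupTarget("aarch64-unknown-linux-gnu", Err);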

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string.
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
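
// For reference, the fields in these strings decode as follows (per the
// DataLayout section of the LLVM Language Reference; listed as a reading aid,
// not normative):
//   e / E        little- / big-endian
//   m:o / m:e    Mach-O / ELF name-mangling scheme
//   iN:A         iN has ABI alignment of A bits (e.g. i64:64)
//   n32:64       native integer widths are 32 and 64 bits
//   S128         stack alignment is 128 bits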

// Helper function to set up the defaults for reciprocals.
static void initReciprocals(AArch64TargetMachine &TM, AArch64Subtarget &ST) {
  // For the estimates, convergence is quadratic, so essentially the number of
  // digits is doubled after each iteration. On ARMv8, the minimum architected
  // accuracy of the initial estimate is 2^-8. Therefore, the number of extra
  // steps to refine the result for float (23 mantissa bits) and for double
  // (52 mantissa bits) are 2 and 3, respectively.
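  // Concretely, starting from 8 correct bits the estimate refines as
  // 8 -> 16 -> 32 bits (two steps cover float's 23-bit significand) and
  // 8 -> 16 -> 32 -> 64 bits (three steps cover double's 52-bit significand).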
  unsigned ExtraStepsF = 2,
           ExtraStepsD = ExtraStepsF + 1;
  bool UseRsqrt = ST.useRSqrt();

  TM.Options.Reciprocals.setDefaults("sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("sqrtd", UseRsqrt, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-sqrtd", UseRsqrt, ExtraStepsD);

  TM.Options.Reciprocals.setDefaults("divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("divd", false, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-divd", false, ExtraStepsD);
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // The DataLayout string has to be computed (via computeDataLayout) before
    // TLInfo is constructed, hence the ordering of the initializers below.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, LittleEndian) {
  initReciprocals(*this, Subtarget);
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
struct AArch64GISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            Subtarget.isLittleEndian());
#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    AArch64GISelActualAccessor *GISel = new AArch64GISelActualAccessor();
    GISel->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISel->RegBankInfo.reset(
        new AArch64RegisterBankInfo(*I->getRegisterInfo()));
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}
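
// Subtargets are cached keyed on the concatenation of the CPU and feature
// strings, so different functions in one module can get different subtargets.
// A hypothetical IR fragment (illustrative only):
//
//   define void @f() #0 { ret void }
//   attributes #0 = { "target-cpu"="cortex-a57" "target-features"="+neon" }
//
// would be looked up under the SubtargetMap key "cortex-a57+neon".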

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch.
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call the SeparateConstOffsetFromGEP pass to extract constants within
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
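    // For example (illustrative only), in
    //   %p = getelementptr inbounds [16 x i32], [16 x i32]* %a, i64 %i, i64 3
    // the constant index can be split off as a fixed 12-byte offset, leaving a
    // variable-only address computation that later passes can share and fold
    // into addressing modes.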
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call the EarlyCSE pass to find and remove common subexpressions in the
    // lowered result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the access size in bytes.
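  // As illustrative arithmetic: for i64 accesses that allows offsets up to
  // 4095 * 8 = 32760 bytes, but for i8 accesses only up to 4095 bytes, which
  // is why the single 4095 cutoff passed below is conservative for wider
  // types.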
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}
#endif

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}