1 /**************************************************************************
2 *
3 * Copyright 2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 **************************************************************************/
27
28
29 /**
30 * The purpose of this module is to expose LLVM functionality not available
31 * through the C++ bindings.
32 */
33
34
35 // Undef these vars just to silence warnings
36 #undef PACKAGE_BUGREPORT
37 #undef PACKAGE_NAME
38 #undef PACKAGE_STRING
39 #undef PACKAGE_TARNAME
40 #undef PACKAGE_VERSION
41
42
43 #include <stddef.h>
44
45 #include <llvm/Config/llvm-config.h>
46 #include <llvm-c/Core.h>
47 #include <llvm-c/Support.h>
48 #include <llvm-c/ExecutionEngine.h>
49 #include <llvm/Target/TargetOptions.h>
50 #include <llvm/ExecutionEngine/ExecutionEngine.h>
51 #include <llvm/Analysis/TargetLibraryInfo.h>
52 #include <llvm/ExecutionEngine/SectionMemoryManager.h>
53 #include <llvm/Support/CommandLine.h>
54 #include <llvm/Support/PrettyStackTrace.h>
55 #include <llvm/ExecutionEngine/ObjectCache.h>
56 #include <llvm/Support/TargetSelect.h>
57 #include <llvm/CodeGen/SelectionDAGNodes.h>
58 #if LLVM_VERSION_MAJOR >= 15
59 #include <llvm/Support/MemoryBuffer.h>
60 #endif
61
62 #if LLVM_VERSION_MAJOR >= 17
63 #include <llvm/TargetParser/Host.h>
64 #include <llvm/TargetParser/Triple.h>
65 #else
66 #include <llvm/Support/Host.h>
67 #include <llvm/ADT/Triple.h>
68 #endif
69
70 #if LLVM_VERSION_MAJOR < 11
71 #include <llvm/IR/CallSite.h>
72 #endif
73 #include <llvm/IR/IRBuilder.h>
74 #include <llvm/IR/Module.h>
75 #include <llvm/Support/CBindingWrapping.h>
76
77 #include <llvm/Config/llvm-config.h>
78 #if LLVM_USE_INTEL_JITEVENTS
79 #include <llvm/ExecutionEngine/JITEventListener.h>
80 #endif
81
82 #include "c11/threads.h"
83 #include "util/u_thread.h"
84 #include "util/detect.h"
85 #include "util/u_debug.h"
86 #include "util/u_cpu_detect.h"
87
88 #include "lp_bld_misc.h"
89 #include "lp_bld_debug.h"
90
91 static void lp_run_atexit_for_destructors(void);
92
93 namespace {
94
95 class LLVMEnsureMultithreaded {
96 public:
LLVMEnsureMultithreaded()97 LLVMEnsureMultithreaded()
98 {
99 if (!LLVMIsMultithreaded()) {
100 LLVMStartMultithreaded();
101 }
102 }
103 };
104
105 static LLVMEnsureMultithreaded lLVMEnsureMultithreaded;
106
107 }
108
/* Guards the one-time LLVM native-target initialization performed by
 * lp_bld_init_native_targets() via lp_set_target_options(). */
static once_flag init_native_targets_once_flag = ONCE_FLAG_INIT;
110
lp_bld_init_native_targets()111 void lp_bld_init_native_targets()
112 {
113 // If we have a native target, initialize it to ensure it is linked in and
114 // usable by the JIT.
115 llvm::InitializeNativeTarget();
116
117 llvm::InitializeNativeTargetAsmPrinter();
118
119 llvm::InitializeNativeTargetDisassembler();
120 #if MESA_DEBUG
121 {
122 char *env_llc_options = getenv("GALLIVM_LLC_OPTIONS");
123 if (env_llc_options) {
124 char *option;
125 char *options[64] = {(char *) "llc"}; // Warning without cast
126 int n;
127 for (n = 0, option = strtok(env_llc_options, " "); option; n++, option = strtok(NULL, " ")) {
128 options[n + 1] = option;
129 }
130 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
131 debug_printf("llc additional options (%d):\n", n);
132 for (int i = 1; i <= n; i++)
133 debug_printf("\t%s\n", options[i]);
134 debug_printf("\n");
135 }
136 LLVMParseCommandLineOptions(n + 1, options, NULL);
137 }
138 }
139 #endif
140 lp_run_atexit_for_destructors();
141 }
142
143 extern "C" void
lp_set_target_options(void)144 lp_set_target_options(void)
145 {
146 /* The llvm target registry is not thread-safe, so drivers and gallium frontends
147 * that want to initialize targets should use the lp_set_target_options()
148 * function to safely initialize targets.
149 *
150 * LLVM targets should be initialized before the driver or gallium frontend tries
151 * to access the registry.
152 */
153 call_once(&init_native_targets_once_flag, lp_bld_init_native_targets);
154 }
155
156 extern "C"
157 LLVMTargetLibraryInfoRef
gallivm_create_target_library_info(const char * triple)158 gallivm_create_target_library_info(const char *triple)
159 {
160 return reinterpret_cast<LLVMTargetLibraryInfoRef>(
161 new llvm::TargetLibraryInfoImpl(
162 llvm::Triple(triple)));
163 }
164
165 extern "C"
166 void
gallivm_dispose_target_library_info(LLVMTargetLibraryInfoRef library_info)167 gallivm_dispose_target_library_info(LLVMTargetLibraryInfoRef library_info)
168 {
169 delete reinterpret_cast<
170 llvm::TargetLibraryInfoImpl
171 *>(library_info);
172 }
173
174
/* All gallivm memory managers in this file build on (and delegate to)
 * LLVM's RTDyld memory manager interface. */
typedef llvm::RTDyldMemoryManager BaseMemoryManager;
176
177
178 /*
179 * Delegating is tedious but the default manager class is hidden in an
180 * anonymous namespace in LLVM, so we cannot just derive from it to change
181 * its behavior.
182 */
183 class DelegatingJITMemoryManager : public BaseMemoryManager {
184
185 protected:
186 virtual BaseMemoryManager *mgr() const = 0;
187
188 public:
189 /*
190 * From RTDyldMemoryManager
191 */
allocateCodeSection(uintptr_t Size,unsigned Alignment,unsigned SectionID,llvm::StringRef SectionName)192 virtual uint8_t *allocateCodeSection(uintptr_t Size,
193 unsigned Alignment,
194 unsigned SectionID,
195 llvm::StringRef SectionName) {
196 return mgr()->allocateCodeSection(Size, Alignment, SectionID,
197 SectionName);
198 }
allocateDataSection(uintptr_t Size,unsigned Alignment,unsigned SectionID,llvm::StringRef SectionName,bool IsReadOnly)199 virtual uint8_t *allocateDataSection(uintptr_t Size,
200 unsigned Alignment,
201 unsigned SectionID,
202 llvm::StringRef SectionName,
203 bool IsReadOnly) {
204 return mgr()->allocateDataSection(Size, Alignment, SectionID,
205 SectionName,
206 IsReadOnly);
207 }
registerEHFrames(uint8_t * Addr,uint64_t LoadAddr,size_t Size)208 virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
209 mgr()->registerEHFrames(Addr, LoadAddr, Size);
210 }
211 #if LLVM_VERSION_MAJOR >= 5
deregisterEHFrames()212 virtual void deregisterEHFrames() {
213 mgr()->deregisterEHFrames();
214 }
215 #else
deregisterEHFrames(uint8_t * Addr,uint64_t LoadAddr,size_t Size)216 virtual void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
217 mgr()->deregisterEHFrames(Addr, LoadAddr, Size);
218 }
219 #endif
getPointerToNamedFunction(const std::string & Name,bool AbortOnFailure=true)220 virtual void *getPointerToNamedFunction(const std::string &Name,
221 bool AbortOnFailure=true) {
222 return mgr()->getPointerToNamedFunction(Name, AbortOnFailure);
223 }
finalizeMemory(std::string * ErrMsg=0)224 virtual bool finalizeMemory(std::string *ErrMsg = 0) {
225 return mgr()->finalizeMemory(ErrMsg);
226 }
227 };
228
229
230 /*
231 * Delegate memory management to one shared manager for more efficient use
232 * of memory than creating a separate pool for each LLVM engine.
233 * Keep generated code until freeGeneratedCode() is called, instead of when
234 * memory manager is destroyed, which happens during engine destruction.
235 * This allows additional memory savings as we don't have to keep the engine
236 * around in order to use the code.
237 * All methods are delegated to the shared manager except destruction and
238 * deallocating code. For the latter we just remember what needs to be
239 * deallocated later. The shared manager is deleted once it is empty.
240 */
241 class ShaderMemoryManager : public DelegatingJITMemoryManager {
242
243 BaseMemoryManager *TheMM;
244
245 struct GeneratedCode {
246 typedef std::vector<void *> Vec;
247 Vec FunctionBody, ExceptionTable;
248 BaseMemoryManager *TheMM;
249
GeneratedCodeShaderMemoryManager::GeneratedCode250 GeneratedCode(BaseMemoryManager *MM) {
251 TheMM = MM;
252 }
253
~GeneratedCodeShaderMemoryManager::GeneratedCode254 ~GeneratedCode() {
255 }
256 };
257
258 GeneratedCode *code;
259
mgr() const260 BaseMemoryManager *mgr() const {
261 return TheMM;
262 }
263
264 public:
265
ShaderMemoryManager(BaseMemoryManager * MM)266 ShaderMemoryManager(BaseMemoryManager* MM) {
267 TheMM = MM;
268 code = new GeneratedCode(MM);
269 }
270
~ShaderMemoryManager()271 virtual ~ShaderMemoryManager() {
272 /*
273 * 'code' is purposely not deleted. It is the user's responsibility
274 * to call getGeneratedCode() and freeGeneratedCode().
275 */
276 }
277
getGeneratedCode()278 struct lp_generated_code *getGeneratedCode() {
279 return (struct lp_generated_code *) code;
280 }
281
freeGeneratedCode(struct lp_generated_code * code)282 static void freeGeneratedCode(struct lp_generated_code *code) {
283 delete (GeneratedCode *) code;
284 }
285
deallocateFunctionBody(void * Body)286 virtual void deallocateFunctionBody(void *Body) {
287 // remember for later deallocation
288 code->FunctionBody.push_back(Body);
289 }
290 };
291
292 class LPObjectCache : public llvm::ObjectCache {
293 private:
294 bool has_object;
295 struct lp_cached_code *cache_out;
296 public:
LPObjectCache(struct lp_cached_code * cache)297 LPObjectCache(struct lp_cached_code *cache) {
298 cache_out = cache;
299 has_object = false;
300 }
301
~LPObjectCache()302 ~LPObjectCache() {
303 }
notifyObjectCompiled(const llvm::Module * M,llvm::MemoryBufferRef Obj)304 void notifyObjectCompiled(const llvm::Module *M, llvm::MemoryBufferRef Obj) {
305 const std::string ModuleID = M->getModuleIdentifier();
306 if (has_object)
307 fprintf(stderr, "CACHE ALREADY HAS MODULE OBJECT\n");
308 has_object = true;
309 cache_out->data_size = Obj.getBufferSize();
310 cache_out->data = malloc(cache_out->data_size);
311 memcpy(cache_out->data, Obj.getBufferStart(), cache_out->data_size);
312 }
313
getObject(const llvm::Module * M)314 virtual std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *M) {
315 if (cache_out->data_size) {
316 return llvm::MemoryBuffer::getMemBuffer(llvm::StringRef((const char *)cache_out->data, cache_out->data_size), "", false);
317 }
318 return NULL;
319 }
320
321 };
322
/**
 * Populate MAttrs with the LLVM -mattr feature strings ("+feat"/"-feat")
 * appropriate for the host CPU, per target architecture.
 *
 * On x86 the features come from util_get_cpu_caps() (not LLVM's own host
 * detection) so they can be overridden via environment variables; on Arm
 * LLVM's host detection is used directly.
 */
void
lp_build_fill_mattrs(std::vector<std::string> &MAttrs)
{

#if DETECT_ARCH_ARM
   /* llvm-3.3+ implements sys::getHostCPUFeatures for Arm,
    * which allows us to enable/disable code generation based
    * on the results of cpuid on these architectures.
    */
   llvm::StringMap<bool> features;
   llvm::sys::getHostCPUFeatures(features);

   for (llvm::StringMapIterator<bool> f = features.begin();
        f != features.end();
        ++f) {
      MAttrs.push_back(((*f).second ? "+" : "-") + (*f).first().str());
   }
#elif DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   /*
    * Because we can override cpu caps with environment variables,
    * so we do not use llvm::sys::getHostCPUFeatures to detect cpu features
    * but using util_get_cpu_caps() instead.
    */
#if DETECT_ARCH_X86_64
   /*
    * Without this, on some "buggy" qemu cpu setup, LLVM could crash
    * if LLVM detects the wrong CPU type.
    */
   MAttrs.push_back("+64bit");
#endif
   MAttrs.push_back(util_get_cpu_caps()->has_sse    ? "+sse"    : "-sse"   );
   MAttrs.push_back(util_get_cpu_caps()->has_sse2   ? "+sse2"   : "-sse2"  );
   MAttrs.push_back(util_get_cpu_caps()->has_sse3   ? "+sse3"   : "-sse3"  );
   MAttrs.push_back(util_get_cpu_caps()->has_ssse3  ? "+ssse3"  : "-ssse3" );
   MAttrs.push_back(util_get_cpu_caps()->has_sse4_1 ? "+sse4.1" : "-sse4.1");
   MAttrs.push_back(util_get_cpu_caps()->has_sse4_2 ? "+sse4.2" : "-sse4.2");
   /*
    * AVX feature is not automatically detected from CPUID by the X86 target
    * yet, because the old (yet default) JIT engine is not capable of
    * emitting the opcodes.  On newer llvm versions it is and at least some
    * versions (tested with 3.3) will emit avx opcodes without this anyway.
    */
   MAttrs.push_back(util_get_cpu_caps()->has_avx  ? "+avx"  : "-avx");
   MAttrs.push_back(util_get_cpu_caps()->has_f16c ? "+f16c" : "-f16c");
   MAttrs.push_back(util_get_cpu_caps()->has_fma  ? "+fma"  : "-fma");
   MAttrs.push_back(util_get_cpu_caps()->has_avx2 ? "+avx2" : "-avx2");

   /* All avx512 have avx512f */
   MAttrs.push_back(util_get_cpu_caps()->has_avx512f  ? "+avx512f"  : "-avx512f");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512cd ? "+avx512cd" : "-avx512cd");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512er ? "+avx512er" : "-avx512er");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512pf ? "+avx512pf" : "-avx512pf");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512bw ? "+avx512bw" : "-avx512bw");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512dq ? "+avx512dq" : "-avx512dq");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512vl ? "+avx512vl" : "-avx512vl");
#endif
#if DETECT_ARCH_ARM
   if (!util_get_cpu_caps()->has_neon) {
      MAttrs.push_back("-neon");
      MAttrs.push_back("-crypto");
      MAttrs.push_back("-vfp2");
   }
#endif

#if DETECT_ARCH_PPC
   MAttrs.push_back(util_get_cpu_caps()->has_altivec ? "+altivec" : "-altivec");
   /*
    * Bug 25503 is fixed, by the same fix that fixed
    * bug 26775, in versions of LLVM later than 3.8 (starting with 3.8.1).
    * BZ 33531 actually comprises more than one bug, all of
    * which are fixed in LLVM 4.0.
    *
    * With LLVM 4.0 or higher:
    * Make sure VSX instructions are ENABLED (if supported), unless
    * VSX instructions are explicitly enabled/disabled via GALLIVM_VSX=1 or 0.
    */
   if (util_get_cpu_caps()->has_altivec) {
      MAttrs.push_back(util_get_cpu_caps()->has_vsx ? "+vsx" : "-vsx");
   }
#endif

#if DETECT_ARCH_MIPS64
   MAttrs.push_back(util_get_cpu_caps()->has_msa ? "+msa" : "-msa");
   /* MSA requires a 64-bit FPU register file */
   MAttrs.push_back("+fp64");
#endif

#if DETECT_ARCH_RISCV64 == 1
   /* Before riscv is more matured and util_get_cpu_caps() is implemented,
    * assume this for now since most of linux capable riscv machine are
    * riscv64gc
    */
   MAttrs = {"+m","+c","+a","+d","+f"};
#endif

#if DETECT_ARCH_LOONGARCH64 == 1
   /*
    * No FPU-less LoongArch64 systems are ever shipped yet, and LP64D is
    * the default ABI, so FPU is enabled here.
    *
    * The Software development convention defaults to have "128-bit
    * vector", so LSX is enabled here, see
    * https://github.com/loongson/la-softdev-convention/releases/download/v0.1/la-softdev-convention.pdf
    */
   MAttrs = {"+f","+d"};
#if LLVM_VERSION_MAJOR >= 18
   MAttrs.push_back(util_get_cpu_caps()->has_lsx ? "+lsx" : "-lsx");
   MAttrs.push_back(util_get_cpu_caps()->has_lasx ? "+lasx" : "-lasx");
#else
   /*
    * LLVM 17's LSX support is incomplete, and LLVM 16 isn't supported
    * LSX and LASX.  So explicitly mask it.
    */
   MAttrs.push_back("-lsx");
   MAttrs.push_back("-lasx");
#endif
#endif
}
441
442 void
lp_build_dump_mattrs(std::vector<std::string> & MAttrs)443 lp_build_dump_mattrs(std::vector<std::string> &MAttrs)
444 {
445 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
446 int n = MAttrs.size();
447 if (n > 0) {
448 debug_printf("llc -mattr option(s): ");
449 for (int i = 0; i < n; i++)
450 debug_printf("%s%s", MAttrs[i].c_str(), (i < n - 1) ? "," : "");
451 debug_printf("\n");
452 }
453 }
454 }
455
456 /**
457 * Same as LLVMCreateJITCompilerForModule, but:
458 * - allows using MCJIT and enabling AVX feature where available.
459 * - set target options
460 *
461 * See also:
462 * - llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
463 * - llvm/tools/lli/lli.cpp
464 * - http://markmail.org/message/ttkuhvgj4cxxy2on#query:+page:1+mid:aju2dggerju3ivd3+state:results
465 */
466 extern "C"
467 LLVMBool
lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef * OutJIT,lp_generated_code ** OutCode,struct lp_cached_code * cache_out,LLVMModuleRef M,LLVMMCJITMemoryManagerRef CMM,unsigned OptLevel,char ** OutError)468 lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
469 lp_generated_code **OutCode,
470 struct lp_cached_code *cache_out,
471 LLVMModuleRef M,
472 LLVMMCJITMemoryManagerRef CMM,
473 unsigned OptLevel,
474 char **OutError)
475 {
476 using namespace llvm;
477
478 std::string Error;
479 EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
480
481 /**
482 * LLVM 3.1+ haven't more "extern unsigned llvm::StackAlignmentOverride" and
483 * friends for configuring code generation options, like stack alignment.
484 */
485 TargetOptions options;
486 #if DETECT_ARCH_X86 && LLVM_VERSION_MAJOR < 13
487 options.StackAlignmentOverride = 4;
488 #endif
489
490 builder.setEngineKind(EngineKind::JIT)
491 .setErrorStr(&Error)
492 .setTargetOptions(options)
493 #if LLVM_VERSION_MAJOR >= 18
494 .setOptLevel((CodeGenOptLevel)OptLevel);
495 #else
496 .setOptLevel((CodeGenOpt::Level)OptLevel);
497 #endif
498
499 #if DETECT_OS_WINDOWS
500 /*
501 * MCJIT works on Windows, but currently only through ELF object format.
502 *
503 * XXX: We could use `LLVM_HOST_TRIPLE "-elf"` but LLVM_HOST_TRIPLE has
504 * different strings for MinGW/MSVC, so better play it safe and be
505 * explicit.
506 */
507 # if DETECT_ARCH_X86_64
508 LLVMSetTarget(M, "x86_64-pc-win32-elf");
509 # elif DETECT_ARCH_X86
510 LLVMSetTarget(M, "i686-pc-win32-elf");
511 # elif DETECT_ARCH_AARCH64
512 LLVMSetTarget(M, "aarch64-pc-win32-elf");
513 # else
514 # error Unsupported architecture for MCJIT on Windows.
515 # endif
516 #endif
517
518 std::vector<std::string> MAttrs;
519
520 lp_build_fill_mattrs(MAttrs);
521
522 builder.setMAttrs(MAttrs);
523
524 lp_build_dump_mattrs(MAttrs);
525
526 StringRef MCPU = llvm::sys::getHostCPUName();
527 /*
528 * The cpu bits are no longer set automatically, so need to set mcpu manually.
529 * Note that the MAttrs set above will be sort of ignored (since we should
530 * not set any which would not be set by specifying the cpu anyway).
531 * It ought to be safe though since getHostCPUName() should include bits
532 * not only from the cpu but environment as well (for instance if it's safe
533 * to use avx instructions which need OS support). According to
534 * http://llvm.org/bugs/show_bug.cgi?id=19429 however if I understand this
535 * right it may be necessary to specify older cpu (or disable mattrs) though
536 * when not using MCJIT so no instructions are generated which the old JIT
537 * can't handle. Not entirely sure if we really need to do anything yet.
538 */
539
540 #if DETECT_ARCH_PPC_64
541 /*
542 * Large programs, e.g. gnome-shell and firefox, may tax the addressability
543 * of the Medium code model once dynamically generated JIT-compiled shader
544 * programs are linked in and relocated. Yet the default code model as of
545 * LLVM 8 is Medium or even Small.
546 * The cost of changing from Medium to Large is negligible:
547 * - an additional 8-byte pointer stored immediately before the shader entrypoint;
548 * - change an add-immediate (addis) instruction to a load (ld).
549 */
550 builder.setCodeModel(CodeModel::Large);
551
552 #if UTIL_ARCH_LITTLE_ENDIAN
553 /*
554 * Versions of LLVM prior to 4.0 lacked a table entry for "POWER8NVL",
555 * resulting in (big-endian) "generic" being returned on
556 * little-endian Power8NVL systems. The result was that code that
557 * attempted to load the least significant 32 bits of a 64-bit quantity
558 * from memory loaded the wrong half. This resulted in failures in some
559 * Piglit tests, e.g.
560 * .../arb_gpu_shader_fp64/execution/conversion/frag-conversion-explicit-double-uint
561 */
562 if (MCPU == "generic")
563 MCPU = "pwr8";
564 #endif
565 #endif
566
567 #if DETECT_ARCH_MIPS64
568 /*
569 * ls3a4000 CPU and ls2k1000 SoC is a mips64r5 compatible with MSA SIMD
570 * instruction set implemented, while ls3a3000 is mips64r2 compatible
571 * only. getHostCPUName() return "generic" on all loongson
572 * mips CPU currently. So we override the MCPU to mips64r5 if MSA is
573 * implemented, feedback to mips64r2 for all other ordinary mips64 cpu.
574 */
575 if (MCPU == "generic")
576 MCPU = util_get_cpu_caps()->has_msa ? "mips64r5" : "mips64r2";
577 #endif
578
579 builder.setMCPU(MCPU);
580 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
581 debug_printf("llc -mcpu option: %s\n", MCPU.str().c_str());
582 }
583
584 ShaderMemoryManager *MM = NULL;
585 BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
586 MM = new ShaderMemoryManager(JMM);
587 *OutCode = MM->getGeneratedCode();
588
589 builder.setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager>(MM));
590 MM = NULL; // ownership taken by std::unique_ptr
591
592 ExecutionEngine *JIT;
593
594 JIT = builder.create();
595
596 if (cache_out) {
597 LPObjectCache *objcache = new LPObjectCache(cache_out);
598 JIT->setObjectCache(objcache);
599 cache_out->jit_obj_cache = (void *)objcache;
600 }
601
602 #if LLVM_USE_INTEL_JITEVENTS
603 JITEventListener *JEL = JITEventListener::createIntelJITEventListener();
604 JIT->RegisterJITEventListener(JEL);
605 #endif
606 if (JIT) {
607 *OutJIT = wrap(JIT);
608 return 0;
609 }
610 lp_free_generated_code(*OutCode);
611 *OutCode = 0;
612 delete MM;
613 *OutError = strdup(Error.c_str());
614 return 1;
615 }
616
617
618 extern "C"
619 void
lp_free_generated_code(struct lp_generated_code * code)620 lp_free_generated_code(struct lp_generated_code *code)
621 {
622 ShaderMemoryManager::freeGeneratedCode(code);
623 }
624
625 extern "C"
626 LLVMMCJITMemoryManagerRef
lp_get_default_memory_manager()627 lp_get_default_memory_manager()
628 {
629 BaseMemoryManager *mm;
630 mm = new llvm::SectionMemoryManager();
631 return reinterpret_cast<LLVMMCJITMemoryManagerRef>(mm);
632 }
633
634 extern "C"
635 void
lp_free_memory_manager(LLVMMCJITMemoryManagerRef memorymgr)636 lp_free_memory_manager(LLVMMCJITMemoryManagerRef memorymgr)
637 {
638 delete reinterpret_cast<BaseMemoryManager*>(memorymgr);
639 }
640
641 extern "C" void
lp_free_objcache(void * objcache_ptr)642 lp_free_objcache(void *objcache_ptr)
643 {
644 LPObjectCache *objcache = (LPObjectCache *)objcache_ptr;
645 delete objcache;
646 }
647
648 extern "C" LLVMValueRef
lp_get_called_value(LLVMValueRef call)649 lp_get_called_value(LLVMValueRef call)
650 {
651 return LLVMGetCalledValue(call);
652 }
653
654 extern "C" bool
lp_is_function(LLVMValueRef v)655 lp_is_function(LLVMValueRef v)
656 {
657 return LLVMGetValueKind(v) == LLVMFunctionValueKind;
658 }
659
660 extern "C" void
lp_set_module_stack_alignment_override(LLVMModuleRef MRef,unsigned align)661 lp_set_module_stack_alignment_override(LLVMModuleRef MRef, unsigned align)
662 {
663 #if LLVM_VERSION_MAJOR >= 13
664 llvm::Module *M = llvm::unwrap(MRef);
665 M->setOverrideStackAlignment(align);
666 #endif
667 }
668
669 using namespace llvm;
670
/*
 * Dummy SDNode subclass whose only purpose is to exercise the SelectionDAG
 * code paths containing function-local static variables, so that their
 * atexit-registered destructors get registered at a controlled time.  See
 * lp_run_atexit_for_destructors() for the full rationale.
 */
class GallivmRunAtExitForStaticDestructors : public SDNode
{
public:
   /* getSDVTList (protected) calls getValueTypeList (private), which contains static variables. */
   GallivmRunAtExitForStaticDestructors(): SDNode(0, 0, DebugLoc(), getSDVTList(MVT::Other))
   {
   }
};
679
680 static void
lp_run_atexit_for_destructors(void)681 lp_run_atexit_for_destructors(void)
682 {
683 /* LLVM >= 16 registers static variable destructors on the first compile, which gcc
684 * implements by calling atexit there. Before that, u_queue registers its atexit
685 * handler to kill all threads. Since exit() runs atexit handlers in the reverse order,
686 * the LLVM destructors are called first while shader compiler threads may still be
687 * running, which crashes in LLVM in SelectionDAG.cpp.
688 *
689 * The solution is to run the code that declares the LLVM static variables first,
690 * so that atexit for LLVM is registered first and u_queue is registered after that,
691 * which ensures that all u_queue threads are terminated before LLVM destructors are
692 * called.
693 *
694 * This just executes the code that declares static variables.
695 */
696 GallivmRunAtExitForStaticDestructors();
697 }
698