/* -----------------------------------------------------------------------
   unix64.S - Copyright (c) 2013  The Written Word, Inc.
	    - Copyright (c) 2008  Red Hat, Inc
	    - Copyright (c) 2002  Bo Thorsen <[email protected]>

   x86-64 Foreign Function Interface

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   ``Software''), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be included
   in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
   ----------------------------------------------------------------------- */

#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include "internal64.h"
#include "asmnames.h"

	.text

/* This macro allows the safe creation of jump tables without an
   actual table.  The entry points into the table are all 8 bytes.
   The use of ORG asserts that we're at the correct location.  */
/* ??? The clang assembler doesn't handle .org with symbolic expressions.  */
#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
# define E(BASE, X)	.balign 8
#else
# define E(BASE, X)	.balign 8; .org BASE + X * 8
#endif

/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
		    void *raddr, void (*fnaddr)(void));

   Incoming (SysV AMD64) arguments:
     %rdi = args   (base of the register/stack argument area built by ffi_call)
     %rsi = bytes  (size of that area; ARGS+BYTES is this function's frame base)
     %rdx = flags  (low byte holds the UNIX64_RET_* return classification)
     %rcx = raddr  (where the return value must be stored)
     %r8  = fnaddr (target function to call)

   Bit o trickiness here -- ARGS+BYTES is the base of the stack frame
   for this function.  This has been allocated by ffi_call.  We also
   deallocate some of the stack that has been alloca'd.  */

	.balign	8
	.globl	C(ffi_call_unix64)
	FFI_HIDDEN(C(ffi_call_unix64))

C(ffi_call_unix64):
L(UW0):
	movq	(%rsp), %r10		/* Load return address.  */
	leaq	(%rdi, %rsi), %rax	/* Find local stack base.  */
	movq	%rdx, (%rax)		/* Save flags.  */
	movq	%rcx, 8(%rax)		/* Save raddr.  */
	movq	%rbp, 16(%rax)		/* Save old frame pointer.  */
	movq	%r10, 24(%rax)		/* Relocate return address.  */
	movq	%rax, %rbp		/* Finalize local stack frame.  */

	/* New stack frame based off rbp.  This is a itty bit of unwind
	   trickery in that the CFA *has* changed.  There is no easy way
	   to describe it correctly on entry to the function.  Fortunately,
	   it doesn't matter too much since at all points we can correctly
	   unwind back to ffi_call.  Note that the location to which we
	   moved the return address is (the new) CFA-8, so from the
	   perspective of the unwind info, it hasn't moved.  */
L(UW1):
	/* cfi_def_cfa(%rbp, 32) */
	/* cfi_rel_offset(%rbp, 16) */

	movq	%rdi, %r10		/* Save a copy of the register area. */
	movq	%r8, %r11		/* Save a copy of the target fn.  */
	movl	%r9d, %eax		/* Set number of SSE registers.  */

	/* Load up all argument registers.  */
	movq	(%r10), %rdi
	movq	0x08(%r10), %rsi
	movq	0x10(%r10), %rdx
	movq	0x18(%r10), %rcx
	movq	0x20(%r10), %r8
	movq	0x28(%r10), %r9
	movl	0xb0(%r10), %eax	/* SSE count; supersedes %r9d above.  */
	testl	%eax, %eax
	jnz	L(load_sse)		/* Load XMM regs only when some are used. */
L(ret_from_load_sse):

	/* Deallocate the reg arg area, except for r10, then load via pop.  */
	leaq	0xb8(%r10), %rsp
	popq	%r10

	/* Call the user function.  */
	call	*%r11

	/* Deallocate stack arg area; local stack frame in redzone.  */
	leaq	24(%rbp), %rsp

	movq	0(%rbp), %rcx		/* Reload flags.  */
	movq	8(%rbp), %rdi		/* Reload raddr.  */
	movq	16(%rbp), %rbp		/* Reload old frame pointer.  */
L(UW2):
	/* cfi_remember_state */
	/* cfi_def_cfa(%rsp, 8) */
	/* cfi_restore(%rbp) */

	/* The first byte of the flags contains the FFI_TYPE.  Use it to
	   index into the 8-byte-per-entry store table below.  */
	cmpb	$UNIX64_RET_LAST, %cl
	movzbl	%cl, %r10d
	leaq	L(store_table)(%rip), %r11
	ja	L(sa)			/* Out-of-range type: abort.  */
	leaq	(%r11, %r10, 8), %r10

	/* Prep for the structure cases: scratch area in redzone.  */
	leaq	-20(%rsp), %rsi
	jmp	*%r10

	.balign	8
L(store_table):
E(L(store_table), UNIX64_RET_VOID)
	ret
E(L(store_table), UNIX64_RET_UINT8)
	movzbl	%al, %eax
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_UINT16)
	movzwl	%ax, %eax
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_UINT32)
	movl	%eax, %eax		/* Zero-extends to 64 bits.  */
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_SINT8)
	movsbq	%al, %rax
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_SINT16)
	movswq	%ax, %rax
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_SINT32)
	cltq
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_INT64)
	movq	%rax, (%rdi)
	ret
E(L(store_table), UNIX64_RET_XMM32)
	movd	%xmm0, (%rdi)
	ret
E(L(store_table), UNIX64_RET_XMM64)
	movq	%xmm0, (%rdi)
	ret
E(L(store_table), UNIX64_RET_X87)
	fstpt	(%rdi)
	ret
E(L(store_table), UNIX64_RET_X87_2)
	fstpt	(%rdi)
	fstpt	16(%rdi)
	ret
E(L(store_table), UNIX64_RET_ST_XMM0_RAX)
	movq	%rax, 8(%rsi)
	jmp	L(s3)
E(L(store_table), UNIX64_RET_ST_RAX_XMM0)
	movq	%xmm0, 8(%rsi)
	jmp	L(s2)
E(L(store_table), UNIX64_RET_ST_XMM0_XMM1)
	movq	%xmm1, 8(%rsi)
	jmp	L(s3)
E(L(store_table), UNIX64_RET_ST_RAX_RDX)
	movq	%rdx, 8(%rsi)
L(s2):
	/* Struct case: first word in %rax; copy from redzone scratch
	   (%rsi) to raddr (%rdi), size taken from the flags word.  */
	movq	%rax, (%rsi)
	shrl	$UNIX64_SIZE_SHIFT, %ecx
	rep movsb
	ret
	.balign 8
L(s3):
	/* Struct case: first word in %xmm0; copy as above.  */
	movq	%xmm0, (%rsi)
	shrl	$UNIX64_SIZE_SHIFT, %ecx
	rep movsb
	ret

L(sa):	call	PLT(C(abort))

	/* Many times we can avoid loading any SSE registers at all.
	   It's not worth an indirect jump to load the exact set of
	   SSE registers needed; zero or all is a good compromise.  */
	.balign 2
L(UW3):
	/* cfi_restore_state */
L(load_sse):
	movdqa	0x30(%r10), %xmm0
	movdqa	0x40(%r10), %xmm1
	movdqa	0x50(%r10), %xmm2
	movdqa	0x60(%r10), %xmm3
	movdqa	0x70(%r10), %xmm4
	movdqa	0x80(%r10), %xmm5
	movdqa	0x90(%r10), %xmm6
	movdqa	0xa0(%r10), %xmm7
	jmp	L(ret_from_load_sse)

L(UW4):
ENDF(C(ffi_call_unix64))

/* Closure frame layout: 6 general registers, 8 vector registers,
   32 bytes of rvalue, 8 bytes of alignment.  */
#define ffi_closure_OFS_G	0
#define ffi_closure_OFS_V	(6*8)
#define ffi_closure_OFS_RVALUE	(ffi_closure_OFS_V + 8*16)
#define ffi_closure_FS		(ffi_closure_OFS_RVALUE + 32 + 8)

/* The location of rvalue within the red zone after deallocating the frame.  */
#define ffi_closure_RED_RVALUE	(ffi_closure_OFS_RVALUE - ffi_closure_FS)

	.balign	2
	.globl	C(ffi_closure_unix64_sse)
	FFI_HIDDEN(C(ffi_closure_unix64_sse))

/* SSE variant of the closure entry point: spills all eight XMM argument
   registers into the frame, then joins the common path at L(sse_entry1).
   On entry %r10 points at the closure (its fields are read below) --
   presumably set up by the trampoline; confirm against ffi64.c.  */
C(ffi_closure_unix64_sse):
L(UW5):
	subq	$ffi_closure_FS, %rsp
L(UW6):
	/* cfi_adjust_cfa_offset(ffi_closure_FS) */

	movdqa	%xmm0, ffi_closure_OFS_V+0x00(%rsp)
	movdqa	%xmm1, ffi_closure_OFS_V+0x10(%rsp)
	movdqa	%xmm2, ffi_closure_OFS_V+0x20(%rsp)
	movdqa	%xmm3, ffi_closure_OFS_V+0x30(%rsp)
	movdqa	%xmm4, ffi_closure_OFS_V+0x40(%rsp)
	movdqa	%xmm5, ffi_closure_OFS_V+0x50(%rsp)
	movdqa	%xmm6, ffi_closure_OFS_V+0x60(%rsp)
	movdqa	%xmm7, ffi_closure_OFS_V+0x70(%rsp)
	jmp	L(sse_entry1)

L(UW7):
ENDF(C(ffi_closure_unix64_sse))

	.balign	2
	.globl	C(ffi_closure_unix64)
	FFI_HIDDEN(C(ffi_closure_unix64))

/* Non-SSE closure entry point: saves only the integer argument
   registers, then calls ffi_closure_unix64_inner to marshal the
   arguments and invoke the user function.  */
C(ffi_closure_unix64):
L(UW8):
	subq	$ffi_closure_FS, %rsp
L(UW9):
	/* cfi_adjust_cfa_offset(ffi_closure_FS) */
L(sse_entry1):
	movq	%rdi, ffi_closure_OFS_G+0x00(%rsp)
	movq	%rsi, ffi_closure_OFS_G+0x08(%rsp)
	movq	%rdx, ffi_closure_OFS_G+0x10(%rsp)
	movq	%rcx, ffi_closure_OFS_G+0x18(%rsp)
	movq	%r8, ffi_closure_OFS_G+0x20(%rsp)
	movq	%r9, ffi_closure_OFS_G+0x28(%rsp)

	/* Fetch cif/fun/user_data from the closure data stored just
	   past the trampoline code (%r10 + FFI_TRAMPOLINE_SIZE).  */
#ifdef __ILP32__
	movl	FFI_TRAMPOLINE_SIZE(%r10), %edi		/* Load cif */
	movl	FFI_TRAMPOLINE_SIZE+4(%r10), %esi	/* Load fun */
	movl	FFI_TRAMPOLINE_SIZE+8(%r10), %edx	/* Load user_data */
#else
	movq	FFI_TRAMPOLINE_SIZE(%r10), %rdi		/* Load cif */
	movq	FFI_TRAMPOLINE_SIZE+8(%r10), %rsi	/* Load fun */
	movq	FFI_TRAMPOLINE_SIZE+16(%r10), %rdx	/* Load user_data */
#endif
L(do_closure):
	leaq	ffi_closure_OFS_RVALUE(%rsp), %rcx	/* Load rvalue */
	movq	%rsp, %r8				/* Load reg_args */
	leaq	ffi_closure_FS+8(%rsp), %r9		/* Load argp */
	call	PLT(C(ffi_closure_unix64_inner))

	/* Deallocate stack frame early; return value is now in redzone.  */
	addq	$ffi_closure_FS, %rsp
L(UW10):
	/* cfi_adjust_cfa_offset(-ffi_closure_FS) */

	/* The first byte of the return value contains the FFI_TYPE.
	   Use it to index into the 8-byte-per-entry load table below.  */
	cmpb	$UNIX64_RET_LAST, %al
	movzbl	%al, %r10d
	leaq	L(load_table)(%rip), %r11
	ja	L(la)			/* Out-of-range type: abort.  */
	leaq	(%r11, %r10, 8), %r10
	leaq	ffi_closure_RED_RVALUE(%rsp), %rsi
	jmp	*%r10

	.balign	8
L(load_table):
E(L(load_table), UNIX64_RET_VOID)
	ret
E(L(load_table), UNIX64_RET_UINT8)
	movzbl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_UINT16)
	movzwl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_UINT32)
	movl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_SINT8)
	movsbl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_SINT16)
	movswl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_SINT32)
	movl	(%rsi), %eax
	ret
E(L(load_table), UNIX64_RET_INT64)
	movq	(%rsi), %rax
	ret
E(L(load_table), UNIX64_RET_XMM32)
	movd	(%rsi), %xmm0
	ret
E(L(load_table), UNIX64_RET_XMM64)
	movq	(%rsi), %xmm0
	ret
E(L(load_table), UNIX64_RET_X87)
	fldt	(%rsi)
	ret
E(L(load_table), UNIX64_RET_X87_2)
	fldt	16(%rsi)
	fldt	(%rsi)
	ret
E(L(load_table), UNIX64_RET_ST_XMM0_RAX)
	movq	8(%rsi), %rax
	jmp	L(l3)
E(L(load_table), UNIX64_RET_ST_RAX_XMM0)
	movq	8(%rsi), %xmm0
	jmp	L(l2)
E(L(load_table), UNIX64_RET_ST_XMM0_XMM1)
	movq	8(%rsi), %xmm1
	jmp	L(l3)
E(L(load_table), UNIX64_RET_ST_RAX_RDX)
	movq	8(%rsi), %rdx
L(l2):
	movq	(%rsi), %rax
	ret
	.balign	8
L(l3):
	movq	(%rsi), %xmm0
	ret

L(la):	call	PLT(C(abort))

L(UW11):
ENDF(C(ffi_closure_unix64))
/* Go closure entry points: same frame and common tail (L(do_closure))
   as the ordinary closures above, but cif/fun are read from the Go
   closure descriptor in %r10, and %r10 itself is passed as user_data.  */

	.balign	2
	.globl	C(ffi_go_closure_unix64_sse)
	FFI_HIDDEN(C(ffi_go_closure_unix64_sse))

/* SSE variant: spill all eight XMM argument registers, then join the
   common Go-closure path at L(sse_entry2).  */
C(ffi_go_closure_unix64_sse):
L(UW12):
	subq	$ffi_closure_FS, %rsp
L(UW13):
	/* cfi_adjust_cfa_offset(ffi_closure_FS) */

	movdqa	%xmm0, ffi_closure_OFS_V+0x00(%rsp)
	movdqa	%xmm1, ffi_closure_OFS_V+0x10(%rsp)
	movdqa	%xmm2, ffi_closure_OFS_V+0x20(%rsp)
	movdqa	%xmm3, ffi_closure_OFS_V+0x30(%rsp)
	movdqa	%xmm4, ffi_closure_OFS_V+0x40(%rsp)
	movdqa	%xmm5, ffi_closure_OFS_V+0x50(%rsp)
	movdqa	%xmm6, ffi_closure_OFS_V+0x60(%rsp)
	movdqa	%xmm7, ffi_closure_OFS_V+0x70(%rsp)
	jmp	L(sse_entry2)

L(UW14):
ENDF(C(ffi_go_closure_unix64_sse))

	.balign	2
	.globl	C(ffi_go_closure_unix64)
	FFI_HIDDEN(C(ffi_go_closure_unix64))

C(ffi_go_closure_unix64):
L(UW15):
	subq	$ffi_closure_FS, %rsp
L(UW16):
	/* cfi_adjust_cfa_offset(ffi_closure_FS) */
L(sse_entry2):
	movq	%rdi, ffi_closure_OFS_G+0x00(%rsp)
	movq	%rsi, ffi_closure_OFS_G+0x08(%rsp)
	movq	%rdx, ffi_closure_OFS_G+0x10(%rsp)
	movq	%rcx, ffi_closure_OFS_G+0x18(%rsp)
	movq	%r8, ffi_closure_OFS_G+0x20(%rsp)
	movq	%r9, ffi_closure_OFS_G+0x28(%rsp)

#ifdef __ILP32__
	movl	4(%r10), %edi		/* Load cif */
	movl	8(%r10), %esi		/* Load fun */
	movl	%r10d, %edx		/* Load closure (user_data) */
#else
	movq	8(%r10), %rdi		/* Load cif */
	movq	16(%r10), %rsi		/* Load fun */
	movq	%r10, %rdx		/* Load closure (user_data) */
#endif
	jmp	L(do_closure)

L(UW17):
ENDF(C(ffi_go_closure_unix64))

/* Sadly, OSX cctools-as doesn't understand .cfi directives at all.
   Hence the unwind information below is hand-rolled: the DW_CFA_*
   byte sequences encode exactly the cfi_* annotations commented in
   the code above, delimited by the L(UWnn) labels.  */

#ifdef __APPLE__
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EHFrame0:
#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
.section .eh_frame,"a",@unwind
#else
.section .eh_frame,"a",@progbits
#endif

#ifdef HAVE_AS_X86_PCREL
# define PCREL(X)	X - .
#else
# define PCREL(X)	X@rel
#endif

/* Simplify advancing between labels.  Assume DW_CFA_advance_loc1 fits.  */
#define ADV(N, P)	.byte 2, L(N)-L(P)

	.balign 8
L(CIE):
	.set	L(set0),L(ECIE)-L(SCIE)
	.long	L(set0)			/* CIE Length */
L(SCIE):
	.long	0			/* CIE Identifier Tag */
	.byte	1			/* CIE Version */
	.ascii	"zR\0"			/* CIE Augmentation */
	.byte	1			/* CIE Code Alignment Factor */
	.byte	0x78			/* CIE Data Alignment Factor */
	.byte	0x10			/* CIE RA Column */
	.byte	1			/* Augmentation size */
	.byte	0x1b			/* FDE Encoding (pcrel sdata4) */
	.byte	0xc, 7, 8		/* DW_CFA_def_cfa, %rsp offset 8 */
	.byte	0x80+16, 1		/* DW_CFA_offset, %rip offset 1*-8 */
	.balign 8
L(ECIE):

	/* FDE for ffi_call_unix64 (UW0..UW4).  */
	.set	L(set1),L(EFDE1)-L(SFDE1)
	.long	L(set1)			/* FDE Length */
L(SFDE1):
	.long	L(SFDE1)-L(CIE)		/* FDE CIE offset */
	.long	PCREL(L(UW0))		/* Initial location */
	.long	L(UW4)-L(UW0)		/* Address range */
	.byte	0			/* Augmentation size */
	ADV(UW1, UW0)
	.byte	0xc, 6, 32		/* DW_CFA_def_cfa, %rbp 32 */
	.byte	0x80+6, 2		/* DW_CFA_offset, %rbp 2*-8 */
	ADV(UW2, UW1)
	.byte	0xa			/* DW_CFA_remember_state */
	.byte	0xc, 7, 8		/* DW_CFA_def_cfa, %rsp 8 */
	.byte	0xc0+6			/* DW_CFA_restore, %rbp */
	ADV(UW3, UW2)
	.byte	0xb			/* DW_CFA_restore_state */
	.balign 8
L(EFDE1):

	/* FDE for ffi_closure_unix64_sse (UW5..UW7).  */
	.set	L(set2),L(EFDE2)-L(SFDE2)
	.long	L(set2)			/* FDE Length */
L(SFDE2):
	.long	L(SFDE2)-L(CIE)		/* FDE CIE offset */
	.long	PCREL(L(UW5))		/* Initial location */
	.long	L(UW7)-L(UW5)		/* Address range */
	.byte	0			/* Augmentation size */
	ADV(UW6, UW5)
	.byte	0xe			/* DW_CFA_def_cfa_offset */
	.byte	ffi_closure_FS + 8, 1	/* uleb128, assuming 128 <= FS < 255 */
	.balign 8
L(EFDE2):

	/* FDE for ffi_closure_unix64 (UW8..UW11).  */
	.set	L(set3),L(EFDE3)-L(SFDE3)
	.long	L(set3)			/* FDE Length */
L(SFDE3):
	.long	L(SFDE3)-L(CIE)		/* FDE CIE offset */
	.long	PCREL(L(UW8))		/* Initial location */
	.long	L(UW11)-L(UW8)		/* Address range */
	.byte	0			/* Augmentation size */
	ADV(UW9, UW8)
	.byte	0xe			/* DW_CFA_def_cfa_offset */
	.byte	ffi_closure_FS + 8, 1	/* uleb128, assuming 128 <= FS < 255 */
	ADV(UW10, UW9)
	.byte	0xe, 8			/* DW_CFA_def_cfa_offset 8 */
L(EFDE3):

	/* FDE for ffi_go_closure_unix64_sse (UW12..UW14).  */
	.set	L(set4),L(EFDE4)-L(SFDE4)
	.long	L(set4)			/* FDE Length */
L(SFDE4):
	.long	L(SFDE4)-L(CIE)		/* FDE CIE offset */
	.long	PCREL(L(UW12))		/* Initial location */
	.long	L(UW14)-L(UW12)		/* Address range */
	.byte	0			/* Augmentation size */
	ADV(UW13, UW12)
	.byte	0xe			/* DW_CFA_def_cfa_offset */
	.byte	ffi_closure_FS + 8, 1	/* uleb128, assuming 128 <= FS < 255 */
	.balign 8
L(EFDE4):

	/* FDE for ffi_go_closure_unix64 (UW15..UW17).  */
	.set	L(set5),L(EFDE5)-L(SFDE5)
	.long	L(set5)			/* FDE Length */
L(SFDE5):
	.long	L(SFDE5)-L(CIE)		/* FDE CIE offset */
	.long	PCREL(L(UW15))		/* Initial location */
	.long	L(UW17)-L(UW15)		/* Address range */
	.byte	0			/* Augmentation size */
	ADV(UW16, UW15)
	.byte	0xe			/* DW_CFA_def_cfa_offset */
	.byte	ffi_closure_FS + 8, 1	/* uleb128, assuming 128 <= FS < 255 */
	.balign 8
L(EFDE5):
#ifdef __APPLE__
	.subsections_via_symbols
	.section __LD,__compact_unwind,regular,debug

	/* compact unwind for ffi_call_unix64 */
	.quad	C(ffi_call_unix64)
	.set	L1,L(UW4)-L(UW0)
	.long	L1
	.long	0x04000000	/* use dwarf unwind info */
	.quad	0
	.quad	0

	/* compact unwind for ffi_closure_unix64_sse */
	.quad	C(ffi_closure_unix64_sse)
	.set	L2,L(UW7)-L(UW5)
	.long	L2
	.long	0x04000000	/* use dwarf unwind info */
	.quad	0
	.quad	0

	/* compact unwind for ffi_closure_unix64 */
	.quad	C(ffi_closure_unix64)
	.set	L3,L(UW11)-L(UW8)
	.long	L3
	.long	0x04000000	/* use dwarf unwind info */
	.quad	0
	.quad	0

	/* compact unwind for ffi_go_closure_unix64_sse */
	.quad	C(ffi_go_closure_unix64_sse)
	.set	L4,L(UW14)-L(UW12)
	.long	L4
	.long	0x04000000	/* use dwarf unwind info */
	.quad	0
	.quad	0

	/* compact unwind for ffi_go_closure_unix64 */
	.quad	C(ffi_go_closure_unix64)
	.set	L5,L(UW17)-L(UW15)
	.long	L5
	.long	0x04000000	/* use dwarf unwind info */
	.quad	0
	.quad	0
#endif

#endif /* __x86_64__ */
#if defined __ELF__ && defined __linux__
	.section .note.GNU-stack,"",@progbits
#endif