@ xref: /aosp_15_r20/external/libdav1d/tests/checkasm/arm/checkasm_32.S (revision c09093415860a1c2373dacd84c4fde00c507cdfd)
/******************************************************************************
 * Copyright © 2018, VideoLAN and dav1d authors
 * Copyright © 2015 Martin Storsjo
 * Copyright © 2015 Janne Grunau
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
28
@ NOTE(review): presumably consumed by asm.S to prefix exported symbol names
@ with "checkasm_" — confirm against src/arm/asm.S.
#define PRIVATE_PREFIX checkasm_

#include "src/arm/asm.S"
#include "src/arm/32/util.S"
33
@ 64 bytes of fixed seed data used to detect register clobbering: the code
@ below loads it as eight 64-bit values into d8-d15 (vfp variant) and as the
@ first eight 32-bit words into r4-r11. align=3 gives 8-byte alignment for
@ the 64-bit loads.
const register_init, align=3
        .quad 0x21f86d66c8ca00ce
        .quad 0x75b6ba21077c48ad
        .quad 0xed56bb2dcb3c7736
        .quad 0x8bda43d3fd1a7e06
        .quad 0xb64a9c9e5d318408
        .quad 0xdf9a54b303f1d3a3
        .quad 0x4a75479abd64e097
        .quad 0x249214109d5d1c88
endconst
44
@ Failure messages passed (in r0) to checkasm_fail_func; the %x/%d
@ conversions are filled in from the value the checking code leaves in r1.
const error_message_fpscr
        .asciz "failed to preserve register FPSCR, changed bits: %x"
error_message_gpr:
        .asciz "failed to preserve register r%d"
error_message_vfp:
        .asciz "failed to preserve register d%d"
error_message_stack:
        .asciz "failed to preserve stack"
endconst
54
@ max number of args used by any asm function.
#define MAX_ARGS 15

@ Bytes of stack needed for the arguments passed on the stack, i.e. every
@ argument beyond the four passed in r0-r3.
#define ARG_STACK 4*(MAX_ARGS - 4)

@ Align the used stack space to 8 to preserve the stack alignment.
@ +8 for stack canary reference.
@ ("pushed" is defined by the clobbercheck macro before this is expanded.)
#define ARG_STACK_A (((ARG_STACK + pushed + 7) & ~7) - pushed + 8)
63
@ clobbercheck: emits one checked_call wrapper. \variant is "novfp" or "vfp";
@ the vfp variant additionally saves, seeds and verifies d8-d15 and FPSCR.
@
@ checked_call_\variant(func, dummy, args...) seeds all callee-saved
@ registers with known values from register_init, calls func with the
@ remaining arguments shifted into place, then verifies that func preserved
@ the callee-saved registers and did not write below its own stack frame.
@ Any violation is reported via checkasm_fail_func; the callee's return
@ value in r0/r1 is passed through either way.
.macro clobbercheck variant
@ Bytes pushed by the prologue below: r4-r11 + lr = 9 words.
.equ pushed, 4*9
function checked_call_\variant, export=1
        push            {r4-r11, lr}
.ifc \variant, vfp
        vpush           {d8-d15}
        fmrx            r4,  FPSCR          @ save caller's FPSCR; compared at
        push            {r4}                @ [sp, #8] and restored on exit
.equ pushed, pushed + 16*4 + 4              @ d8-d15 (16 words) + FPSCR word
.endif

        @ Seed the callee-saved registers with fixed values so any
        @ clobbering by the called function is detectable afterwards.
        movrel          r12, register_init
.ifc \variant, vfp
        vldm            r12, {d8-d15}
.endif
        ldm             r12, {r4-r11}

        sub             sp,  sp,  #ARG_STACK_A
.equ pos, 0
        @ Copy the callee's stack-passed arguments (our caller's stack args
        @ beyond the two words consumed as r2/r3 below, hence the +8) down
        @ into the argument area at the bottom of the new frame.
.rept MAX_ARGS-4
        ldr             r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
        str             r12, [sp, #pos]
.equ pos, pos + 4
.endr

        @ For stack overflows, the callee is free to overwrite the parameters
        @ that were passed on the stack (if any), so we can only check after
        @ that point. First figure out how many parameters the function
        @ really took on the stack:
        ldr             r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
        @ Load the first non-parameter value from the stack, that should be
        @ left untouched by the function. Store a copy of it inverted, so that
        @ e.g. overwriting everything with zero would be noticed.
        ldr             r12, [sp, r12, lsl #2]
        mvn             r12, r12
        str             r12, [sp, #ARG_STACK_A - 4]

        @ Move the target out of the way and shift the arguments into place:
        @ the target's first two args were passed to us in r2/r3, the next
        @ two in our first two incoming stack slots.
        mov             r12, r0
        mov             r0,  r2
        mov             r1,  r3
        ldr             r2,  [sp, #ARG_STACK_A + pushed]
        ldr             r3,  [sp, #ARG_STACK_A + pushed + 4]
        @ Call the target function
        v4blx           r12

        @ Load the number of stack parameters, stack canary and its reference
        ldr             r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
        ldr             r2,  [sp, r12, lsl #2]
        ldr             r3,  [sp, #ARG_STACK_A - 4]

        add             sp,  sp,  #ARG_STACK_A
        push            {r0, r1}            @ preserve the callee's return value

        mvn             r3,  r3             @ undo the inversion of the reference
        cmp             r2,  r3
        bne             5f                  @ mismatch -> stack was clobbered

        movrel          r12, register_init
.ifc \variant, vfp
@ Compare one double register against its register_init seed; \offset is the
@ index (in doubles) into register_init. Clobbers r0, r2, r3, lr.
.macro check_reg_vfp, dreg, offset
        ldr             r2,  [r12, #(8 * (\offset))]
        ldr             r3,  [r12, #(8 * (\offset)) + 4]
        vmov            r0,  lr,  \dreg
        eor             r2,  r2,  r0
        eor             r3,  r3,  lr
        orrs            r2,  r2,  r3
        bne             4f
.endm

.irp n, 8, 9, 10, 11, 12, 13, 14, 15
        @ keep track of the checked double/SIMD register
        mov             r1,  #\n
        check_reg_vfp   d\n, \n-8
.endr
.purgem check_reg_vfp

        fmrx            r1,  FPSCR
        ldr             r3,  [sp, #8]       @ FPSCR value saved in the prologue
        eor             r1,  r1,  r3
        @ Ignore changes in bits 0-4 and 7
        bic             r1,  r1,  #0x9f
        @ Ignore changes in the topmost 5 bits
        bics            r1,  r1,  #0xf8000000
        bne             3f
.endif

        @ keep track of the checked GPR
        mov             r1,  #4
@ Compare one or two GPRs against the next register_init words (r12 walks
@ through register_init via post-increment); r1 numbers the register being
@ checked so the error message can report it.
.macro check_reg reg1, reg2=
        ldr             r2,  [r12], #4
        ldr             r3,  [r12], #4
        eors            r2,  r2,  \reg1
        bne             2f
        add             r1,  r1,  #1
.ifnb \reg2
        eors            r3,  r3,  \reg2
        bne             2f
.endif
        add             r1,  r1,  #1
.endm
        check_reg       r4,  r5
        check_reg       r6,  r7
@ r9 is a volatile register in the ios ABI
#ifdef __APPLE__
        check_reg       r8
#else
        check_reg       r8,  r9
#endif
        check_reg       r10, r11
.purgem check_reg

        b               0f
5:      @ stack canary was overwritten
        movrel          r0, error_message_stack
        b               1f
4:      @ a VFP register was clobbered (its number is in r1)
        movrel          r0, error_message_vfp
        b               1f
3:      @ FPSCR was not preserved (changed bits in r1)
        movrel          r0, error_message_fpscr
        b               1f
2:      @ a GPR was clobbered (its number is in r1)
        movrel          r0, error_message_gpr
1:
        @ Report the failure: r0 = format string, r1 = value for the %d/%x
#ifdef PREFIX
        bl              _checkasm_fail_func
#else
        bl              checkasm_fail_func
#endif
0:
        pop             {r0, r1}            @ restore the return value
.ifc \variant, vfp
        pop             {r2}
        fmxr            FPSCR, r2           @ restore the caller's FPSCR
        vpop            {d8-d15}
.endif
        pop             {r4-r11, pc}
endfunc
.endm
203
@ Instantiate both wrapper flavours: checked_call_novfp and checked_call_vfp
@ (the latter additionally seeds and checks d8-d15 and FPSCR).
clobbercheck novfp
clobbercheck vfp
206