// asmcheck

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

// This file contains codegen tests related to bit field
// insertion/extraction simplifications/optimizations.

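// extr
// EXTR extracts a register-width result from the concatenation of two
// registers at a given bit offset, so x<<7 combined with x2>>57 (by +, | or ^)
// should collapse into a single EXTR with offset 57 (7+57 = 64); EXTRW is the
// 32-bit form.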
func extr1(x, x2 uint64) uint64 {
	return x<<7 + x2>>57 // arm64:"EXTR\t[$]57,"
}

func extr2(x, x2 uint64) uint64 {
	return x<<7 | x2>>57 // arm64:"EXTR\t[$]57,"
}

func extr3(x, x2 uint64) uint64 {
	return x<<7 ^ x2>>57 // arm64:"EXTR\t[$]57,"
}

func extr4(x, x2 uint32) uint32 {
	return x<<7 + x2>>25 // arm64:"EXTRW\t[$]25,"
}

func extr5(x, x2 uint32) uint32 {
	return x<<7 | x2>>25 // arm64:"EXTRW\t[$]25,"
}

func extr6(x, x2 uint32) uint32 {
	return x<<7 ^ x2>>25 // arm64:"EXTRW\t[$]25,"
}

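// A left shift followed by a right shift of the same amount on an unsigned
// value keeps only the low (width-shift) bits, so it should compile to a
// single AND with a mask rather than a shift pair; e.g. for uint32,
// (x<<29)>>29 keeps the low 3 bits and becomes AND $7.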
// check 32-bit shift masking
func mask32(x uint32) uint32 {
	return (x << 29) >> 29 // arm64:"AND\t[$]7, R[0-9]+",-"LSR",-"LSL"
}

// check 16-bit shift masking
func mask16(x uint16) uint16 {
	return (x << 14) >> 14 // arm64:"AND\t[$]3, R[0-9]+",-"LSR",-"LSL"
}

// check 8-bit shift masking
func mask8(x uint8) uint8 {
	return (x << 7) >> 7 // arm64:"AND\t[$]1, R[0-9]+",-"LSR",-"LSL"
}

func maskshift(x uint64) uint64 {
	// arm64:"AND\t[$]4095, R[0-9]+",-"LSL",-"LSR",-"UBFIZ",-"UBFX"
	return ((x << 5) & (0xfff << 5)) >> 5
}

// bitfield ops
// bfi
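// BFI $lsb, Rn, $width copies the low 'width' bits of the source into the
// destination starting at bit 'lsb', leaving the other destination bits
// unchanged; bfi1 places the low 12 bits of x at bit 4 of y.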
func bfi1(x, y uint64) uint64 {
	// arm64:"BFI\t[$]4, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
	return ((x & 0xfff) << 4) | (y & 0xffffffffffff000f)
}

func bfi2(x, y uint64) uint64 {
	// arm64:"BFI\t[$]12, R[0-9]+, [$]40",-"LSL",-"LSR",-"AND"
	return (x << 24 >> 12) | (y & 0xfff0000000000fff)
}

// bfxil
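// BFXIL $lsb, Rn, $width extracts a 'width'-bit field starting at bit 'lsb'
// of the source and inserts it into the low bits of the destination, leaving
// the destination's upper bits unchanged; bfxil1 copies bits 5..16 of x into
// the low 12 bits of y.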
func bfxil1(x, y uint64) uint64 {
	// arm64:"BFXIL\t[$]5, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
	return ((x >> 5) & 0xfff) | (y & 0xfffffffffffff000)
}

func bfxil2(x, y uint64) uint64 {
	// arm64:"BFXIL\t[$]12, R[0-9]+, [$]40",-"LSL",-"LSR",-"AND"
	return (x << 12 >> 24) | (y & 0xffffff0000000000)
}

// sbfiz
// merge shifts into sbfiz: (x << lc) >> rc && lc > rc.
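// SBFIZ $lsb, Rn, $width takes the low 'width' bits of the source,
// sign-extends them, and shifts the field left by 'lsb', zeroing the bits
// below; here lsb = lc-rc and width = size-lc, so (x<<4)>>3 on int64 becomes
// SBFIZ $1, ..., $60.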
func sbfiz1(x int64) int64 {
	// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
	return (x << 4) >> 3
}

// merge shift and sign-extension into sbfiz.
func sbfiz2(x int32) int64 {
	return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]29",-"LSL"
}

func sbfiz3(x int16) int64 {
	return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]13",-"LSL"
}

func sbfiz4(x int8) int64 {
	return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]5",-"LSL"
}

// sbfiz combinations.
// merge shift with sbfiz into sbfiz.
func sbfiz5(x int32) int32 {
	// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
	return (x << 4) >> 3
}

func sbfiz6(x int16) int64 {
	return int64(x+1) << 3 // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}

func sbfiz7(x int8) int64 {
	return int64(x+1) << 62 // arm64:"SBFIZ\t[$]62, R[0-9]+, [$]2",-"LSL"
}

func sbfiz8(x int32) int64 {
	return int64(x+1) << 40 // arm64:"SBFIZ\t[$]40, R[0-9]+, [$]24",-"LSL"
}

// sbfx
// merge shifts into sbfx: (x << lc) >> rc && lc <= rc.
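// SBFX $lsb, Rn, $width extracts the 'width'-bit field starting at bit 'lsb'
// of the source and sign-extends it into the destination; here lsb = rc-lc
// and width = size-rc, so (x<<3)>>4 on int64 becomes SBFX $1, ..., $60.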
func sbfx1(x int64) int64 {
	return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
}

func sbfx2(x int64) int64 {
	return (x << 60) >> 60 // arm64:"SBFX\t[$]0, R[0-9]+, [$]4",-"LSL",-"ASR"
}

// merge shift and sign-extension into sbfx.
func sbfx3(x int32) int64 {
	return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]29",-"ASR"
}

func sbfx4(x int16) int64 {
	return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]13",-"ASR"
}

func sbfx5(x int8) int64 {
	return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]5",-"ASR"
}

func sbfx6(x int32) int64 {
	return int64(x >> 30) // arm64:"SBFX\t[$]30, R[0-9]+, [$]2"
}

func sbfx7(x int16) int64 {
	return int64(x >> 10) // arm64:"SBFX\t[$]10, R[0-9]+, [$]6"
}

func sbfx8(x int8) int64 {
	return int64(x >> 5) // arm64:"SBFX\t[$]5, R[0-9]+, [$]3"
}

// sbfx combinations.
// merge shifts with sbfiz into sbfx.
func sbfx9(x int32) int32 {
	return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
}

// merge sbfx and sign-extension into sbfx.
func sbfx10(x int32) int64 {
	c := x + 5
	return int64(c >> 20) // arm64:"SBFX\t[$]20, R[0-9]+, [$]12",-"MOVW\tR[0-9]+, R[0-9]+"
}

// ubfiz
// merge shifts into ubfiz: (x << lc) >> rc && lc > rc.
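// UBFIZ $lsb, Rn, $width is the unsigned counterpart of SBFIZ: the low
// 'width' bits of the source are shifted left by 'lsb' and every other bit is
// zeroed (lsb = lc-rc, width = size-lc). On s390x the same patterns are
// expressed with RISBGZ (rotate then insert selected bits, zeroing the rest).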
func ubfiz1(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
	// s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
	return (x << 4) >> 3
}

// merge shift and zero-extension into ubfiz.
func ubfiz2(x uint32) uint64 {
	return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL"
}

func ubfiz3(x uint16) uint64 {
	return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}

func ubfiz4(x uint8) uint64 {
	return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL"
}

func ubfiz5(x uint8) uint64 {
	return uint64(x) << 60 // arm64:"UBFIZ\t[$]60, R[0-9]+, [$]4",-"LSL"
}

func ubfiz6(x uint32) uint64 {
	return uint64(x << 30) // arm64:"UBFIZ\t[$]30, R[0-9]+, [$]2",
}

func ubfiz7(x uint16) uint64 {
	return uint64(x << 10) // arm64:"UBFIZ\t[$]10, R[0-9]+, [$]6",
}

func ubfiz8(x uint8) uint64 {
	return uint64(x << 7) // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]1",
}

// merge ANDconst into ubfiz.
func ubfiz9(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
	// s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
	return (x & 0xfff) << 3
}

func ubfiz10(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
	// s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
	return (x << 4) & 0xfff0
}

// ubfiz combinations.
func ubfiz11(x uint32) uint32 {
	// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"LSR"
	return (x << 4) >> 3
}

func ubfiz12(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR"
	// s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND"
	return ((x & 0xfffff) << 4) >> 3
}

func ubfiz13(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]5, R[0-9]+, [$]13",-"LSL",-"LSR",-"AND"
	return ((x << 3) & 0xffff) << 2
}

func ubfiz14(x uint64) uint64 {
	// arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
	// s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND"
	return ((x << 5) & (0xfff << 5)) << 2
}

// ubfx
// merge shifts into ubfx: (x << lc) >> rc && lc < rc.
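// UBFX $lsb, Rn, $width extracts the 'width'-bit field starting at bit 'lsb'
// and zero-extends it (lsb = rc-lc, width = size-rc), so (x<<1)>>2 on uint64
// becomes UBFX $1, ..., $62.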
func ubfx1(x uint64) uint64 {
	// arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
	// s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
	return (x << 1) >> 2
}

// merge shift and zero-extension into ubfx.
func ubfx2(x uint32) uint64 {
	return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR"
}

func ubfx3(x uint16) uint64 {
	return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR"
}

func ubfx4(x uint8) uint64 {
	return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR"
}

func ubfx5(x uint32) uint64 {
	return uint64(x) >> 30 // arm64:"UBFX\t[$]30, R[0-9]+, [$]2"
}

func ubfx6(x uint16) uint64 {
	return uint64(x) >> 10 // arm64:"UBFX\t[$]10, R[0-9]+, [$]6"
}

func ubfx7(x uint8) uint64 {
	return uint64(x) >> 3 // arm64:"UBFX\t[$]3, R[0-9]+, [$]5"
}

// merge ANDconst into ubfx.
func ubfx8(x uint64) uint64 {
	// arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
	// s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
	return (x >> 25) & 1023
}

func ubfx9(x uint64) uint64 {
	// arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
	// s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
	return (x & 0x0ff0) >> 4
}

// ubfx combinations.
func ubfx10(x uint32) uint32 {
	// arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
	return (x << 1) >> 2
}

func ubfx11(x uint64) uint64 {
	// arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
	// s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND"
	return ((x << 1) >> 2) & 0xfff
}

func ubfx12(x uint64) uint64 {
	// arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND"
	// s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND"
	return ((x >> 3) & 0xfff) >> 1
}

func ubfx13(x uint64) uint64 {
	// arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR"
	// s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD"
	return ((x >> 2) << 5) >> 8
}

func ubfx14(x uint64) uint64 {
	// arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR"
	// s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND"
	return ((x & 0xfffff) << 3) >> 4
}

// merge ubfx and zero-extension into ubfx.
func ubfx15(x uint64) bool {
	midr := x + 10
	part_num := uint16((midr >> 4) & 0xfff)
	if part_num == 0xd0c { // arm64:"UBFX\t[$]4, R[0-9]+, [$]12",-"MOVHU\tR[0-9]+, R[0-9]+"
		return true
	}
	return false
}

// merge ANDconst and ubfx into ubfx.
func ubfx16(x uint64) uint64 {
	// arm64:"UBFX\t[$]4, R[0-9]+, [$]6",-"AND\t[$]63"
	return ((x >> 3) & 0xfff) >> 1 & 0x3f
}

// Check that we don't emit comparisons for constant shifts. Variable shift
// counts need extra comparisons so that counts greater than or equal to the
// operand width produce the result the Go spec requires; a constant count of
// 17 is known to be in range, so no CMP (arm64) or SGT (mips64) should be
// emitted.
//
//go:nosplit
func shift_no_cmp(x int) int {
	// arm64:`LSL\t[$]17`,-`CMP`
	// mips64:`SLLV\t[$]17`,-`SGT`
	return x << 17
}

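// REV16 (REV16W for 32-bit) reverses the bytes within each 16-bit halfword of
// a register, so the shift-and-mask byte-swap idiom written with OR below
// should collapse into a single REV16 with no masking or extra shifts.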
func rev16(c uint64) (uint64, uint64, uint64) {
	// arm64:`REV16`,-`AND`,-`LSR`,-`AND`,-`ORR\tR[0-9]+<<8`
	b1 := ((c & 0xff00ff00ff00ff00) >> 8) | ((c & 0x00ff00ff00ff00ff) << 8)
	// arm64:-`ADD\tR[0-9]+<<8`
	b2 := ((c & 0xff00ff00ff00ff00) >> 8) + ((c & 0x00ff00ff00ff00ff) << 8)
	// arm64:-`EOR\tR[0-9]+<<8`
	b3 := ((c & 0xff00ff00ff00ff00) >> 8) ^ ((c & 0x00ff00ff00ff00ff) << 8)
	return b1, b2, b3
}

func rev16w(c uint32) (uint32, uint32, uint32) {
	// arm64:`REV16W`,-`AND`,-`UBFX`,-`AND`,-`ORR\tR[0-9]+<<8`
	b1 := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8)
	// arm64:-`ADD\tR[0-9]+<<8`
	b2 := ((c & 0xff00ff00) >> 8) + ((c & 0x00ff00ff) << 8)
	// arm64:-`EOR\tR[0-9]+<<8`
	b3 := ((c & 0xff00ff00) >> 8) ^ ((c & 0x00ff00ff) << 8)
	return b1, b2, b3
}

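// Each operand below is a zero-extended narrow value shifted right by its own
// width, so every term is statically zero; the whole result should be
// materialized as a single zero register (MOVD ZR) with no widening moves or
// shifted-operand adds.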
func shift(x uint32, y uint16, z uint8) uint64 {
	// arm64:-`MOVWU`,-`LSR\t[$]32`
	a := uint64(x) >> 32
	// arm64:-`MOVHU`
	b := uint64(y) >> 16
	// arm64:-`MOVBU`
	c := uint64(z) >> 8
	// arm64:`MOVD\tZR`,-`ADD\tR[0-9]+>>16`,-`ADD\tR[0-9]+>>8`,
	return a + b + c
}