1// asmcheck
2
3// Copyright 2018 The Go Authors. All rights reserved.
4// Use of this source code is governed by a BSD-style
5// license that can be found in the LICENSE file.
6
7package codegen
8
9import (
10	"cmp"
11	"unsafe"
12)
13
14// This file contains code generation tests related to the comparison
15// operators.
16
17// -------------- //
18//    Equality    //
19// -------------- //
20
// Check that comparisons to constant strings use 2/4/8 byte compares
22
// CompareString1 checks that comparing a string against a 2-byte constant
// compiles to a single 2-byte load-and-compare rather than per-byte code.
func CompareString1(s string) bool {
	// amd64:`CMPW\t\(.*\), [$]`
	// arm64:`MOVHU\t\(.*\), [R]`,`MOVD\t[$]`,`CMPW\tR`
	// ppc64le:`MOVHZ\t\(.*\), [R]`,`CMPW\t.*, [$]`
	// s390x:`MOVHBR\t\(.*\), [R]`,`CMPW\t.*, [$]`
	return s == "xx"
}
30
// CompareString2 checks that comparing a string against a 4-byte constant
// compiles to a single 4-byte load-and-compare.
func CompareString2(s string) bool {
	// amd64:`CMPL\t\(.*\), [$]`
	// arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [$]`
	return s == "xxxx"
}
38
// CompareString3 checks that comparing a string against an 8-byte constant
// uses a full-width 8-byte compare (no 32-bit CMPW instructions appear).
func CompareString3(s string) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	// arm64:-`CMPW\t`
	// ppc64x:-`CMPW\t`
	// s390x:-`CMPW\t`
	return s == "xxxxxxxx"
}
46
// Check that array comparisons use 2/4/8 byte compares
48
// CompareArray1 checks that a [2]byte equality is done with one 2-byte
// compare instead of individual byte loads.
func CompareArray1(a, b [2]byte) bool {
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// arm64:-`MOVBU\t`
	// ppc64le:-`MOVBZ\t`
	// s390x:-`MOVBZ\t`
	return a == b
}
56
// CompareArray2 checks that a 6-byte [3]uint16 equality is split into one
// 4-byte and one 2-byte compare on amd64.
func CompareArray2(a, b [3]uint16) bool {
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}
62
// CompareArray3 checks that a 6-byte [3]int16 equality is split into one
// 4-byte and one 2-byte compare on amd64 (signedness does not matter for ==).
func CompareArray3(a, b [3]int16) bool {
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}
68
// CompareArray4 checks that a 12-byte [12]int8 equality is split into one
// 8-byte and one 4-byte compare on amd64.
func CompareArray4(a, b [12]int8) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}
74
// CompareArray5 checks that a 15-byte array equality is performed with
// 8-byte compares on amd64 rather than a runtime call.
func CompareArray5(a, b [15]byte) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}
79
// CompareArray6 checks that comparing two 4-byte arrays loaded through
// unsafe.Pointer compiles to a single 4-byte load and compare per operand.
// This was a TODO in mapaccess1_faststr
func CompareArray6(a, b unsafe.Pointer) bool {
	// amd64:`CMPL\t\(.*\), [A-Z]`
	// arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [R]`
	return *((*[4]byte)(a)) != *((*[4]byte)(b))
}
88
89// Check that some structs generate 2/4/8 byte compares.
90
// T1 is an 8-byte struct; see CompareStruct1.
type T1 struct {
	a [8]byte
}
94
// CompareStruct1 checks that an 8-byte struct equality compiles to a single
// 8-byte compare with no runtime call.
func CompareStruct1(s1, s2 T1) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:-`CALL`
	return s1 == s2
}
100
// T2 is a 16-byte struct; see CompareStruct2.
type T2 struct {
	a [16]byte
}
104
// CompareStruct2 checks that a 16-byte struct equality is still inlined as
// 8-byte compares with no runtime call.
func CompareStruct2(s1, s2 T2) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:-`CALL`
	return s1 == s2
}
110
111// Assert that a memequal call is still generated when
112// inlining would increase binary size too much.
113
// T3 is a 24-byte struct; see CompareStruct3.
type T3 struct {
	a [24]byte
}
117
// CompareStruct3 checks that a 24-byte struct equality is NOT inlined: a
// memequal call is expected instead of inline CMPQ instructions.
func CompareStruct3(s1, s2 T3) bool {
	// amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CALL`
	return s1 == s2
}
123
// T4 is a 32-byte struct; see CompareStruct4.
type T4 struct {
	a [32]byte
}
127
// CompareStruct4 checks that a 32-byte struct equality also falls back to a
// runtime call rather than inline compares.
func CompareStruct4(s1, s2 T4) bool {
	// amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CALL`
	return s1 == s2
}
133
134// -------------- //
135//    Ordering    //
136// -------------- //
137
138// Test that LEAQ/ADDQconst are folded into SETx ops
139
// r is a package-level sink so the comparison result must be stored to a
// global (SB-relative) address.
var r bool

// CmpFold checks that the SETHI result of the comparison is written directly
// to the global, i.e. the address computation is folded into the SETx op.
func CmpFold(x uint32) {
	// amd64:`SETHI\t.*\(SB\)`
	r = x > 4
}
146
147// Test that direct comparisons with memory are generated when
148// possible
149
// CmpMem1 checks that the load of *q is folded into the compare
// (memory-operand CMPQ) on amd64.
func CmpMem1(p int, q *int) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	return p < *q
}
154
// CmpMem2 is the mirror of CmpMem1: the load of *p is folded into the
// compare on amd64.
func CmpMem2(p *int, q int) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	return *p < q
}
159
// CmpMem3 checks that comparing a loaded value against a constant compiles
// to a single memory-vs-immediate compare.
func CmpMem3(p *int) bool {
	// amd64:`CMPQ\t\(.*\), [$]7`
	return *p < 7
}
164
// CmpMem4 checks the constant-on-the-left form also becomes a single
// memory-vs-immediate compare.
func CmpMem4(p *int) bool {
	// amd64:`CMPQ\t\(.*\), [$]7`
	return 7 < *p
}
169
// CmpMem5 checks that the write-barrier-enabled test emitted for the pointer
// store is a direct compare of runtime.writeBarrier against zero.
func CmpMem5(p **int) {
	// amd64:`CMPL\truntime.writeBarrier\(SB\), [$]0`
	*p = nil
}
174
// CmpMem6 checks that indexed slice-element loads are folded into the
// compare (constant-offset memory operands).
func CmpMem6(a []int) int {
	// 386:`CMPL\s8\([A-Z]+\),`
	// amd64:`CMPQ\s16\([A-Z]+\),`
	if a[1] > a[2] {
		return 1
	} else {
		return 2
	}
}
184
185// Check tbz/tbnz are generated when comparing against zero on arm64
186
// CmpZero1 checks that a signed int32 < 0 test compiles to a test-bit
// branch (TBZ on the sign bit) on arm64.
func CmpZero1(a int32, ptr *int) {
	if a < 0 { // arm64:"TBZ"
		*ptr = 0
	}
}
192
// CmpZero2 is the int64 variant of CmpZero1.
func CmpZero2(a int64, ptr *int) {
	if a < 0 { // arm64:"TBZ"
		*ptr = 0
	}
}
198
// CmpZero3 checks that a signed int32 >= 0 test compiles to TBNZ on the
// sign bit on arm64.
func CmpZero3(a int32, ptr *int) {
	if a >= 0 { // arm64:"TBNZ"
		*ptr = 0
	}
}
204
// CmpZero4 is the int64 variant of CmpZero3.
func CmpZero4(a int64, ptr *int) {
	if a >= 0 { // arm64:"TBNZ"
		*ptr = 0
	}
}
210
// CmpToZero checks that sign tests of and/add/xor expressions use the
// flag-setting instructions (TST/CMN/TEQ and their TEST equivalents)
// instead of computing the arithmetic result and then comparing it —
// except where the arithmetic result has a further use (deOptC0/deOptC1
// branches below keep b+d and a&d live to check that case).
func CmpToZero(a, b, d int32, e, f int64, deOptC0, deOptC1 bool) int32 {
	// arm:`TST`,-`AND`
	// arm64:`TSTW`,-`AND`
	// 386:`TESTL`,-`ANDL`
	// amd64:`TESTL`,-`ANDL`
	c0 := a&b < 0
	// arm:`CMN`,-`ADD`
	// arm64:`CMNW`,-`ADD`
	c1 := a+b < 0
	// arm:`TEQ`,-`XOR`
	c2 := a^b < 0
	// arm64:`TST`,-`AND`
	// amd64:`TESTQ`,-`ANDQ`
	c3 := e&f < 0
	// arm64:`CMN`,-`ADD`
	c4 := e+f < 0
	// not optimized to single CMNW/CMN due to further use of b+d
	// arm64:`ADD`,-`CMNW`
	// arm:`ADD`,-`CMN`
	c5 := b+d == 0
	// not optimized to single TSTW/TST due to further use of a&d
	// arm64:`AND`,-`TSTW`
	// arm:`AND`,-`TST`
	// 386:`ANDL`
	c6 := a&d >= 0
	// arm64:`TST\sR[0-9]+<<3,\sR[0-9]+`
	c7 := e&(f<<3) < 0
	// arm64:`CMN\sR[0-9]+<<3,\sR[0-9]+`
	c8 := e+(f<<3) < 0
	// arm64:`TST\sR[0-9],\sR[0-9]+`
	c9 := e&(-19) < 0
	if c0 {
		return 1
	} else if c1 {
		return 2
	} else if c2 {
		return 3
	} else if c3 {
		return 4
	} else if c4 {
		return 5
	} else if c5 {
		return 6
	} else if c6 {
		return 7
	} else if c7 {
		return 9
	} else if c8 {
		return 10
	} else if c9 {
		return 11
	} else if deOptC0 {
		return b + d
	} else if deOptC1 {
		return a & d
	} else {
		return 0
	}
}
270
// CmpLogicalToZero checks that and/or/xor == 0 tests use the flag-setting
// logical instructions (ANDCC/ORCC/XORCC) on ppc64x, and the eqz-based forms
// without extra width conversions on wasm.
func CmpLogicalToZero(a, b, c uint32, d, e uint64) uint64 {

	// ppc64x:"ANDCC",-"CMPW"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if a&63 == 0 {
		return 1
	}

	// ppc64x:"ANDCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d&255 == 0 {
		return 1
	}

	// ppc64x:"ANDCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d&e == 0 {
		return 1
	}
	// ppc64x:"ORCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d|e == 0 {
		return 1
	}

	// ppc64x:"XORCC",-"CMP"
	// wasm:"I64Eqz","I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if e^d == 0 {
		return 1
	}
	return 0
}
303
// The following CmpToZero_ex* functions check that cmp|cmn instructions with
// bmi|bpl branches are generated for 'comparing to zero' expressions
306
307// var + const
308// 'x-const' might be canonicalized to 'x+(-const)', so we check both
309// CMN and CMP for subtraction expressions to make the pattern robust.
// CmpToZero_ex1 covers the var-plus-constant cases, for both 64-bit (a)
// and 32-bit (e) operands; see the section comment above.
func CmpToZero_ex1(a int64, e int32) int {
	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+3 < 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if a+5 <= 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+13 >= 0 {
		return 2
	}

	// arm64:`CMP|CMN`,-`(ADD|SUB)`,`(BMI|BPL)`
	if a-7 < 0 {
		return 3
	}

	// arm64:`SUB`,`TBZ`
	if a-11 >= 0 {
		return 4
	}

	// arm64:`SUB`,`CMP`,`BGT`
	if a-19 > 0 {
		return 4
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+3 < 0 {
		return 5
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+13 >= 0 {
		return 6
	}

	// arm64:`CMPW|CMNW`,`(BMI|BPL)`
	// arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)`
	if e-7 < 0 {
		return 7
	}

	// arm64:`SUB`,`TBNZ`
	// arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)`
	if e-11 >= 0 {
		return 8
	}

	return 0
}
367
368// var + var
369// TODO: optimize 'var - var'
// CmpToZero_ex2 covers the var-plus-var cases: the addition should be folded
// into a CMN/CMNW and branched on with BMI/BPL.
func CmpToZero_ex2(a, b, c int64, e, f, g int32) int {
	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+b < 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if a+c <= 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if b+c >= 0 {
		return 2
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+f < 0 {
		return 5
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if f+g >= 0 {
		return 6
	}
	return 0
}
399
400// var + var*var
// CmpToZero_ex3 covers var + var*var: the add is folded into CMN/CMNW
// (keeping a plain MUL, not a MADD/MULA) before the sign-flag branch.
func CmpToZero_ex3(a, b, c, d int64, e, f, g, h int32) int {
	// arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)`
	if a+b*c < 0 {
		return 1
	}

	// arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)`
	if b+c*d >= 0 {
		return 2
	}

	// arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)`
	// arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)`
	if e+f*g > 0 {
		return 5
	}

	// arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)`
	// arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)`
	if f+g*h <= 0 {
		return 6
	}
	return 0
}
425
426// var - var*var
// CmpToZero_ex4 covers var - var*var: the subtraction is folded into a
// CMP/CMPW (keeping a plain MUL, not an MSUB) before the sign-flag branch.
func CmpToZero_ex4(a, b, c, d int64, e, f, g, h int32) int {
	// arm64:`CMP`,-`MSUB`,`MUL`,`BEQ`,`(BMI|BPL)`
	if a-b*c > 0 {
		return 1
	}

	// arm64:`CMP`,-`MSUB`,`MUL`,`(BMI|BPL)`
	if b-c*d >= 0 {
		return 2
	}

	// arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)`
	if e-f*g < 0 {
		return 5
	}

	// arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)`
	if f-g*h >= 0 {
		return 6
	}
	return 0
}
449
// CmpToZero_ex5 covers shifted operands on 32-bit arm: shifts are folded
// into the CMN/CMP rather than computed with a separate ADD/SUB.
func CmpToZero_ex5(e, f int32, u uint32) int {
	// arm:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if e+f<<1 > 0 {
		return 1
	}

	// arm:`CMP`,-`SUB`,`(BMI|BPL)`
	if f-int32(u>>2) >= 0 {
		return 2
	}
	return 0
}
462
// UintLtZero checks that always-false "unsigned < 0" comparisons are
// eliminated entirely: no test/compare or carry-flag branch is emitted.
func UintLtZero(a uint8, b uint16, c uint32, d uint64) int {
	// amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCC|JCS)`
	// arm64: -`(CMPW|CMP|BHS|BLO)`
	if a < 0 || b < 0 || c < 0 || d < 0 {
		return 1
	}
	return 0
}
471
// UintGeqZero checks that always-true "unsigned >= 0" comparisons are
// eliminated entirely.
func UintGeqZero(a uint8, b uint16, c uint32, d uint64) int {
	// amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCS|JCC)`
	// arm64: -`(CMPW|CMP|BLO|BHS)`
	if a >= 0 || b >= 0 || c >= 0 || d >= 0 {
		return 1
	}
	return 0
}
480
// UintGtZero checks that "unsigned > 0" becomes a compare-with-zero branch
// (CBZ/CBNZ, 32- and 64-bit forms) instead of an explicit CMP on arm64.
func UintGtZero(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLS|BHI)`
	if a > 0 || b > 0 || c > 0 || d > 0 {
		return 1
	}
	return 0
}
488
// UintLeqZero checks that "unsigned <= 0" (i.e. == 0) becomes a CBZ/CBNZ
// branch instead of an explicit CMP on arm64.
func UintLeqZero(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHI|BLS)`
	if a <= 0 || b <= 0 || c <= 0 || d <= 0 {
		return 1
	}
	return 0
}
496
// UintLtOne checks that "unsigned < 1" (i.e. == 0) becomes a CBZ/CBNZ
// branch instead of an explicit CMP on arm64.
func UintLtOne(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHS|BLO)`
	if a < 1 || b < 1 || c < 1 || d < 1 {
		return 1
	}
	return 0
}
504
// UintGeqOne checks that "unsigned >= 1" (i.e. != 0) becomes a CBZ/CBNZ
// branch instead of an explicit CMP on arm64.
func UintGeqOne(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLO|BHS)`
	if a >= 1 || b >= 1 || c >= 1 || d >= 1 {
		return 1
	}
	return 0
}
512
// CmpToZeroU_ex1 checks that "0 < unsigned" lowers to an eqz-based test on
// wasm rather than an unsigned less-than.
func CmpToZeroU_ex1(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < a {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < b {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < c {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < d {
		return 1
	}
	return 0
}
532
// CmpToZeroU_ex2 checks that "unsigned <= 0" lowers to an eqz-based test on
// wasm rather than an unsigned less-or-equal.
func CmpToZeroU_ex2(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LeU"
	if a <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if b <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if c <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if d <= 0 {
		return 1
	}
	return 0
}
552
// CmpToOneU_ex1 checks that "unsigned < 1" lowers to an eqz-based test on
// wasm rather than an unsigned less-than.
func CmpToOneU_ex1(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LtU"
	if a < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if b < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if c < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if d < 1 {
		return 1
	}
	return 0
}
572
// CmpToOneU_ex2 checks that "1 <= unsigned" lowers to an eqz-based test on
// wasm rather than an unsigned less-or-equal.
func CmpToOneU_ex2(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= a {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= b {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= c {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= d {
		return 1
	}
	return 0
}
592
593// Check that small memequals are replaced with eq instructions
594
// equalConstString1 checks that a 1-byte string equality avoids a memequal
// call. The string(...) conversions keep the operands from being
// constant-folded away.
func equalConstString1() bool {
	a := string("A")
	b := string("Z")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a == b
}
603
// equalVarString1 checks that comparing a 1-byte slice of a variable string
// against a 1-byte string avoids a memequal call.
func equalVarString1(a string) bool {
	b := string("Z")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a[:1] == b
}
611
// equalConstString2 is the 2-byte variant of equalConstString1.
func equalConstString2() bool {
	a := string("AA")
	b := string("ZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a == b
}
620
// equalVarString2 is the 2-byte variant of equalVarString1.
func equalVarString2(a string) bool {
	b := string("ZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a[:2] == b
}
628
// equalConstString4 is the 4-byte variant of equalConstString1.
func equalConstString4() bool {
	a := string("AAAA")
	b := string("ZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a == b
}
637
// equalVarString4 is the 4-byte variant of equalVarString1.
func equalVarString4(a string) bool {
	b := string("ZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a[:4] == b
}
645
// equalConstString8 is the 8-byte variant of equalConstString1.
func equalConstString8() bool {
	a := string("AAAAAAAA")
	b := string("ZZZZZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a == b
}
654
// equalVarString8 is the 8-byte variant of equalVarString1.
func equalVarString8(a string) bool {
	b := string("ZZZZZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64x:-".*memequal"
	return a[:8] == b
}
662
// cmpToCmn checks that arm64 uses CMN (compare-negative) for equality and
// sign tests involving additions or negations, and CMP for the subtraction
// forms, instead of materializing the arithmetic result first.
func cmpToCmn(a, b, c, d int) int {
	var c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 int
	// arm64:`CMN`,-`CMP`
	if a < -8 {
		c1 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+1 == 0 {
		c2 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+3 != 0 {
		c3 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+b == 0 {
		c4 = 1
	}
	// arm64:`CMN`,-`CMP`
	if b+c != 0 {
		c5 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a == -c {
		c6 = 1
	}
	// arm64:`CMN`,-`CMP`
	if b != -d {
		c7 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a*b+c == 0 {
		c8 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a*c+b != 0 {
		c9 = 1
	}
	// arm64:`CMP`,-`CMN`
	if b*c-a == 0 {
		c10 = 1
	}
	// arm64:`CMP`,-`CMN`
	if a*d-b != 0 {
		c11 = 1
	}
	return c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11
}
711
// cmpToCmnLessThan checks that "< 0" tests of add/madd expressions use CMN
// followed by CSET on the MI (negative) condition on arm64, and CMP for the
// subtraction form.
func cmpToCmnLessThan(a, b, c, d int) int {
	var c1, c2, c3, c4 int
	// arm64:`CMN`,`CSET\tMI`,-`CMP`
	if a+1 < 0 {
		c1 = 1
	}
	// arm64:`CMN`,`CSET\tMI`,-`CMP`
	if a+b < 0 {
		c2 = 1
	}
	// arm64:`CMN`,`CSET\tMI`,-`CMP`
	if a*b+c < 0 {
		c3 = 1
	}
	// arm64:`CMP`,`CSET\tMI`,-`CMN`
	if a-b*c < 0 {
		c4 = 1
	}
	return c1 + c2 + c3 + c4
}
732
// cmpToCmnGreaterThanEqual is the ">= 0" counterpart of cmpToCmnLessThan:
// CMN/CMP followed by CSET on the PL (non-negative) condition.
func cmpToCmnGreaterThanEqual(a, b, c, d int) int {
	var c1, c2, c3, c4 int
	// arm64:`CMN`,`CSET\tPL`,-`CMP`
	if a+1 >= 0 {
		c1 = 1
	}
	// arm64:`CMN`,`CSET\tPL`,-`CMP`
	if a+b >= 0 {
		c2 = 1
	}
	// arm64:`CMN`,`CSET\tPL`,-`CMP`
	if a*b+c >= 0 {
		c3 = 1
	}
	// arm64:`CMP`,`CSET\tPL`,-`CMN`
	if a-b*c >= 0 {
		c4 = 1
	}
	return c1 + c2 + c3 + c4
}
753
// cmp1 checks that comparing against a zero-value string avoids a memequal
// call (zero-value operand on the left).
func cmp1(val string) bool {
	var z string
	// amd64:-".*memequal"
	return z == val
}
759
// cmp2 is cmp1 with the zero-value operand on the right.
func cmp2(val string) bool {
	var z string
	// amd64:-".*memequal"
	return val == z
}
765
// cmp3 checks that comparing against a small constant string avoids a
// memequal call (constant operand on the left).
func cmp3(val string) bool {
	z := "food"
	// amd64:-".*memequal"
	return z == val
}
771
// cmp4 is cmp3 with the constant operand on the right.
func cmp4(val string) bool {
	z := "food"
	// amd64:-".*memequal"
	return val == z
}
777
// cmp5 checks the generic form of cmp1: comparing a zero value of a
// comparable type parameter avoids memequal (instantiated in cmp7).
func cmp5[T comparable](val T) bool {
	var z T
	// amd64:-".*memequal"
	return z == val
}
783
// cmp6 is cmp5 with the zero-value operand on the right.
func cmp6[T comparable](val T) bool {
	var z T
	// amd64:-".*memequal"
	return val == z
}
789
// cmp7 instantiates cmp5 and cmp6 with string so their assembly exists to
// be checked.
func cmp7() {
	cmp5[string]("") // force instantiation
	cmp6[string]("") // force instantiation
}
794
// Point is a simple 2-D integer coordinate used by invertLessThanNoov.
type Point struct {
	X, Y int
}
798
799// invertLessThanNoov checks (LessThanNoov (InvertFlags x)) is lowered as
800// CMP, CSET, CSEL instruction sequence. InvertFlags are only generated under
801// certain conditions, see canonLessThan, so if the code below does not
802// generate an InvertFlags OP, this check may fail.
func invertLessThanNoov(p1, p2, p3 Point) bool {
	// The 2-D cross-product sign test below is the shape that triggers
	// canonLessThan's InvertFlags (see the comment above this function).
	// arm64:`CMP`,`CSET`,`CSEL`
	return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
}
807
// cmpstring1 checks that only one runtime cmpstring call is emitted for the
// two ordered comparisons of the same operands: the first comparison calls
// cmpstring, the second is expected to reuse its result.
func cmpstring1(x, y string) int {
	// amd64:".*cmpstring"
	if x < y {
		return -1
	}
	// amd64:-".*cmpstring"
	if x > y {
		return +1
	}
	return 0
}
// cmpstring2 checks that cmp.Compare on strings results in a single
// cmpstring call, detected indirectly via the absence of spill/restore
// code (see the in-body comment).
func cmpstring2(x, y string) int {
	// We want to fail if there are two calls to cmpstring.
	// They will both have the same line number, so a test
	// like in cmpstring1 will not work. Instead, we
	// look for spill/restore instructions, which only
	// need to exist if there are 2 calls.
	//amd64:-`MOVQ\t.*\(SP\)`
	return cmp.Compare(x, y)
}
828