/*
 * Copyright (C) 2021 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "va_compiler.h"

/* Valhall specific instruction selection optimizations */

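/* Map a two-source add opcode to its *_IMM variant, which encodes one operand
 * as an inline 32-bit immediate, returning 0 if no such variant exists. The
 * immediate forms are typeless on the integer side, so signed and unsigned
 * adds share an opcode.
 */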
static enum bi_opcode
va_op_add_imm(enum bi_opcode op)
{
   switch (op) {
   case BI_OPCODE_FADD_F32:
      return BI_OPCODE_FADD_IMM_F32;
   case BI_OPCODE_FADD_V2F16:
      return BI_OPCODE_FADD_IMM_V2F16;
   case BI_OPCODE_IADD_S32:
   case BI_OPCODE_IADD_U32:
      return BI_OPCODE_IADD_IMM_I32;
   case BI_OPCODE_IADD_V2S16:
   case BI_OPCODE_IADD_V2U16:
      return BI_OPCODE_IADD_IMM_V2I16;
   case BI_OPCODE_IADD_V4S8:
   case BI_OPCODE_IADD_V4U8:
      return BI_OPCODE_IADD_IMM_V4I8;
   default:
      return 0;
   }
}

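/* Check that the instruction may legally take the immediate form, given that
 * source s is the one that survives: that source must be unmodified (identity
 * swizzle, no absolute value or negate), and the instruction must not clamp
 * or round, which the *_IMM encodings cannot express.
 */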
static bool
va_is_add_imm(bi_instr *I, unsigned s)
{
   assert(s < I->nr_srcs);

   return I->src[s].swizzle == BI_SWIZZLE_H01 && !I->src[s].abs &&
          !I->src[s].neg && !I->clamp && !I->round;
}

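/* Pick which of the two sources to fuse as an immediate, returning its index,
 * or ~0 if neither source is a constant.
 */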
static unsigned
va_choose_imm(bi_instr *I)
{
   for (unsigned i = 0; i < 2; ++i) {
      if (I->src[i].type == BI_INDEX_CONSTANT)
         return i;
   }

   return ~0;
}

/* Lower MOV.i32 #constant --> IADD_IMM.i32 0x0, #constant */
static void
va_lower_mov_imm(bi_instr *I)
{
   assert(I->nr_srcs == 1);

   if (I->src[0].type == BI_INDEX_CONSTANT) {
      I->op = BI_OPCODE_IADD_IMM_I32;
      I->index = I->src[0].value;
      I->src[0] = bi_zero();
   }
}

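/* Fuse a constant source into a supported add, switching to the *_IMM form of
 * the opcode and packing the constant into the instruction's index field. In
 * rough IR notation (illustrative, not exact syntax):
 *
 *    FADD.f32 dst, src, #1.0  -->  FADD_IMM.f32 dst, src, imm=0x3f800000
 */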
void
va_fuse_add_imm(bi_instr *I)
{
   /* MOV of a constant becomes an add with zero, reusing the same encoding */
   if (I->op == BI_OPCODE_MOV_I32) {
      va_lower_mov_imm(I);
      return;
   }

   enum bi_opcode op = va_op_add_imm(I->op);
   if (!op)
      return;

   /* ~0 from va_choose_imm means neither source is a constant */
   unsigned s = va_choose_imm(I);
   if (s > 1)
      return;
   if (!va_is_add_imm(I, 1 - s))
      return;

   I->op = op;
   I->index = bi_apply_swizzle(I->src[s].value, I->src[s].swizzle);

   assert(!I->src[s].abs && "redundant .abs set");

   /* If the constant is negated, fold the negate into the immediate by
    * flipping the sign bit(s): bit 31 for an f32, bits 15 and 31 for the two
    * halves of a packed v2f16. Only the float opcodes may carry .neg here.
    */
   if (I->src[s].neg) {
      if (I->op == BI_OPCODE_FADD_IMM_F32)
         I->index ^= (1u << 31);
      else if (I->op == BI_OPCODE_FADD_IMM_V2F16)
         I->index ^= (1u << 31) | (1u << 15);
      else
         unreachable("unexpected .neg");
   }

   /* Shift the surviving source down and drop the now-fused constant */
   I->src[0] = I->src[1 - s];
   bi_drop_srcs(I, 1);
}

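/* Entry point for the pass: apply the fusion over every instruction in the
 * shader.
 */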
void
va_optimize(bi_context *ctx)
{
   bi_foreach_instr_global(ctx, I) {
      va_fuse_add_imm(I);
   }
}