/* xref: /aosp_15_r20/external/mesa3d/src/amd/compiler/aco_opt_value_numbering.cpp
 * (revision 6104692788411f58d303aa86923a9ff6ecaded22) */
/*
 * Copyright © 2018 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_ir.h"
#include "aco_util.h"

#include <unordered_map>
#include <vector>

/*
 * Implements the algorithm for dominator-tree value numbering
 * from "Value Numbering" by Briggs, Cooper, and Simpson.
 */
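/*
 * Illustrative example (hypothetical values, not from a real shader): if block
 * B1 logically dominates block B2 and both contain
 *    v1 = v_add_f32 v0, v2   (in B1)
 *    v3 = v_add_f32 v0, v2   (in B2)
 * then the copy in B2 is dropped and later uses of v3 are renamed to v1 via
 * the renames map built by this pass.
 */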

namespace aco {
namespace {

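/* One mixing round of MurmurHash3: scrambles the 32-bit word k and folds it
 * into the running hash h. */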
inline uint32_t
murmur_32_scramble(uint32_t h, uint32_t k)
{
   k *= 0xcc9e2d51;
   k = (k << 15) | (k >> 17);
   h ^= k * 0x1b873593;
   h = (h << 13) | (h >> 19);
   h = h * 5 + 0xe6546b64;
   return h;
}

struct InstrHash {
   /* This hash function uses the Murmur3 algorithm written by Austin Appleby
    * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
    *
    * In order to calculate the expression set, only the right-hand-side of an
    * instruction is used for the hash, i.e. everything except the definitions.
    */
   std::size_t operator()(Instruction* instr) const
   {
      uint32_t hash = uint32_t(instr->format) << 16 | uint32_t(instr->opcode);

      for (const Operand& op : instr->operands)
         hash = murmur_32_scramble(hash, op.constantValue());

      size_t data_size = get_instr_data_size(instr->format);

      /* skip format, opcode, pass_flags and the operand/definition spans */
      for (unsigned i = sizeof(Instruction) >> 2; i < (data_size >> 2); i++) {
         uint32_t u;
         /* Accesses it through a byte array, so this doesn't violate the strict aliasing rule */
         memcpy(&u, reinterpret_cast<uint8_t*>(instr) + i * 4, 4);
         hash = murmur_32_scramble(hash, u);
      }

      /* Finalize. */
      uint32_t len = instr->operands.size() + instr->definitions.size();
      hash ^= len;
      hash ^= hash >> 16;
      hash *= 0x85ebca6b;
      hash ^= hash >> 13;
      hash *= 0xc2b2ae35;
      hash ^= hash >> 16;
      return hash;
   }
};

struct InstrPred {
   bool operator()(Instruction* a, Instruction* b) const
   {
      if (a->format != b->format)
         return false;
      if (a->opcode != b->opcode)
         return false;
      if (a->operands.size() != b->operands.size() ||
          a->definitions.size() != b->definitions.size())
         return false; /* possible with pseudo-instructions */
      for (unsigned i = 0; i < a->operands.size(); i++) {
         if (a->operands[i].isConstant()) {
            if (!b->operands[i].isConstant())
               return false;
            if (a->operands[i].constantValue() != b->operands[i].constantValue())
               return false;
         } else if (a->operands[i].isTemp()) {
            if (!b->operands[i].isTemp())
               return false;
            if (a->operands[i].tempId() != b->operands[i].tempId())
               return false;
         } else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
            return false;
         if (a->operands[i].isFixed()) {
            if (!b->operands[i].isFixed())
               return false;
            if (a->operands[i].physReg() != b->operands[i].physReg())
               return false;
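            /* Operands fixed to the exec register also depend on the execution
             * mask, which is approximated here by the exec_id stored in pass_flags. */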
            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
               return false;
         }
      }
      for (unsigned i = 0; i < a->definitions.size(); i++) {
         if (a->definitions[i].isTemp()) {
            if (!b->definitions[i].isTemp())
               return false;
            if (a->definitions[i].regClass() != b->definitions[i].regClass())
               return false;
         }
         if (a->definitions[i].isFixed()) {
            if (!b->definitions[i].isFixed())
               return false;
            if (a->definitions[i].physReg() != b->definitions[i].physReg())
               return false;
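            /* Never value-number instructions which write the exec mask. */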
            if (a->definitions[i].physReg() == exec)
               return false;
         }
      }

      if (a->isVALU()) {
         VALU_instruction& aV = a->valu();
         VALU_instruction& bV = b->valu();
         if (aV.abs != bV.abs || aV.neg != bV.neg || aV.clamp != bV.clamp || aV.omod != bV.omod ||
             aV.opsel != bV.opsel || aV.opsel_lo != bV.opsel_lo || aV.opsel_hi != bV.opsel_hi)
            return false;

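         /* Lane-crossing instructions also read values from other lanes, so
          * they are only treated as equal when created under the same
          * execution mask (approximated by the exec_id in pass_flags). */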
         if (a->opcode == aco_opcode::v_permlane16_b32 ||
             a->opcode == aco_opcode::v_permlanex16_b32 ||
             a->opcode == aco_opcode::v_permlane64_b32 ||
             a->opcode == aco_opcode::v_readfirstlane_b32)
            return aV.pass_flags == bV.pass_flags;
      }
      if (a->isDPP16()) {
         DPP16_instruction& aDPP = a->dpp16();
         DPP16_instruction& bDPP = b->dpp16();
         return aDPP.pass_flags == bDPP.pass_flags && aDPP.dpp_ctrl == bDPP.dpp_ctrl &&
                aDPP.bank_mask == bDPP.bank_mask && aDPP.row_mask == bDPP.row_mask &&
                aDPP.bound_ctrl == bDPP.bound_ctrl && aDPP.fetch_inactive == bDPP.fetch_inactive;
      }
      if (a->isDPP8()) {
         DPP8_instruction& aDPP = a->dpp8();
         DPP8_instruction& bDPP = b->dpp8();
         return aDPP.pass_flags == bDPP.pass_flags && aDPP.lane_sel == bDPP.lane_sel &&
                aDPP.fetch_inactive == bDPP.fetch_inactive;
      }
      if (a->isSDWA()) {
         SDWA_instruction& aSDWA = a->sdwa();
         SDWA_instruction& bSDWA = b->sdwa();
         return aSDWA.sel[0] == bSDWA.sel[0] && aSDWA.sel[1] == bSDWA.sel[1] &&
                aSDWA.dst_sel == bSDWA.dst_sel;
      }

      switch (a->format) {
      case Format::SOP1: {
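         /* The result of s_sendmsg_rtn depends on state outside of the IR
          * (e.g. message responses), so never treat two of them as equal. */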
         if (a->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
             a->opcode == aco_opcode::s_sendmsg_rtn_b64)
            return false;
         return true;
      }
      case Format::SOPK: {
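         /* s_getreg_b32 reads a hardware register whose value can change, so
          * two reads must not be merged. */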
         if (a->opcode == aco_opcode::s_getreg_b32)
            return false;
         SALU_instruction& aK = a->salu();
         SALU_instruction& bK = b->salu();
         return aK.imm == bK.imm;
      }
      case Format::SMEM: {
         SMEM_instruction& aS = a->smem();
         SMEM_instruction& bS = b->smem();
         return aS.sync == bS.sync && aS.cache.value == bS.cache.value;
      }
      case Format::VINTRP: {
         VINTRP_instruction& aI = a->vintrp();
         VINTRP_instruction& bI = b->vintrp();
         return aI.attribute == bI.attribute && aI.component == bI.component &&
                aI.high_16bits == bI.high_16bits;
      }
      case Format::VINTERP_INREG: {
         VINTERP_inreg_instruction& aI = a->vinterp_inreg();
         VINTERP_inreg_instruction& bI = b->vinterp_inreg();
         return aI.wait_exp == bI.wait_exp;
      }
      case Format::PSEUDO_REDUCTION: {
         Pseudo_reduction_instruction& aR = a->reduction();
         Pseudo_reduction_instruction& bR = b->reduction();
         return aR.pass_flags == bR.pass_flags && aR.reduce_op == bR.reduce_op &&
                aR.cluster_size == bR.cluster_size;
      }
      case Format::DS: {
         assert(a->opcode == aco_opcode::ds_bpermute_b32 ||
                a->opcode == aco_opcode::ds_permute_b32 || a->opcode == aco_opcode::ds_swizzle_b32);
         DS_instruction& aD = a->ds();
         DS_instruction& bD = b->ds();
         return aD.sync == bD.sync && aD.pass_flags == bD.pass_flags && aD.gds == bD.gds &&
                aD.offset0 == bD.offset0 && aD.offset1 == bD.offset1;
      }
      case Format::LDSDIR: {
         LDSDIR_instruction& aD = a->ldsdir();
         LDSDIR_instruction& bD = b->ldsdir();
         return aD.sync == bD.sync && aD.attr == bD.attr && aD.attr_chan == bD.attr_chan &&
                aD.wait_vdst == bD.wait_vdst;
      }
      case Format::MTBUF: {
         MTBUF_instruction& aM = a->mtbuf();
         MTBUF_instruction& bM = b->mtbuf();
         return aM.sync == bM.sync && aM.dfmt == bM.dfmt && aM.nfmt == bM.nfmt &&
                aM.offset == bM.offset && aM.offen == bM.offen && aM.idxen == bM.idxen &&
                aM.cache.value == bM.cache.value && aM.tfe == bM.tfe &&
                aM.disable_wqm == bM.disable_wqm;
      }
      case Format::MUBUF: {
         MUBUF_instruction& aM = a->mubuf();
         MUBUF_instruction& bM = b->mubuf();
         return aM.sync == bM.sync && aM.offset == bM.offset && aM.offen == bM.offen &&
                aM.idxen == bM.idxen && aM.cache.value == bM.cache.value && aM.tfe == bM.tfe &&
                aM.lds == bM.lds && aM.disable_wqm == bM.disable_wqm;
      }
      case Format::MIMG: {
         MIMG_instruction& aM = a->mimg();
         MIMG_instruction& bM = b->mimg();
         return aM.sync == bM.sync && aM.dmask == bM.dmask && aM.unrm == bM.unrm &&
                aM.cache.value == bM.cache.value && aM.tfe == bM.tfe && aM.da == bM.da &&
                aM.lwe == bM.lwe && aM.r128 == bM.r128 && aM.a16 == bM.a16 && aM.d16 == bM.d16 &&
                aM.disable_wqm == bM.disable_wqm;
      }
      case Format::FLAT:
      case Format::GLOBAL:
      case Format::SCRATCH:
      case Format::EXP:
      case Format::SOPP:
      case Format::PSEUDO_BRANCH:
      case Format::PSEUDO_BARRIER: unreachable("unsupported instruction format");
      default: return true;
      }
   }
};

using expr_set = aco::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;

struct vn_ctx {
   Program* program;
   monotonic_buffer_resource m;
   expr_set expr_values;
   aco::unordered_map<uint32_t, Temp> renames;

   /* The exec id should be the same on the same level of control flow depth.
    * Together with the check for dominator relations, it is safe to assume
    * that the same exec_id also means the same execution mask.
    * Discards increment the exec_id, so that it won't return to the previous value.
    */
   uint32_t exec_id = 1;

   vn_ctx(Program* program_) : program(program_), m(), expr_values(m), renames(m)
   {
      static_assert(sizeof(Temp) == 4, "Temp must fit in 32bits");
      unsigned size = 0;
      for (Block& block : program->blocks)
         size += block.instructions.size();
      expr_values.reserve(size);
   }
};

/* dominates() returns true if the parent block dominates the child block and
 * if the parent block is part of the same loop or has a smaller loop nest depth.
 */
bool
dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
{
   Block& parent_b = ctx.program->blocks[parent];
   Block& child_b = ctx.program->blocks[child];
   if (!dominates_logical(parent_b, child_b) || parent_b.loop_nest_depth > child_b.loop_nest_depth)
      return false;
   if (parent_b.loop_nest_depth == child_b.loop_nest_depth && parent_b.loop_nest_depth == 0)
      return true;

   unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
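   /* Walk up the logical dominator tree from the child until we either reach
    * the parent or end up at a block with a smaller loop nest depth than the
    * parent. */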
   while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
      child = ctx.program->blocks[child].logical_idom;

   return parent == child;
}

/** Returns whether this instruction can safely be removed
 *  and replaced by an equal expression.
 *  This is in particular true for ALU instructions and
 *  read-only memory instructions.
 *
 *  Note that expr_set must not be used with instructions
 *  which cannot be eliminated.
 */
bool
can_eliminate(aco_ptr<Instruction>& instr)
{
   switch (instr->format) {
   case Format::FLAT:
   case Format::GLOBAL:
   case Format::SCRATCH:
   case Format::EXP:
   case Format::SOPP:
   case Format::PSEUDO_BRANCH:
   case Format::PSEUDO_BARRIER: return false;
   case Format::DS:
      return instr->opcode == aco_opcode::ds_bpermute_b32 ||
             instr->opcode == aco_opcode::ds_permute_b32 ||
             instr->opcode == aco_opcode::ds_swizzle_b32;
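   /* Memory loads can only be eliminated if they can be freely reordered with
    * respect to other memory operations. */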
   case Format::SMEM:
   case Format::MUBUF:
   case Format::MIMG:
   case Format::MTBUF:
      if (!get_sync_info(instr.get()).can_reorder())
         return false;
      break;
   default: break;
   }

   if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi ||
       instr->opcode == aco_opcode::p_linear_phi ||
       instr->opcode == aco_opcode::p_pops_gfx9_add_exiting_wave_id ||
       instr->definitions[0].isNoCSE())
      return false;

   return true;
}

bool
is_trivial_phi(Block& block, Instruction* instr)
{
   if (!is_phi(instr))
      return false;

   /* Logical LCSSA phis must be kept in order to prevent the optimizer
    * from doing invalid transformations. */
   if (instr->opcode == aco_opcode::p_phi && (block.kind & block_kind_loop_exit))
      return false;

   return std::all_of(instr->operands.begin(), instr->operands.end(),
                      [&](Operand& op) { return op == instr->operands[0]; });
}

void
process_block(vn_ctx& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> new_instructions;
   new_instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      /* first, rename operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         auto it = ctx.renames.find(op.tempId());
         if (it != ctx.renames.end())
            op.setTemp(it->second);
      }

      if (instr->opcode == aco_opcode::p_discard_if ||
          instr->opcode == aco_opcode::p_demote_to_helper || instr->opcode == aco_opcode::p_end_wqm)
         ctx.exec_id++;

      /* simple copy-propagation through renaming */
      bool copy_instr =
         is_trivial_phi(block, instr.get()) || instr->opcode == aco_opcode::p_parallelcopy ||
         (instr->opcode == aco_opcode::p_create_vector && instr->operands.size() == 1);
      if (copy_instr && !instr->definitions[0].isFixed() && instr->operands[0].isTemp() &&
          instr->operands[0].regClass() == instr->definitions[0].regClass()) {
         ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
         continue;
      }

      if (!can_eliminate(instr)) {
         new_instructions.emplace_back(std::move(instr));
         continue;
      }

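      /* Look up the instruction in the expression set. The current exec_id is
       * stored in pass_flags so that instructions which depend on the exec
       * mask only compare equal within the same execution context. */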
      instr->pass_flags = ctx.exec_id;
      std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);

      /* if there was already an expression with the same value number */
      if (!res.second) {
         Instruction* orig_instr = res.first->first;
         assert(instr->definitions.size() == orig_instr->definitions.size());
         /* check if the original instruction dominates the current one */
         if (dominates(ctx, res.first->second, block.index) &&
             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
            for (unsigned i = 0; i < instr->definitions.size(); i++) {
               assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
               assert(instr->definitions[i].isTemp());
               ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
               if (instr->definitions[i].isPrecise())
                  orig_instr->definitions[i].setPrecise(true);
               if (instr->definitions[i].isSZPreserve())
                  orig_instr->definitions[i].setSZPreserve(true);
               if (instr->definitions[i].isInfPreserve())
                  orig_instr->definitions[i].setInfPreserve(true);
               if (instr->definitions[i].isNaNPreserve())
                  orig_instr->definitions[i].setNaNPreserve(true);
               /* The SPIR-V spec says that wrap-around of an instruction
                * marked NUW is undefined behaviour, so we are allowed to
                * break such additions in other contexts.
                */
               if (instr->definitions[i].isNUW())
                  orig_instr->definitions[i].setNUW(true);
            }
         } else {
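            /* The recorded instruction does not dominate this block (or the
             * float modes are incompatible): keep this instruction and make it
             * the new representative for the expression. */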
            ctx.expr_values.erase(res.first);
            ctx.expr_values.emplace(instr.get(), block.index);
            new_instructions.emplace_back(std::move(instr));
         }
      } else {
         new_instructions.emplace_back(std::move(instr));
      }
   }

   block.instructions = std::move(new_instructions);
}

void
rename_phi_operands(Block& block, aco::unordered_map<uint32_t, Temp>& renames)
{
   for (aco_ptr<Instruction>& phi : block.instructions) {
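      /* Phis are grouped at the top of the block, so stop at the first
       * non-phi instruction. */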
      if (!is_phi(phi))
         break;

      for (Operand& op : phi->operands) {
         if (!op.isTemp())
            continue;
         auto it = renames.find(op.tempId());
         if (it != renames.end())
            op.setTemp(it->second);
      }
   }
}
} /* end namespace */

void
value_numbering(Program* program)
{
   vn_ctx ctx(program);
   std::vector<unsigned> loop_headers;

   for (Block& block : program->blocks) {
      assert(ctx.exec_id > 0);
      /* decrement exec_id when leaving nested control flow */
      if (block.kind & block_kind_loop_header)
         loop_headers.push_back(block.index);
      if (block.kind & block_kind_merge) {
         ctx.exec_id--;
      } else if (block.kind & block_kind_loop_exit) {
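         /* Undo the increments done when the loop was entered: roughly one per
          * linear predecessor of the loop header (preheader and continues) and
          * one per linear predecessor of the exit block (breaks). */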
         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
         ctx.exec_id -= block.linear_preds.size();
         loop_headers.pop_back();
      }

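      /* Blocks which are their own logical dominator start a new dominator
       * tree, so previously recorded expressions must not be reused. */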
      if (block.logical_idom == (int)block.index)
         ctx.expr_values.clear();

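      /* Blocks without a logical immediate dominator (presumably only
       * reachable through linear control flow) only need their phi operands
       * renamed. */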
      if (block.logical_idom != -1)
         process_block(ctx, block);
      else
         rename_phi_operands(block, ctx.renames);

      /* increment exec_id when entering nested control flow */
      if (block.kind & block_kind_branch || block.kind & block_kind_loop_preheader ||
          block.kind & block_kind_break || block.kind & block_kind_continue)
         ctx.exec_id++;
      else if (block.kind & block_kind_continue_or_break)
         ctx.exec_id += 2;
   }

   /* rename loop header phi operands */
   for (Block& block : program->blocks) {
      if (block.kind & block_kind_loop_header)
         rename_phi_operands(block, ctx.renames);
   }
}

} // namespace aco
