/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_FUSED_IR_EMITTER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_FUSED_IR_EMITTER_H_

#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "llvm/IR/Value.h"
#include "tensorflow/compiler/xla/service/elemental_ir_emitter.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/statusor.h"

namespace xla {

// FusedIrEmitter is used to generate code for fusion nodes.
//
// Unlike IrEmitter and its ilk, which directly create LLVM IR in an LLVM
// Module, FusedIrEmitter is better understood as an "IR generator generator".
// FusedIrEmitter recursively creates a generator (a host function) which the
// compiler can invoke at a later time. Invoking the generator emits LLVM IR
// that, when run, produces the value at a particular index of the output.
//
// After building this generator, the compiler creates a loop (or its moral
// equivalent, e.g. a GPU kernel) and calls the generator from within the loop.
// This generates code that produces each element of the output.
//
// This class handles both vanilla fusion and multi-output fusion. In the MOF
// case, the fusion node ends with a kTuple instruction, and the generator
// created produces an LLVM struct with N elements, one for each element of the
// arrays in the tuple. It follows that the arrays in the tuple must have the
// same length.
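//
// A minimal usage sketch, illustrative rather than part of this header:
// `operand_arrays` (one llvm_ir::IrArray per fusion operand) and `builder`
// (an llvm::IRBuilder<>) are assumed to be in scope, and error handling
// beyond TF_ASSIGN_OR_RETURN is elided.
//
//   FusedIrEmitter fused_emitter(elemental_emitter);
//   for (int64 i = 0; i < fusion->operand_count(); ++i) {
//     fused_emitter.BindGenerator(
//         *fusion->fused_parameter(i),
//         [&, i](const llvm_ir::IrArray::Index& index)
//             -> StatusOr<llvm::Value*> {
//           // Parameter elements come straight from the operand's buffer.
//           return operand_arrays[i].EmitReadArrayElement(index, &builder);
//         });
//   }
//   TF_ASSIGN_OR_RETURN(
//       auto root_generator,
//       fused_emitter.GetGenerator(*fusion->fused_expression_root()));
//   // Inside the emitted loop (or GPU kernel) body, emit the computation
//   // of one output element:
//   TF_ASSIGN_OR_RETURN(llvm::Value* element, root_generator(index));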
class FusedIrEmitter {
 public:
  using IndexedGenerator = llvm_ir::ElementGenerator;

  explicit FusedIrEmitter(ElementalIrEmitter& elemental_emitter)
      : elemental_emitter_(elemental_emitter) {}

  // Registers a pre-built generator for `instruction`, e.g. one that loads an
  // element of a fusion parameter from its buffer.
  void BindGenerator(const HloInstruction& instruction,
                     llvm_ir::ElementGenerator generator) {
    indexed_generators_[&instruction] = std::move(generator);
  }

  // Returns the generator function for the given instruction.
  StatusOr<IndexedGenerator> GetGenerator(const HloInstruction& instruction);

  // Evaluates whether fusing 'producer' into 'consumer' might cause
  // exponential behavior in FusedIrEmitter. We currently can have exponential
  // time/memory requirements for emitting certain fusion kernels, in which
  // case we don't want to fuse.
  // TODO(b/119692968): Remove this once we have fixed our fusion emitter.
  static bool IsFusedIrEmitterInefficient(const HloInstruction& consumer,
                                          const HloInstruction& producer);

 private:
  StatusOr<IndexedGenerator> CreateGenerator(
      const HloInstruction& instruction);
  StatusOr<IndexedGenerator> DefaultAction(const HloInstruction& instruction);
  IndexedGenerator HandleConstant(const HloInstruction& constant);
  StatusOr<IndexedGenerator> HandleTuple(const HloInstruction& tuple);

  ElementalIrEmitter& elemental_emitter_;

  // Map from instructions to functions that generate code for their output
  // elements. If an instruction is a GetTupleElement instruction, its
  // generator produces a non-tuple result.
  absl::flat_hash_map<const HloInstruction*, IndexedGenerator>
      indexed_generators_;

  // Cache of generated values, lest we regenerate an element of a node with
  // multiple outgoing edges. Keyed by instruction and index values.
  using ValueCacheKey =
      std::pair<const HloInstruction*, std::vector<llvm::Value*>>;
  absl::flat_hash_map<ValueCacheKey, llvm::Value*> value_cache_;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_FUSED_IR_EMITTER_H_