/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_
#define TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_

#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"

namespace tensorflow {

// -------------------------------------------------------------------------- //
// MLIR passes running on TensorFlow function graphs (TensorFlow V2).
// -------------------------------------------------------------------------- //

// Disabled - skip execution of the pass.
// Enabled - execute the pass and propagate any errors to the caller.
// FallbackEnabled - execute the pass and commit all changes to the MLIR
//   module on success. Do not commit any changes on failure and let the rest
//   of the pipeline run.
enum class MlirOptimizationPassState { Disabled, Enabled, FallbackEnabled };

// An API for registering an MLIR ModulePass with the TensorFlow runtime. These
// passes run only on function graphs built by TensorFlow V2 and instantiated
// by the process_function_library_runtime (see FunctionOptimizationPass for
// details).
class MlirOptimizationPass {
 public:
  virtual ~MlirOptimizationPass() = default;
  virtual llvm::StringRef name() const = 0;

  // Returns an enum value:
  //   Enabled if the pass is enabled for the given graph and config.
  //   Disabled if the pass is disabled.
  //   FallbackEnabled if the pass should be executed in fallback mode.
  //
  // When the pass is FallbackEnabled, the pass is executed and the changes it
  // makes to the MLIR module are committed only if the pass succeeds;
  // otherwise no changes are committed and the rest of the pipeline still
  // runs.
  //
  // `device_set` can be nullptr if the device information is not available or
  // no device-specific filtering is required.
  // `function_library` contains function definitions for function calls in
  // `graph` that are not included in the `graph` FunctionLibraryDefinition.
  virtual MlirOptimizationPassState GetPassState(
      const DeviceSet* device_set, const ConfigProto& config_proto,
      const Graph& graph,
      const FunctionLibraryDefinition& function_library) const = 0;

  virtual Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
                     const Graph& graph,
                     const FunctionLibraryDefinition& function_library) = 0;
};
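
// Example (illustrative sketch only; `MyClusteringPass`, its name string, and
// its FallbackEnabled policy are hypothetical and not part of this API). A
// pass implements `name`, `GetPassState` and `Run`, and mutates `module` in
// place:
//
//   class MyClusteringPass : public MlirOptimizationPass {
//    public:
//     llvm::StringRef name() const override { return "my-clustering-pass"; }
//
//     MlirOptimizationPassState GetPassState(
//         const DeviceSet* device_set, const ConfigProto& config_proto,
//         const Graph& graph,
//         const FunctionLibraryDefinition& function_library) const override {
//       // Run in fallback mode: if the pass fails, its changes are discarded
//       // and the rest of the pipeline still runs.
//       return MlirOptimizationPassState::FallbackEnabled;
//     }
//
//     Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
//                const Graph& graph,
//                const FunctionLibraryDefinition& function_library) override {
//       // Rewrite `module` here and return a non-OK Status on failure.
//       return OkStatus();  // Status::OK() in older TensorFlow versions.
//     }
//   };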

class MlirOptimizationPassRegistry {
 public:
  struct PassRegistration {
    int priority;
    std::unique_ptr<MlirOptimizationPass> pass;
  };

  struct PriorityComparator {
    bool operator()(const PassRegistration& x,
                    const PassRegistration& y) const {
      return x.priority < y.priority;
    }
  };

  using Passes = std::set<PassRegistration, PriorityComparator>;

  // Returns the global registry of MLIR optimization passes.
  static MlirOptimizationPassRegistry& Global();

  // Registers the optimization `pass` with the given `priority`.
  void Add(int priority, std::unique_ptr<MlirOptimizationPass> pass) {
    auto inserted = passes_.insert({priority, std::move(pass)});
    CHECK(inserted.second)
        << "Pass priority must be unique. "
        << "Previously registered pass with the same priority: "
        << inserted.first->pass->name().str();
  }

  // Frees the memory allocated for all registered passes.
  void ClearPasses() { passes_.clear(); }

  const Passes& passes() const { return passes_; }

 private:
  Passes passes_;
};

// Function optimization pass that runs all MLIR passes registered in
// MlirOptimizationPassRegistry.
class MlirFunctionOptimizationPass : public FunctionOptimizationPass {
 public:
  explicit MlirFunctionOptimizationPass(
      const MlirOptimizationPassRegistry* registry =
          &MlirOptimizationPassRegistry::Global())
      : registry_(registry) {}

  // Executes all of the underlying registered MlirOptimizationPasses.
  Status Run(const DeviceSet& device_set, const ConfigProto& config_proto,
             std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
             std::vector<std::string>* control_ret_node_names,
             bool* control_rets_updated) override;

 private:
  const MlirOptimizationPassRegistry* registry_;
};

// -------------------------------------------------------------------------- //
// MLIR passes running on TensorFlow V1 graphs.
// -------------------------------------------------------------------------- //

// An API for registering an MLIR ModulePass with the TensorFlow runtime. These
// passes run only on V1 (legacy) graphs executed via the Session runtime. The
// graph importer converts legacy graph constructs to their V2 counterparts
// (e.g. it raises control flow from Switch/Merge nodes to functional control
// flow with If/While operations).
class MlirV1CompatOptimizationPass {
 public:
  virtual ~MlirV1CompatOptimizationPass() = default;
  virtual llvm::StringRef name() const = 0;

  // Returns a MlirOptimizationPassState based on the given graph and config.
  // See the comments on the `MlirOptimizationPassState` enum for the exact
  // meaning of each value.
  virtual MlirOptimizationPassState GetPassState(
      const DeviceSet* device_set, const ConfigProto& config_proto,
      const Graph& graph,
      const FunctionLibraryDefinition& function_library) const = 0;

  virtual Status Run(const GraphOptimizationPassOptions& options,
                     mlir::ModuleOp module) = 0;
};

class MlirV1CompatOptimizationPassRegistry {
 public:
  // Returns the global registry of MLIR V1 compat optimization passes.
  static MlirV1CompatOptimizationPassRegistry& Global();

  // Registers `pass`. Only a single V1 compat pass can be registered.
  void Add(std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
    CHECK(pass_ == nullptr) << "Only a single pass can be registered";
    pass_ = std::move(pass);
  }

  MlirV1CompatOptimizationPass* pass() const {
    return pass_ ? pass_.get() : nullptr;
  }

 private:
  std::unique_ptr<MlirV1CompatOptimizationPass> pass_{};
};

class MlirV1CompatGraphOptimizationPass : public GraphOptimizationPass {
 public:
  explicit MlirV1CompatGraphOptimizationPass(
      const MlirV1CompatOptimizationPassRegistry* registry =
          &MlirV1CompatOptimizationPassRegistry::Global())
      : registry_(registry) {}

  Status Run(const GraphOptimizationPassOptions& options) override;

 private:
  const MlirV1CompatOptimizationPassRegistry* registry_;
};

// -------------------------------------------------------------------------- //
// Helper classes for static registration of MLIR (V1 Compat) passes in the
// corresponding registry.
// -------------------------------------------------------------------------- //

namespace mlir_pass_registration {

class MlirOptimizationPassRegistration {
 public:
  explicit MlirOptimizationPassRegistration(
      int priority, std::unique_ptr<MlirOptimizationPass> pass) {
    MlirOptimizationPassRegistry::Global().Add(priority, std::move(pass));
  }
};

class MlirV1CompatOptimizationPassRegistration {
 public:
  explicit MlirV1CompatOptimizationPassRegistration(
      std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
    MlirV1CompatOptimizationPassRegistry::Global().Add(std::move(pass));
  }
};

}  // namespace mlir_pass_registration
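
// Example of wiring passes into the registries at static-initialization time
// (illustrative sketch only; the pass types and the priority value are
// hypothetical). Priorities must be unique across all registered
// MlirOptimizationPasses, and only a single V1 compat pass may be registered:
//
//   static mlir_pass_registration::MlirOptimizationPassRegistration
//       register_my_clustering_pass(/*priority=*/10,
//                                   std::make_unique<MyClusteringPass>());
//
//   static mlir_pass_registration::MlirV1CompatOptimizationPassRegistration
//       register_my_v1_pass(std::make_unique<MyV1CompatPass>());
//
// Alternatively, a pass can be added directly via
// MlirOptimizationPassRegistry::Global().Add(priority, std::move(pass)).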

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_