/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TRIANGULAR_SOLVE_REWRITER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TRIANGULAR_SOLVE_REWRITER_H_

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/statusor.h"

namespace xla {
namespace gpu {

// Rewrites HLO TriangularSolve ops into a custom-call.
//
// The motivation for this is that we need to add temp memory to batched
// triangular-solve ops in order to call cublas trsmBatched.  We rewrite
// batch-1 ops as well so that we have fewer codepaths to worry about in the
// backend.
//
// cublas trsmBatched takes arrays in GPU memory of pointers to the inputs and
// outputs, `a` and `b`.  In XLA the inputs/outputs are always contiguous, but
// we still have to materialize these arrays of pointers.
//
// We use the same trick as for cudnn convolutions: the custom-call returns a
// tuple (actual-result, temp-memory).  In our case the temp buffer always has
// size 2 * sizeof(void*) * batch_size, because we need two arrays of
// pointers.
//
// The custom-call has a backend-config equal to the TriangularSolveOptions
// proto.
class TriangularSolveRewriter : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "triangular-solve-rewriter";
  }

  using HloPassInterface::Run;
  StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TRIANGULAR_SOLVE_REWRITER_H_