/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_PRECOMPILED_KERNELS_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_PRECOMPILED_KERNELS_H_

#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/gpu/gpu_asm_opts.h"
#include "tensorflow/stream_executor/stream.h"

// Hardcoded GPU kernels for various simple tasks.

namespace xla {
namespace gpu {

// In GPU memory, does the equivalent of:
//
//   char* base_ptr = ...;
//   void** ptrs_out = ...;
//   for (int i = 0; i < n; i++) {
//     ptrs_out[i] = base_ptr + i * stride;
//   }
//
// This is useful for functions like cublasTrsmBatched that operate on an
// array of pointers in GPU memory.  In XLA these usually aren't arbitrary
// pointers but rather evenly-strided addresses into one contiguous buffer.
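//
// For example (the shapes here are illustrative, not prescribed by this
// header): for n contiguous m-by-k float matrices stored back to back,
// stride_bytes would be m * k * sizeof(float), so that ptrs_out[i] points
// at matrix i.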
//
// Instead of using a kernel, a simpler way of doing this would be to create
// this buffer on the host and then copy it to the device.  But using a kernel
// instead of an H2D copy avoids a few performance pitfalls.
//
//  - Only one H2D copy can run on a given GPU at a time.  If there's already
//    a copy ongoing as part of other work on the GPU, our copy here will
//    block.  In contrast, multiple kernels can run simultaneously.
//
//  - H2D copies from CUDA unpinned memory can acquire a global lock in the
//    driver and slow down *all* work on the GPU.  So to do this right, we'd
//    need to allocate the host memory as pinned, one alloc per stream.  Then
//    we'd need to manage this memory without leaks.  This becomes complex!
Status MakeBatchPointers(se::Stream* stream, const se::GpuAsmOpts& asm_opts,
                         se::DeviceMemoryBase base_ptr, int stride_bytes, int n,
                         se::DeviceMemoryBase ptrs_out);
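
// A minimal sketch of the kind of kernel this wraps, for intuition only (the
// real implementation is precompiled, and its details may differ):
//
//   __global__ void MakeBatchPointersKernel(char* base, int stride, int n,
//                                           void** ptrs_out) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n) ptrs_out[i] = base + i * stride;
//   }
//
// Hypothetical call site (`stream`, `asm_opts`, and both buffers are assumed
// to be provided by the surrounding GPU executable):
//
//   TF_RETURN_IF_ERROR(MakeBatchPointers(
//       stream, asm_opts, /*base_ptr=*/batch_buffer,
//       /*stride_bytes=*/m * k * sizeof(float), /*n=*/batch_size,
//       /*ptrs_out=*/pointer_buffer));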

}  // namespace gpu
}  // namespace xla
#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_PRECOMPILED_KERNELS_H_