/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_EXECUTABLE_RUN_OPTIONS_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_EXECUTABLE_RUN_OPTIONS_H_

#include <functional>
#include <optional>
#include <string>
#include <vector>

#include "tensorflow/compiler/xla/service/global_device_id.h"
#include "tensorflow/compiler/xla/service/service_executable_run_options.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace gpu {

// Key identifying a particular NCCL clique. This is just a set of unique
// device IDs (i.e. GPU IDs). The device IDs must be global within a cluster.
class NcclCliqueKey {
 public:
  explicit NcclCliqueKey(std::vector<GlobalDeviceId> devices);

  template <typename H>
  friend H AbslHashValue(H h, const NcclCliqueKey& k) {
    return H::combine(std::move(h), k.devices_);
  }
  friend bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b) {
    return a.devices_ == b.devices_;
  }

  const std::vector<GlobalDeviceId>& devices() const { return devices_; }

  std::string ToString() const;

 private:
  std::vector<GlobalDeviceId> devices_;
};
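
// Example (illustrative, not part of this header): because NcclCliqueKey
// defines AbslHashValue and operator==, it can be used directly as the key of
// an Abseil hash container, e.g. to cache one NCCL unique id per clique:
//
//   absl::flat_hash_map<NcclCliqueKey, std::string> clique_ids;
//   NcclCliqueKey key({GlobalDeviceId(0), GlobalDeviceId(1)});
//   clique_ids[key] = serialized_unique_id;  // hypothetical cached value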

using NcclUniqueIdCallback =
    std::function<StatusOr<std::string>(const NcclCliqueKey&)>;

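// Example (sketch only): in a multi-host setting the callback typically hands
// back the same serialized ncclUniqueId to every participant of a clique,
// e.g. by looking it up in a user-provided distributed key-value store
// (`kv_store` below is hypothetical):
//
//   NcclUniqueIdCallback callback =
//       [&](const NcclCliqueKey& key) -> StatusOr<std::string> {
//         return kv_store.GetOrCreate(key.ToString());
//       };
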
// GPU-specific executable options.
// We keep these separate from ExecutableRunOptions to avoid adding
// dependencies to ExecutableRunOptions.
class GpuExecutableRunOptions {
 public:
  // Sets a mapping from local device ordinals to global device IDs.
  // Used only on NVIDIA GPUs for cross-host NCCL collectives. If set, the
  // elements of `device_assignment` are interpreted as global device IDs, not
  // local device ordinals.
  GpuExecutableRunOptions& set_gpu_global_device_ids(
      std::optional<std::vector<GlobalDeviceId>> gpu_global_device_ids);
  const std::optional<std::vector<GlobalDeviceId>>& gpu_global_device_ids()
      const;

  // Callback that returns an ncclUniqueId encoded as a string for a group of
  // communicating GPU devices. Used only on NVIDIA GPUs.
  GpuExecutableRunOptions& set_nccl_unique_id_callback(
      NcclUniqueIdCallback nccl_unique_id_callback);
  const NcclUniqueIdCallback& nccl_unique_id_callback() const;

 private:
  std::optional<std::vector<GlobalDeviceId>> gpu_global_device_ids_;
  NcclUniqueIdCallback nccl_unique_id_callback_;
};
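
// Example (illustrative sketch): populating the options before execution;
// `callback` is the NcclUniqueIdCallback sketched above.
//
//   GpuExecutableRunOptions gpu_opts;
//   gpu_opts.set_gpu_global_device_ids(
//       std::vector<GlobalDeviceId>{GlobalDeviceId(0), GlobalDeviceId(1)});
//   gpu_opts.set_nccl_unique_id_callback(callback);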

// NCCL-related execution parameters.
struct NcclExecuteParams {
  NcclExecuteParams(const ServiceExecutableRunOptions& run_options,
                    se::Stream* stream);

  se::Stream* stream;
  RunId run_id;
  const DeviceAssignment* device_assn;                       // never null
  const std::vector<GlobalDeviceId>* gpu_global_device_ids;  // may be null
  const NcclUniqueIdCallback* nccl_unique_id_callback;       // may be null

  StatusOr<GlobalDeviceId> GetGlobalDeviceId() const;
};
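
// Example (sketch, assuming a populated ServiceExecutableRunOptions
// `run_options`): constructing the parameters and resolving the global ID of
// the device the stream runs on.
//
//   NcclExecuteParams params(*run_options, run_options->stream());
//   StatusOr<GlobalDeviceId> global_id = params.GetGlobalDeviceId();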

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_EXECUTABLE_RUN_OPTIONS_H_