/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/compile_only_service.h"

#include <string>
#include <utility>
#include <vector>

#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/xla/debug_options_flags.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/dump.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {

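// Convenience overload: builds a ServiceOptions that only sets the platform
// and delegates to the ServiceOptions-based factory below.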
/* static */ StatusOr<std::unique_ptr<CompileOnlyService>>
CompileOnlyService::NewService(se::Platform* platform) {
  ServiceOptions default_options;
  default_options.set_platform(platform);
  return NewService(default_options);
}

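// Creates a CompileOnlyService backed by the compiler registered for the
// requested platform (or for the default platform when none is specified).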
/* static */ StatusOr<std::unique_ptr<CompileOnlyService>>
CompileOnlyService::NewService(const ServiceOptions& options) {
  se::Platform* platform = options.platform();
  if (platform == nullptr) {
    TF_ASSIGN_OR_RETURN(platform, PlatformUtil::GetDefaultPlatform());
  }

  TF_ASSIGN_OR_RETURN(auto compiler, Compiler::GetForPlatform(platform));

  std::unique_ptr<CompileOnlyService> service(
      new CompileOnlyService(options, compiler));
  return std::move(service);
}

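// A compile-only service carries no execution backend; it only holds the
// compiler used for ahead-of-time compilation.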
CompileOnlyService::CompileOnlyService(const ServiceOptions& options,
                                       Compiler* compiler)
    : Service(options, /*execute_backend=*/nullptr), compiler_(compiler) {}

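// Builds an HloModule for every requested computation instance, assembling
// ExecutionOptions from the AOT options along the way, and then hands the
// whole batch to the compiler as a single HloModuleGroup.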
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileOnlyService::CompileAheadOfTime(
    absl::Span<const AotXlaComputationInstance> computations,
    const AotCompilationOptions& options,
    std::unique_ptr<AotCompilationMetadata>* metadata) {
  std::vector<std::unique_ptr<HloModule>> hlo_modules;

  const DebugOptions& debug_options = options.debug_options();
  ExecutionOptions execution_options;
  *execution_options.mutable_debug_options() = debug_options;
  // Capture replica_count, num_cores, and device_assignment in
  // ExecutionOptions to later save in a proto dump.
  if (options.replica_count() > 0) {
    execution_options.set_num_replicas(options.replica_count());
    if (options.has_static_device_assignment()) {
      CHECK_EQ(options.replica_count(),
               options.static_device_assignment().replica_count());
    }
  }
  if (options.num_cores() > 0) {
    execution_options.set_num_partitions(options.num_cores());
    if (options.has_static_device_assignment()) {
      CHECK_EQ(options.num_cores(),
               options.static_device_assignment().computation_count());
    }
  }
  if (options.has_static_device_assignment()) {
    TF_RETURN_IF_ERROR(options.static_device_assignment().Serialize(
        execution_options.mutable_device_assignment()));
  }
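  // Propagate the SPMD partitioning settings, including the auto-partitioner
  // mesh shape and mesh ids, into the execution options.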
  execution_options.set_use_spmd_partitioning(options.use_spmd_partitioning());
  execution_options.set_use_auto_spmd_partitioning(
      options.use_auto_spmd_partitioning());
  for (auto t : options.auto_spmd_partitioning_mesh_shape()) {
    execution_options.mutable_auto_spmd_partitioning_mesh_shape()->Add(t);
  }
  for (auto t : options.auto_spmd_partitioning_mesh_ids()) {
    execution_options.mutable_auto_spmd_partitioning_mesh_ids()->Add(t);
  }
  execution_options.set_deduplicate_hlo(options.deduplicate_hlo());
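  // Build an HloModule for each requested computation, normalizing its
  // argument and result layouts to the compiler's device representation.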
  for (const AotXlaComputationInstance& instance : computations) {
    TF_RET_CHECK(instance.computation.has_host_program_shape());
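    // Array subshapes whose layout does not specify tiling are rewritten to
    // the compiler's default device shape representation.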
    auto update_shape_with_empty_tiles = [this](Shape* subshape,
                                                const xla::ShapeIndex& index) {
      if (subshape->IsArray() &&
          (!subshape->has_layout() || subshape->layout().tiles().empty())) {
        *subshape = compiler_->DefaultDeviceShapeRepresentation(*subshape);
      }
    };
    Shape result_layout(*instance.result_layout);
    ShapeUtil::ForEachMutableSubshape(&result_layout,
                                      update_shape_with_empty_tiles);
    *execution_options.mutable_shape_with_output_layout() =
        result_layout.ToProto();
    for (auto shape : instance.argument_layouts) {
      ShapeUtil::ForEachMutableSubshape(const_cast<Shape*>(shape),
                                        update_shape_with_empty_tiles);
    }

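    // Derive the module configuration from the program shape, the argument
    // layouts, and the execution options assembled above.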
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloModuleConfig> module_config,
        CreateModuleConfig(
            ProgramShape(instance.computation.host_program_shape()),
            instance.argument_layouts, &execution_options, &options));

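    // Materialize the HloModule from its proto and, if dumping is enabled,
    // record it before any optimization passes run.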
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloModule> hlo_module,
        HloModule::CreateFromProto(instance.computation, *module_config));
    DumpHloModuleIfEnabled(*hlo_module, "before_optimizations");
    hlo_modules.push_back(std::move(hlo_module));
  }

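  // Compile all modules together as one group, named after the first module.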
  return compiler_->CompileAheadOfTime(
      std::make_unique<HloModuleGroup>(hlo_modules[0]->name(),
                                       absl::MakeSpan(hlo_modules)),
      options, metadata);
}

}  // namespace xla