xref: /aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/dtensor_op_spmd_expander.h (revision b6fb3261f9314811a0f4371741dbb8839866f948)
1 /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #ifndef TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_DTENSOR_OP_SPMD_EXPANDER_H_
17 #define TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_DTENSOR_OP_SPMD_EXPANDER_H_
18 
19 #include "mlir/IR/Builders.h"  // from @llvm-project
20 #include "tensorflow/dtensor/cc/dstatus.h"
21 #include "tensorflow/dtensor/mlir/spmd_expander.h"
22 
23 namespace tensorflow {
24 namespace dtensor {
25 
26 // Converts layout of input tensor to target layout inserting split or reduction
27 // ops if necessary.
class RelayoutSPMDExpander : public SPMDExpanderBase {
 public:
  // Rewrites the Relayout op into its SPMD-expanded form, inserting the
  // split/reduction ops needed to move the input tensor from its current
  // layout to the requested target layout. Returns the (possibly replaced)
  // operation, or an error status on failure.
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override;

  // Given layouts for the op's inputs (map keyed by operand index), computes
  // layouts for the op's outputs (map keyed by result index).
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override;

  // Given layouts for the op's outputs, computes the layouts expected for the
  // op's inputs.
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override;
};
40 
// Lowers DTensorSend op to a backend-specific TF send / XLA send operation.
42 // Following is the semantics for DTensorSend/Recv.
43 // a) Both replicated/sharded DTensors can be sent/received.
44 // b) When sharded DTensor is sent to another mesh, the DTensor is first
45 //    all-to-all'ed to replicated tensor and sent to target mesh.
46 // c) Send/Recv mesh must be from or to CPU mesh. That is, TPU<->TPU or
//    GPU<->GPU is not supported.
48 // d) Cross host send/recv is not supported. That is, sending tensor from
49 //    TPU device of TPUWorker 0 to host of TPUWorker 1 is unsupported.
class DTensorSendSPMDExpander : public SPMDExpanderBase {
 public:
  // Rewrites the DTensorSend op into the backend-specific send operation
  // (TF send or XLA send, per the class comment above). Returns the
  // (possibly replaced) operation, or an error status on failure.
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override;

  // Given layouts for the op's inputs (map keyed by operand index), computes
  // layouts for the op's outputs (map keyed by result index).
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override;

  // Given layouts for the op's outputs, computes the layouts expected for the
  // op's inputs.
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override;
};
62 
63 // Lowers DTensorRecv op to backend specific TF recv/ xla recv operation.
class DTensorRecvSPMDExpander : public SPMDExpanderBase {
 public:
  // Rewrites the DTensorRecv op into the backend-specific receive operation
  // (TF recv or XLA recv). Returns the (possibly replaced) operation, or an
  // error status on failure.
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override;

  // Given layouts for the op's inputs (map keyed by operand index), computes
  // layouts for the op's outputs (map keyed by result index).
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override;

  // Given layouts for the op's outputs, computes the layouts expected for the
  // op's inputs.
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override;
};
76 
77 }  // namespace dtensor
78 }  // namespace tensorflow
79 
80 #endif  // TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_DTENSOR_OP_SPMD_EXPANDER_H_
81