/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <numeric>

#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
namespace mhlo {

namespace {
29
30 // Converts ClampOp with broadcast semantics. ClampOp requires "all three arrays
31 // must be the same shape. Alternatively, as a restricted form of broadcasting,
32 // min and/or max can be a scalar of type T."
33 struct ClampWithBroadcastConvert : public OpRewritePattern<ClampOp> {
ClampWithBroadcastConvertmlir::mhlo::__anon15911d260111::ClampWithBroadcastConvert34 explicit ClampWithBroadcastConvert(MLIRContext *context)
35 : OpRewritePattern<ClampOp>(context) {}
36
matchAndRewritemlir::mhlo::__anon15911d260111::ClampWithBroadcastConvert37 LogicalResult matchAndRewrite(ClampOp op,
38 PatternRewriter &rewriter) const override {
39 auto operandType = op.operand().getType().dyn_cast<RankedTensorType>();
40 auto maxType = op.max().getType().dyn_cast<RankedTensorType>();
41 auto minType = op.min().getType().dyn_cast<RankedTensorType>();
42 // Unrancked types are not supported.
43 if (!operandType || !maxType || !minType) return failure();
44 // Does not support operand with dynamic dimensions for now.
45 if (!operandType.hasStaticShape()) return failure();
46
47 ArrayRef<int64_t> operandShape = operandType.getShape();
48
49 Value maxValue = op.max();
50 if (maxType != operandType) {
51 assert(maxType.getRank() == 0);
52 maxValue = rewriter.createOrFold<BroadcastOp>(
53 op.getLoc(), operandType, maxValue,
54 rewriter.getI64TensorAttr(operandShape));
55 }
56
57 Value minValue = op.min();
58 if (minType != operandType) {
59 assert(minType.getRank() == 0);
60 minValue = rewriter.createOrFold<BroadcastOp>(
61 op.getLoc(), operandType, minValue,
62 rewriter.getI64TensorAttr(operandShape));
63 }
64
65 rewriter.replaceOpWithNewOp<ClampOp>(op, op.getType(), minValue,
66 op.operand(), maxValue);
67 return success();
68 }
69 };

}  // namespace

setupMaterializeBroadcastsLegality(MLIRContext *,ConversionTarget * conversionTarget)73 void setupMaterializeBroadcastsLegality(MLIRContext * /*context*/,
74 ConversionTarget *conversionTarget) {
75 conversionTarget->addDynamicallyLegalOp<ClampOp>([](ClampOp op) {
76 return op.max().getType() == op.operand().getType() &&
77 op.min().getType() == op.operand().getType();
78 });
79 }
populateMaterializeBroadcastsPatterns(MLIRContext * context,RewritePatternSet * patterns)81 void populateMaterializeBroadcastsPatterns(MLIRContext *context,
82 RewritePatternSet *patterns) {
83 // ClampOp. This op has a special case where it accepts either same-shaped
84 // inputs or scalars (a restricted form of broadcasting). This makes the
85 // broadcast explicit.
86 patterns->add<ClampWithBroadcastConvert>(context);
87 }

}  // namespace mhlo
}  // namespace mlir