/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_OPERATOR_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_OPERATOR_H_

#include <stdint.h>

#include <string>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "mlir/IR/Attributes.h"  // from @llvm-project
#include "mlir/IR/Builders.h"  // from @llvm-project
#include "mlir/IR/Operation.h"  // from @llvm-project
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace mlir {

// Returns the MLIR op name for the flatbuffer operator corresponding to
// `op_code`.
std::string GetMlirOpNameFromOpCode(const ::tflite::OperatorCodeT &op_code);

// Returns the builtin op code for the given MLIR operation on success; emits
// an error and returns llvm::None on failure.
llvm::Optional<tflite::BuiltinOperator> GetBuiltinOpCode(Operation *mlir_op);

// Packs the given MLIR operation into a TFLite FlatBuffer operator object.
// Returns the FlatBuffer offset for the operator on success; emits an error
// and returns llvm::None on failure.
llvm::Optional<flatbuffers::Offset<tflite::Operator>> CreateFlatBufferOperator(
    Operation *mlir_op, uint32_t opcode_index,
    const std::vector<int32_t> &operands, const std::vector<int32_t> &results,
    const std::vector<int32_t> &intermediates,
    flatbuffers::FlatBufferBuilder *fbb);

// Populates the array of mlir::NamedAttributes corresponding to the given
// tflite::BuiltinOptionsUnion.
// We use an out parameter per LLVM convention.
void BuiltinOptionsToAttributes(
    tflite::BuiltinOptionsUnion op_union, mlir::Builder builder,
    // NOLINTNEXTLINE
    llvm::SmallVectorImpl<mlir::NamedAttribute> &attributes);

// Since the trailing operands of a TFL op may be optional tensors, the number
// of input operands can vary. This function returns the min/max number of
// operands for the given TFLite op name.
llvm::MinMax OperandNumbersMinMax(llvm::StringRef op_name);
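
// Illustrative sketch (not part of this header's API surface): validating an
// operand count against the range reported above before packing an operator.
// The op name "tfl.add" and the variable `num_operands` are assumptions for
// illustration only; llvm::MinMax exposes `Min` and `Max` members.
//
//   llvm::MinMax range = mlir::OperandNumbersMinMax("tfl.add");
//   if (num_operands < range.Min || num_operands > range.Max) {
//     // Reject or pad the operand list before building the flatbuffer op.
//   }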

// Populates the `custom_code` and `custom_options` into attributes.
// `custom_code` is used to identify the CustomOp.
// `custom_options` is an opaque attribute used to store information for this
// custom op.
tensorflow::Status CustomOptionsToAttributes(
    const std::string &custom_code, const std::vector<uint8_t> &custom_options,
    mlir::Builder builder,
    // NOLINTNEXTLINE
    Location loc, llvm::SmallVectorImpl<mlir::NamedAttribute> *attributes);

}  // namespace mlir

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_OPERATOR_H_