/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_CALIBRATOR_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_CALIBRATOR_H_

#include <memory>
#include <unordered_map>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"

namespace tflite {
namespace optimize {
namespace calibration {

// Warning: This is not a public API and is subject to change.

// Builds an interpreter that logs the calibration data in memory.
// The calibration data can be recovered using |calibration_reader|.
//
// Sample usage:
//   std::unique_ptr<Interpreter> interpreter;
//   std::unique_ptr<CalibrationReader> calibration_reader;
//   BuiltinOpResolver resolver = ...
//   FlatBufferModel model = ...
//
//   BuildLoggingInterpreter(model, resolver, &interpreter,
//                           &calibration_reader);
//
//   * Allocate tensors...
//   * Call interpreter->Invoke() on the calibration dataset.
//
// The calibration data can then be read either directly:
//   std::unordered_map<int, CalibrationStats> tensor_index_to_stats;
//   calibration_reader->GetTensorStatsAsMap(&tensor_index_to_stats);
//
// or by adding the calibration data to the model itself:
//   ModelT* original_floating_point_model = ...
//   calibration_reader->AddCalibrationToModel(original_floating_point_model,
//                                             false);
TfLiteStatus BuildLoggingInterpreter(
    const FlatBufferModel& model, const OpResolver& op_resolver,
    std::unique_ptr<Interpreter>* interpreter,
    std::unique_ptr<CalibrationReader>* calibration_reader);

// Same as above, except it takes separate tflite::Model and ErrorReporter
// pointers.
TfLiteStatus BuildLoggingInterpreter(
    const tflite::Model* model, ErrorReporter* error_reporter,
    const OpResolver& op_resolver, std::unique_ptr<Interpreter>* interpreter,
    std::unique_ptr<CalibrationReader>* calibration_reader);

}  // namespace calibration
}  // namespace optimize
}  // namespace tflite

#endif  // TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATION_CALIBRATOR_H_
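
// A fuller end-to-end sketch of the flow documented above, kept as commented
// code in the same style as the sample usage so it does not affect the header.
// It assumes a model with a single float input tensor; the model path and the
// LoadCalibrationBatches() helper are hypothetical placeholders, not part of
// this API.
//
//   #include <algorithm>
//   #include <memory>
//   #include <unordered_map>
//   #include <vector>
//
//   #include "tensorflow/lite/interpreter.h"
//   #include "tensorflow/lite/kernels/register.h"
//   #include "tensorflow/lite/model.h"
//   #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
//
//   namespace calib = tflite::optimize::calibration;
//
//   // Build the logging interpreter from a flatbuffer model on disk.
//   std::unique_ptr<tflite::FlatBufferModel> model =
//       tflite::FlatBufferModel::BuildFromFile("model.tflite");  // hypothetical path
//   tflite::ops::builtin::BuiltinOpResolver resolver;
//   std::unique_ptr<tflite::Interpreter> interpreter;
//   std::unique_ptr<calib::CalibrationReader> reader;
//   if (calib::BuildLoggingInterpreter(*model, resolver, &interpreter,
//                                      &reader) != kTfLiteOk ||
//       interpreter->AllocateTensors() != kTfLiteOk) {
//     return;  // Handle the error as appropriate.
//   }
//
//   // Feed the representative calibration dataset through the interpreter so
//   // that per-tensor statistics are logged.
//   for (const std::vector<float>& batch : LoadCalibrationBatches()) {  // hypothetical helper
//     float* input = interpreter->typed_input_tensor<float>(0);
//     std::copy(batch.begin(), batch.end(), input);
//     interpreter->Invoke();
//   }
//
//   // Read back the logged statistics, keyed by tensor index.
//   std::unordered_map<int, calib::CalibrationStats> tensor_index_to_stats;
//   reader->GetTensorStatsAsMap(&tensor_index_to_stats);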