/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"

#if GOOGLE_CUDA && GOOGLE_TENSORRT

#include <vector>

#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_experimental_features.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {
namespace tensorrt {

bool filter_string(const string& msg) {
  // This function checks for known substrings that shall be ignored.

  static const std::vector<string> substr_patterns{
      // Automatic messages generated by TensorRT when combined with
      // Automatic Mixed Precision - TensorRT 8.2
      "Missing scale and zero-point for",
      "Subnormal FP16 values detected",
      "If this is not the desired behavior, please modify the weights",
      "had the following issues when converted to FP16",
      "Values less than smallest positive FP16 Subnormal value detected.",
      // Deprecation Warnings
      "The implicit batch dimension mode has been deprecated.",
      "The getMaxBatchSize() function should not be used with an engine built",
      // Input-Warnings
      "[RemoveDeadLayers] Input Tensor input is unused or used only at",
      "Unused Input:",
      // Data Type Warnings
      "Tensor DataType is determined at build time for tensors not marked as",
      // Myelin Performance Warning in dynamic shape mode
      "Myelin graph with multiple dynamic values may have poor performance",
      "(# 0 (SHAPE",
  };

  for (const string& pattern : substr_patterns) {
    if (msg.find(pattern) != string::npos) {
      return true;
    }
  }
  return false;
}

// Use TF logging for TensorRT information.
void Logger::log(Severity severity, const char* msg) noexcept {
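  // Message filtering is enabled by default; it can be disabled by activating
  // the "disable_logger_filtering" experimental feature. The flag below is
  // computed once, on the first call to log().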
  static const bool filter_messages = []() {
    return !isExperimentalFeatureActivated("disable_logger_filtering");
  }();

  if (filter_messages && filter_string(msg)) return;

  if (!isValidSeverity(severity, msg) || suppressedMsg_ & (1 << (int)severity))
    return;

  // Route TensorRT messages to TF logging; info-level messages are demoted to
  // VLOG(2).
  switch (severity) {
    case Severity::kVERBOSE:
    case Severity::kINFO: {  // Mark TRT info messages as debug!
      VLOG(2) << name_ << " " << msg;
      break;
    }
    case Severity::kWARNING: {
      LOG_WARNING_WITH_PREFIX << name_ << " " << msg;
      break;
    }
    case Severity::kERROR: {
      LOG(ERROR) << name_ << " " << msg;
      break;
    }
    case Severity::kINTERNAL_ERROR: {
      LOG(FATAL) << name_ << " " << msg;
      break;
    }
  }
}

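// suppressedMsg_ is a bitmask indexed by severity: suppressLoggerMsgs() sets
// the bit for the given severity so that log() drops those messages, and
// unsuppressLoggerMsgs() clears it again ((-1) ^ bit is equivalent to ~bit).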
void Logger::suppressLoggerMsgs(Severity severity) {
  if (isValidSeverity(severity)) {
    suppressedMsg_ |= 1 << (int)severity;
  }
}

void Logger::unsuppressLoggerMsgs(Severity severity) {
  if (isValidSeverity(severity)) {
    suppressedMsg_ &= (-1) ^ (1 << (int)severity);
  }
}
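//
// A minimal usage sketch (hypothetical call site, not code in this file):
//
//   Logger* trt_logger = Logger::GetLogger();
//   trt_logger->suppressLoggerMsgs(Logger::Severity::kWARNING);    // mute TRT warnings
//   // ... e.g. build a TensorRT engine ...
//   trt_logger->unsuppressLoggerMsgs(Logger::Severity::kWARNING);  // restore warnings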

bool Logger::isValidSeverity(Severity severity, const char* msg) noexcept {
  switch (severity) {
    case Severity::kVERBOSE:
    case Severity::kINFO:
    case Severity::kWARNING:
    case Severity::kERROR:
    case Severity::kINTERNAL_ERROR:
      return true;
  }
  return false;
}

// static
Logger* Logger::GetLogger() {
  static Logger* logger = new Logger("DefaultLogger");
  return logger;
}

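// Registers the default logger with the tf2tensorrt logger registry (see
// logger_registry.h) so that it can be looked up by name elsewhere in the
// converter.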
REGISTER_TENSORRT_LOGGER("DefaultLogger", Logger::GetLogger());

}  // namespace tensorrt
}  // namespace tensorflow

#endif  // GOOGLE_CUDA && GOOGLE_TENSORRT