/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/util/util.h"

#include <string>
#include <vector>

#include "absl/base/call_once.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/env_var.h"

namespace tensorflow {

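// Returns the portion of `op_name` before the first '/', or an empty
// StringPiece if there is no '/' or the name begins with '/'.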
StringPiece NodeNamePrefix(const StringPiece& op_name) {
  StringPiece sp(op_name);
  auto p = sp.find('/');
  if (p == StringPiece::npos || p == 0) {
    return "";
  } else {
    return StringPiece(sp.data(), p);
  }
}

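// Returns everything in `op_name` before the last '/', or an empty
// StringPiece if there is no '/' or the name begins with '/'.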
StringPiece NodeNameFullPrefix(const StringPiece& op_name) {
  StringPiece sp(op_name);
  auto p = sp.rfind('/');
  if (p == StringPiece::npos || p == 0) {
    return "";
  } else {
    return StringPiece(sp.data(), p);
  }
}

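// Tracks a running average over the most recent `window` values; `window`
// must be at least 1.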
MovingAverage::MovingAverage(int window)
    : window_(window),
      sum_(0.0),
      data_(new double[window_]),
      head_(0),
      count_(0) {
  CHECK_GE(window, 1);
}

MovingAverage::~MovingAverage() { delete[] data_; }

void MovingAverage::Clear() {
  count_ = 0;
  head_ = 0;
  sum_ = 0;
}

double MovingAverage::GetAverage() const {
  if (count_ == 0) {
    return 0;
  } else {
    return static_cast<double>(sum_) / count_;
  }
}

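// Adds `v` to the moving window. During warm-up the value is simply
// appended; once the window is full, the oldest value is dropped from the
// running sum and overwritten in the ring buffer.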
void MovingAverage::AddValue(double v) {
  if (count_ < window_) {
    // This is the warmup phase. We don't have a full window's worth of data.
    head_ = count_;
    data_[count_++] = v;
  } else {
    if (window_ == ++head_) {
      head_ = 0;
    }
    // Toss the oldest element
    sum_ -= data_[head_];
    // Add the newest element
    data_[head_] = v;
  }
  sum_ += v;
}

static char hex_char[] = "0123456789abcdef";

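// Renders the `n` bytes starting at `ptr` as lowercase hex, three characters
// per byte: a space followed by two hex digits.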
string PrintMemory(const char* ptr, size_t n) {
  string ret;
  ret.resize(n * 3);
  for (size_t i = 0; i < n; ++i) {
    // Cast to unsigned char so bytes >= 0x80 don't sign-extend into a
    // negative index.
    const unsigned char byte = static_cast<unsigned char>(ptr[i]);
    ret[i * 3] = ' ';
    ret[i * 3 + 1] = hex_char[byte >> 4];
    ret[i * 3 + 2] = hex_char[byte & 0xf];
  }
  return ret;
}

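// Converts the row-major flat index `flat` into per-dimension coordinates for
// `shape`, formatted as "[i0,i1,...]". Rank 0 yields "" and rank 1 yields
// "[flat]".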
string SliceDebugString(const TensorShape& shape, const int64_t flat) {
  // Special case rank 0 and 1
  const int dims = shape.dims();
  if (dims == 0) return "";
  if (dims == 1) return strings::StrCat("[", flat, "]");

  // Compute strides
  gtl::InlinedVector<int64_t, 32> strides(dims);
  strides.back() = 1;
  for (int i = dims - 2; i >= 0; i--) {
    strides[i] = strides[i + 1] * shape.dim_size(i + 1);
  }

  // Unflatten index
  int64_t left = flat;
  string result;
  for (int i = 0; i < dims; i++) {
    strings::StrAppend(&result, i ? "," : "[", left / strides[i]);
    left %= strides[i];
  }
  strings::StrAppend(&result, "]");
  return result;
}

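// Reports whether oneDNN (MKL) custom operations are enabled for this build
// and process: always false without INTEL_MKL; gated by TF_DISABLE_MKL when
// ENABLE_MKL is defined; otherwise defaulted from CPU features on Linux and
// overridable via TF_ENABLE_ONEDNN_OPTS.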
bool IsMKLEnabled() {
#ifndef INTEL_MKL
  return false;
#endif  // !INTEL_MKL
  static absl::once_flag once;
#ifdef ENABLE_MKL
  // Keeping TF_DISABLE_MKL env variable for legacy reasons.
  static bool oneDNN_disabled = false;
  absl::call_once(once, [&] {
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_DISABLE_MKL", false, &oneDNN_disabled));
    if (oneDNN_disabled) VLOG(2) << "TF-MKL: Disabling oneDNN";
  });
  return (!oneDNN_disabled);
#else
  // Linux: Turn oneDNN on by default for CPUs with neural network features.
  // Windows: oneDNN is off by default.
  // No need to guard for other platforms here because INTEL_MKL is only
  // defined for non-mobile Linux or Windows.
  static bool oneDNN_enabled =
#ifdef __linux__
      port::TestCPUFeature(port::CPUFeature::AVX512_VNNI) ||
      port::TestCPUFeature(port::CPUFeature::AVX512_BF16) ||
      port::TestCPUFeature(port::CPUFeature::AVX_VNNI) ||
      port::TestCPUFeature(port::CPUFeature::AMX_TILE) ||
      port::TestCPUFeature(port::CPUFeature::AMX_INT8) ||
      port::TestCPUFeature(port::CPUFeature::AMX_BF16);
#else
      false;
#endif  // __linux__
  absl::call_once(once, [&] {
    auto status = ReadBoolFromEnvVar("TF_ENABLE_ONEDNN_OPTS", oneDNN_enabled,
                                     &oneDNN_enabled);
    if (!status.ok()) {
      LOG(WARNING) << "TF_ENABLE_ONEDNN_OPTS is not set to either '0', 'false',"
                   << " '1', or 'true'. Using the default setting: "
                   << oneDNN_enabled;
    }
    if (oneDNN_enabled) {
#ifndef DNNL_AARCH64_USE_ACL
      LOG(INFO) << "oneDNN custom operations are on. "
                << "You may see slightly different numerical results due to "
                << "floating-point round-off errors from different computation "
                << "orders. To turn them off, set the environment variable "
                << "`TF_ENABLE_ONEDNN_OPTS=0`.";
#else
      LOG(INFO) << "Experimental oneDNN custom operations are on. "
                << "If you experience issues, please turn them off by setting "
                << "the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.";
#endif  // !DNNL_AARCH64_USE_ACL
    }
  });
  return oneDNN_enabled;
#endif  // ENABLE_MKL
}

}  // namespace tensorflow