/* Copyright 2016 The TensorFlow Authors All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This checker checks for common misconfigurations of operations.
//
#ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_OPERATION_CHECKER_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_OPERATION_CHECKER_H_

#include "absl/strings/str_format.h"
#include "tensorflow/core/profiler/internal/advisor/checker.h"

namespace tensorflow {
namespace tfprof {

class OperationChecker : public Checker {
 public:
  string name() const override { return kCheckers[1]; }

 private:
  AdviceProto::Checker Check(const AdvisorOptionsProto::CheckerOption& options,
                             const TFStats* stats) override {
    if (!stats) {
      absl::FPrintF(
          stderr, "Missing profiles (e.g. graph, run_meta). Skip %s\n", name());
      return reports_;
    }
    bool use_batch_norm = false;
    bool use_fused_batch_norm = false;
    bool recommend_nchw = false;
    for (const auto& n : stats->nodes()) {
      const TFGraphNode* node = n.second.get();
      if (node->name().find("BatchNorm") != node->name().npos) {
        use_batch_norm = true;
      }
      if (node->op_types().find("FusedBatchNorm") != node->op_types().end()) {
        use_fused_batch_norm = true;
      }

      const AttrValue* attr = node->op_attrs("data_format");
      if (attr) {
        if (attr->s() == "NHWC" &&
            IsPlacedOnAccelerator(node->canonical_device())) {
          recommend_nchw = true;
        }
      }
    }
    if (use_batch_norm && !use_fused_batch_norm) {
      reports_.add_reports(
          "Maybe use faster FusedBatchNorm instead of BatchNorm");
    }
    if (recommend_nchw) {
      // TODO(xpan): Maybe print which Op supports NCHW.
      reports_.add_reports(
          "Found operation using NHWC data_format on GPU. Maybe "
          "NCHW is faster.");
    }
    return reports_;
  }

 private:
  AdviceProto::Checker reports_;
};

}  // namespace tfprof
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_OPERATION_CHECKER_H_
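// Usage sketch (not part of the upstream header): a rough illustration of how
// this checker could be driven directly. It assumes the `Checker` base class
// in checker.h exposes a public `Run(options, stats)` wrapper around `Check`,
// and that `stats` points to a `TFStats` already populated from graph and
// run_meta profiles; those names are assumptions, not verified API here.
//
//   tensorflow::tfprof::OperationChecker checker;
//   tensorflow::tfprof::AdvisorOptionsProto::CheckerOption option;
//   tensorflow::tfprof::AdviceProto::Checker report =
//       checker.Run(option, stats);  // runs the Check() heuristics above
//   for (const string& advice : report.reports()) {
//     absl::FPrintF(stdout, "[%s] %s\n", checker.name(), advice);
//   }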