/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/reduce_builder.h"

#include <stdint.h>

#include <limits>
#include <vector>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"

namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ReduceOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
                                               const TfLiteIntArray* outputs,
                                               TfLiteContext* context) {
  // Input data tensor.
  int tensor_id = inputs->data[0];
  const auto& input_tensor = context->tensors[tensor_id];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
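  // Add the input tensor's quantization min/max as constant inputs.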
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, input_tensor));

  // Axes tensor should be constant.
  int axes_tensor_id = inputs->data[1];
  const auto& axes_tensor = context->tensors[axes_tensor_id];
  if (axes_tensor.allocation_type != kTfLiteMmapRo) {
    TF_LITE_KERNEL_LOG(context, "Reduction op doesn't have constant axis");
    return kTfLiteError;
  }

  // Hexagon assumes a 4-D input tensor. If the input tensor has fewer than 4
  // dimensions, shift each reduction axis by the rank difference.
  auto* const_axes_node =
      graph_builder_->AddConstNodeWithData(axes_tensor_id, axes_tensor);
  if (input_tensor.dims->size < 4) {
    const int axes_size = NumElements(&axes_tensor);
    const int offset = 4 - input_tensor.dims->size;
    std::vector<int> axes(axes_size);
    for (int i = 0; i < axes_size; ++i) {
      axes[i] = axes_tensor.data.i32[i] + offset;
    }
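    // Bake the shifted axes into a new const node shaped [1, 1, 1, axes_size].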
    const std::vector<int> axes_shape = {1, 1, 1, axes_size};
    auto axes_node = graph_builder_->AddConstNodeWithData(
        axes_shape.data(), reinterpret_cast<char*>(axes.data()),
        axes.size() * sizeof(axes[0]));
    AddInput(TensorID(axes_node->GetID(), 0));
  } else {
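    // Input is already 4-D, so the axes tensor can be used directly.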
    AddInput(TensorID(const_axes_node->GetID(), 0));
  }

  auto& output_tensor = context->tensors[outputs->data[0]];
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, output_tensor.dims);

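  // Output min/max are added as op inputs and reused by the final
  // requantize below.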
  float output_min = -1, output_max = -1;
  ComputeMinAndMaxQuantValues(output_tensor, &output_min, &output_max);
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
  // Min/max values for output tensor.
  AddInput(TensorID(output_min_const->GetID(), 0));
  AddInput(TensorID(output_max_const->GetID(), 0));

  // Add outputs: the reduced tensor plus the min/max values the op computes.
  size_t output_element_size = 0;
  TF_LITE_ENSURE_STATUS(
      GetSizeOfType(context, output_tensor.type, &output_element_size));
  auto mean_output = AddOutput(output_element_size, 4,
                               {output_batch_size, output_height_size,
                                output_width_size, output_depth_size});
  auto mean_out_min = AddOutput(output_element_size, 4, kScalarShape);
  auto mean_out_max = AddOutput(output_element_size, 4, kScalarShape);
  // Mean op doesn't honor the passed min/max for output, so we need
  // to add requantize.
  auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
  requantize_op->SetOpType(OP_Requantize_8to8);
  requantize_op->AddInput(mean_output);
  requantize_op->AddInput(mean_out_min);
  requantize_op->AddInput(mean_out_max);
  requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
  requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
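  // The requantized tensor becomes this builder's single output.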
  node_output_ =
      requantize_op->AddOutput(sizeof(uint8_t), 4,
                               {output_batch_size, output_height_size,
                                output_width_size, output_depth_size});
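  // Min/max scalar outputs of Requantize; not registered as graph outputs.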
  requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
  requantize_op->AddOutput(sizeof(float), 4, kScalarShape);

  return kTfLiteOk;
}

TfLiteStatus ReduceOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
                                              TfLiteContext* context) {
  // Should be only 1 output.
  graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
                                  node_output_.second);

  return kTfLiteOk;
}

ReduceOpBuilder::~ReduceOpBuilder() {}

OpBuilder* CreateReduceBuilder(GraphBuilder* graph_builder, int op_type) {
  return new ReduceOpBuilder(graph_builder, op_type);
}

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite