/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 #include "tensorflow/lite/delegates/coreml/builders/fully_connected_op_builder.h"
16 
17 #include <algorithm>
18 #include <memory>
19 #include <string>
20 
21 #include "tensorflow/lite/c/builtin_op_data.h"
22 #include "tensorflow/lite/c/common.h"
23 #include "tensorflow/lite/delegates/coreml/builders/activation_layer_builder.h"
24 #include "tensorflow/lite/delegates/coreml/builders/op_factory.h"
25 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
26 #include "tensorflow/lite/kernels/kernel_util.h"
27 
28 namespace tflite {
29 namespace delegates {
30 namespace coreml {
DebugName()31 const std::string& FullyConnectedOpBuilder::DebugName() {
32   if (debug_name_.empty()) SetDebugName("FullyConnectedOpBuilder", node_id_);
33   return debug_name_;
34 }
35 
SetWeights(TfLiteTensor * weights)36 void FullyConnectedOpBuilder::SetWeights(TfLiteTensor* weights) {
37   weights_ = weights;
38 }
39 
SetBias(TfLiteTensor * bias)40 void FullyConnectedOpBuilder::SetBias(TfLiteTensor* bias) { bias_ = bias; }
41 
Build()42 CoreML::Specification::NeuralNetworkLayer* FullyConnectedOpBuilder::Build() {
43   if (layer_ == nullptr) {
44     layer_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
45   }
46   layer_->set_name(DebugName());
47 
48   FillCoreMLWeights();
49   FillCoreMLBias();
50 
51   return layer_.release();
52 }
53 
FillCoreMLWeights()54 void FullyConnectedOpBuilder::FillCoreMLWeights() {
55   layer_->mutable_innerproduct()->set_inputchannels(weights_->dims->data[1]);
56   layer_->mutable_innerproduct()->set_outputchannels(weights_->dims->data[0]);
57   if (weights_->type == kTfLiteFloat32) {
58     const float* weights_data = GetTensorData<float>(weights_);
59     std::copy(weights_data, weights_data + NumElements(weights_),
60               google::protobuf::RepeatedFieldBackInserter(layer_->mutable_innerproduct()
61                                                     ->mutable_weights()
62                                                     ->mutable_floatvalue()));
63   } else if (weights_->type == kTfLiteFloat16) {
64     // float16value has type of bytes (std::string)
65     layer_->mutable_innerproduct()
66         ->mutable_weights()
67         ->mutable_float16value()
68         ->assign(weights_->data.raw, weights_->bytes);
69   }
70 }
71 
FillCoreMLBias()72 void FullyConnectedOpBuilder::FillCoreMLBias() {
73   if (bias_ != nullptr) {
74     layer_->mutable_innerproduct()->set_hasbias(true);
75     if (bias_->type == kTfLiteFloat32) {
76       const float* bias_data = GetTensorData<float>(bias_);
77       std::copy(bias_data, bias_data + NumElements(bias_),
78                 google::protobuf::RepeatedFieldBackInserter(layer_->mutable_innerproduct()
79                                                       ->mutable_bias()
80                                                       ->mutable_floatvalue()));
81     } else if (bias_->type == kTfLiteFloat16) {
82       // float16value has type of bytes (std::string)
83       layer_->mutable_innerproduct()
84           ->mutable_bias()
85           ->mutable_float16value()
86           ->assign(bias_->data.raw, bias_->bytes);
87     }
88   }
89 }
90 
PopulateSubgraph(TfLiteContext * context)91 TfLiteStatus FullyConnectedOpBuilder::PopulateSubgraph(TfLiteContext* context) {
92   const auto* fc_params =
93       reinterpret_cast<const TfLiteFullyConnectedParams*>(builtin_data_);
94   TfLiteFusedActivation activation = fc_params->activation;
95 
96   if (activation == kTfLiteActNone) {
97     builder_output_ = AddOutput();
98   } else {
99     ActivationLayerBuilder* activation_builder =
100         reinterpret_cast<ActivationLayerBuilder*>(
101             graph_builder_->AddBuilder(CreateActivationLayerBuilder, nullptr));
102     activation_builder->SetActivation(activation);
103     activation_builder->AddInput(AddOutput());
104     activation_builder->PopulateSubgraph(context);
105     builder_output_ = activation_builder->GetOutput(context);
106   }
107   return kTfLiteOk;
108 }
109 
RegisterInputs(const TfLiteIntArray * inputs,TfLiteContext * context)110 TfLiteStatus FullyConnectedOpBuilder::RegisterInputs(
111     const TfLiteIntArray* inputs, TfLiteContext* context) {
112   const int kInput = 0;
113   const int kWeights = 1;
114   const int kBias = 2;
115   AddInput(inputs->data[kInput]);
116   SetWeights(&context->tensors[inputs->data[kWeights]]);
117   if (inputs->size > 2) {
118     SetBias(&context->tensors[inputs->data[kBias]]);
119   }
120   return kTfLiteOk;
121 }
122 
RegisterOutputs(const TfLiteIntArray * outputs,TfLiteContext * context)123 TfLiteStatus FullyConnectedOpBuilder::RegisterOutputs(
124     const TfLiteIntArray* outputs, TfLiteContext* context) {
125   if (outputs->size != 1) {
126     TF_LITE_KERNEL_LOG(context, "Wrong # of outputs!.");
127     return kTfLiteError;
128   }
129   TensorID output_tensor = GetOutput(context);
130   if (output_tensor.NodeID() == -1) {
131     TF_LITE_KERNEL_LOG(context, "Failed to build output tensor.");
132     return kTfLiteError;
133   }
134   graph_builder_->AddTensorWithID(outputs->data[0], output_tensor);
135   return kTfLiteOk;
136 }
137 
CreateFullyConnectedOpBuilder(GraphBuilder * graph_builder)138 OpBuilder* CreateFullyConnectedOpBuilder(GraphBuilder* graph_builder) {
139   return new FullyConnectedOpBuilder(graph_builder);
140 }
141 
IsFloatType(TfLiteType type)142 bool IsFloatType(TfLiteType type) {
143   return type == kTfLiteFloat32 || type == kTfLiteFloat16;
144 }
145 
IsFullyConnectedOpSupported(const TfLiteRegistration * registration,const TfLiteNode * node,TfLiteContext * context)146 bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
147                                  const TfLiteNode* node,
148                                  TfLiteContext* context) {
149   if (node->builtin_data == nullptr) return false;
150   const auto* fc_params =
151       reinterpret_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
152   const int kInput = 0;
153   const int kWeights = 1;
154   const int kBias = 2;
155 
156   if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
157     return false;
158   }
159   const TfLiteTensor* input;
160   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
161   const TfLiteTensor* weights;
162   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeights, &weights));
163 
164   if (!IsFloatType(input->type)) {
165     return false;
166   }
167   if (!IsFloatType(weights->type) || !IsConstantTensor(weights)) {
168     return false;
169   }
170   // Core ML 2 only supports single-batch fully connected layer, thus dimensions
171   // except the last one should be 1.
172   if (input->dims->data[input->dims->size - 1] != NumElements(input)) {
173     return false;
174   }
175 
176   if (node->inputs->size > 2) {
177     const TfLiteTensor* bias;
178     TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBias, &bias));
179     if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) {
180       return false;
181     }
182   }
183 
184   TfLiteFusedActivation activation = fc_params->activation;
185   if (activation == kTfLiteActSignBit) {
186     return false;
187   }
188   return true;
189 }
190 
191 }  // namespace coreml
192 }  // namespace delegates
193 }  // namespace tflite
194