//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

13 namespace armnnDelegate
14 {
15
VisitLogicalBinaryOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,TfLiteNode * tfLiteNode,int nodeIndex,int32_t logicalOperatorCode,armnn::LogicalBinaryOperation binaryOperation)16 TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
17 TfLiteContext* tfLiteContext,
18 TfLiteNode* tfLiteNode,
19 int nodeIndex,
20 int32_t logicalOperatorCode,
21 armnn::LogicalBinaryOperation binaryOperation)
22 {
23 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
24 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
25
26 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
27 const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
28 if (!IsValid(tfLiteContext, tfLiteInputTensor0, logicalOperatorCode, nodeIndex))
29 {
30 return kTfLiteError;
31 }
32
33 const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
34 if (!IsValid(tfLiteContext, tfLiteInputTensor1, logicalOperatorCode, nodeIndex))
35 {
36 return kTfLiteError;
37 }
38
39 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
40 if (!IsValid(tfLiteContext, tfLiteOutputTensor, logicalOperatorCode, nodeIndex))
41 {
42 return kTfLiteError;
43 }
44
45 armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
46 armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
47 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
48
49 // Check if we need to expand the dims of any of the input tensor infos.
50 // This is required for a few of the backends.
51 if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
52 {
53 ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
54 }
55
56 // Setup descriptor and assign operation
57 armnn::LogicalBinaryDescriptor desc;
58 desc.m_Operation = binaryOperation;
59
60 // Check if supported
61 bool isSupported = false;
62 armnn::BackendId setBackend;
63 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
64 {
65 FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
66 tfLiteContext,
67 IsLogicalBinarySupported,
68 delegateData.m_Backends,
69 isSupported,
70 setBackend,
71 inputTensorInfo0,
72 inputTensorInfo1,
73 outputTensorInfo,
74 desc);
75 };
76
77 if (!delegateData.m_Network)
78 {
79 validateFunc(outputTensorInfo, isSupported);
80 return isSupported ? kTfLiteOk : kTfLiteError;
81 }
82
83 armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
84 logicalBinaryLayer->SetBackendId(setBackend);
85 ARMNN_ASSERT(logicalBinaryLayer != nullptr);
86
87 armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
88 outputSlot.SetTensorInfo(outputTensorInfo);
89
90 auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
91 delegateData,
92 tfLiteContext,
93 tfLiteNode);
94 if (inputsTensorsProcess == kTfLiteError)
95 {
96 return inputsTensorsProcess;
97 }
98
99 return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
100 }
101
102 } // namespace armnnDelegate
103