1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include <armnn/Types.hpp>
7 #include <armnn/utility/PolymorphicDowncast.hpp>
8 #include <armnn/Tensor.hpp>
9 #include <armnn/backends/ILayerSupport.hpp>
10 #include <armnn/utility/IgnoreUnused.hpp>
11
12 namespace armnn
13 {
14
15 ARMNN_NO_DEPRECATE_WARN_BEGIN
16 // IsLayerSupported() forwards to the deprecated per-layer virtual methods below, depending on the input LayerType.
17 // This lets backends continue to behave as before, maintaining backward compatibility.
18 bool ILayerSupport::IsLayerSupported(const LayerType& type,
19 const std::vector<TensorInfo>& infos,
20 const BaseDescriptor& descriptor,
21 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
22 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
23 Optional<std::string&> reasonIfUnsupported) const
24 {
25 switch (type)
26 {
27 case LayerType::Activation:
28 return IsActivationSupported(infos[0],
29 infos[1],
30 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
31 reasonIfUnsupported);
32 case LayerType::Addition:
33 return IsAdditionSupported(infos[0],
34 infos[1],
35 infos[2],
36 reasonIfUnsupported);
37 case LayerType::ArgMinMax:
38 return IsArgMinMaxSupported(infos[0],
39 infos[1],
40 *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
41 reasonIfUnsupported);
42 case LayerType::BatchNormalization:
43 return IsBatchNormalizationSupported(infos[0],
44 infos[1],
45 infos[2],
46 infos[3],
47 infos[4],
48 infos[5],
49 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
50 (&descriptor)),
51 reasonIfUnsupported);
52 case LayerType::BatchToSpaceNd:
53 return IsBatchToSpaceNdSupported(infos[0],
54 infos[1],
55 *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
56 reasonIfUnsupported);
57 case LayerType::Comparison:
58 {
59 return IsComparisonSupported(infos[0],
60 infos[1],
61 infos[2],
62 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
63 reasonIfUnsupported);
64 }
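        // For variadic layers such as Concat (handled next) and Stack, the convention in 'infos'
        // is that every entry except the last is an input and the final entry is the output.
        // Minimal caller-side sketch, assuming a concrete ILayerSupport-derived object named
        // 'layerSupport' and already-populated TensorInfos (all names here are illustrative only):
        //
        //     OriginsDescriptor concatDesc;   // concat axis/views configured by the caller
        //     std::vector<TensorInfo> infos = { inputInfo0, inputInfo1, outputInfo };
        //     std::string reason;
        //     bool supported = layerSupport.IsLayerSupported(LayerType::Concat, infos, concatDesc,
        //                                                    EmptyOptional(), EmptyOptional(),
        //                                                    Optional<std::string&>(reason));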
65 case LayerType::Concat:
66 {
67 std::vector<const TensorInfo*> inputInfos;
68 for (uint32_t i = 0; i < (infos.size() - 1); i++)
69 {
70 inputInfos.push_back(&infos[i]);
71 }
72 return IsConcatSupported(inputInfos,
73 infos[infos.size() - 1],
74 *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
75 reasonIfUnsupported);
76 }
77 case LayerType::Constant:
78 return IsConstantSupported(infos[0],
79 reasonIfUnsupported);
80 case LayerType::ConvertFp16ToFp32:
81 return IsConvertFp16ToFp32Supported(infos[0],
82 infos[1],
83 reasonIfUnsupported);
84 case LayerType::ConvertFp32ToFp16:
85 return IsConvertFp32ToFp16Supported(infos[0],
86 infos[1],
87 reasonIfUnsupported);
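        // Convolution2d below (and likewise DepthwiseConvolution2d, Convolution3d and
        // TransposeConvolution2d) expects exactly four TensorInfos: {input, output, weights, biases}.
        // A default-constructed TensorInfo in the biases slot means "no bias" and is forwarded as
        // EmptyOptional(). Caller-side sketch, with illustrative names, for a bias-less convolution:
        //
        //     Convolution2dDescriptor convDesc;   // strides, padding, data layout set by the caller
        //     std::vector<TensorInfo> infos = { inputInfo, outputInfo, weightsInfo, TensorInfo() };
        //     std::string reason;
        //     bool supported = layerSupport.IsLayerSupported(LayerType::Convolution2d, infos, convDesc,
        //                                                    EmptyOptional(), EmptyOptional(),
        //                                                    Optional<std::string&>(reason));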
88 case LayerType::Convolution2d:
89 {
90 if (infos.size() != 4)
91 {
92 throw InvalidArgumentException("Invalid number of Convolution2d "
93 "TensorInfos. TensorInfos should be of format: "
94 "{input, output, weights, biases}.");
95 }
96
97 auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
98 if (infos[3] == TensorInfo())
99 {
100 return IsConvolution2dSupported(infos[0],
101 infos[1],
102 desc,
103 infos[2],
104 EmptyOptional(),
105 reasonIfUnsupported);
106 }
107 else
108 {
109 return IsConvolution2dSupported(infos[0],
110 infos[1],
111 desc,
112 infos[2],
113 infos[3],
114 reasonIfUnsupported);
115 }
116 }
117 case LayerType::Debug:
118 return IsDebugSupported(infos[0],
119 infos[1],
120 reasonIfUnsupported);
121 case LayerType::DepthToSpace:
122 return IsDepthToSpaceSupported(infos[0],
123 infos[1],
124 *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
125 reasonIfUnsupported);
126 case LayerType::DepthwiseConvolution2d:
127 {
128 if (infos.size() != 4)
129 {
130 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d "
131 "TensorInfos. TensorInfos should be of format: "
132 "{input, output, weights, biases}.");
133 }
134
135 auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
136 if (infos[3] == TensorInfo())
137 {
138 return IsDepthwiseConvolutionSupported(infos[0],
139 infos[1],
140 desc,
141 infos[2],
142 EmptyOptional(),
143 reasonIfUnsupported);
144 }
145 else
146 {
147 return IsDepthwiseConvolutionSupported(infos[0],
148 infos[1],
149 desc,
150 infos[2],
151 infos[3],
152 reasonIfUnsupported);
153 }
154 }
155 case LayerType::Dequantize:
156 return IsDequantizeSupported(infos[0],
157 infos[1],
158 reasonIfUnsupported);
159 case LayerType::DetectionPostProcess:
160 return IsDetectionPostProcessSupported(infos[0],
161 infos[1],
162 infos[2],
163 infos[3],
164 infos[4],
165 infos[5],
166 infos[6],
167 *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
168 (&descriptor)),
169 reasonIfUnsupported);
170 case LayerType::Division:
171 return IsDivisionSupported(infos[0],
172 infos[1],
173 infos[2],
174 reasonIfUnsupported);
175 case LayerType::ElementwiseUnary:
176 return IsElementwiseUnarySupported(infos[0],
177 infos[1],
178 *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>
179 (&descriptor)),
180 reasonIfUnsupported);
181 case LayerType::FakeQuantization:
182 return IsFakeQuantizationSupported(infos[0],
183 *(PolymorphicDowncast<const FakeQuantizationDescriptor*>
184 (&descriptor)),
185 reasonIfUnsupported);
186 case LayerType::Fill:
187 return IsFillSupported(infos[0],
188 infos[1],
189 *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
190 reasonIfUnsupported);
191 case LayerType::Floor:
192 return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
193 case LayerType::FullyConnected:
194 return IsFullyConnectedSupported(infos[0],
195 infos[1],
196 infos[2],
197 infos[3],
198 *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
199 reasonIfUnsupported);
200 case LayerType::Gather:
201 return IsGatherSupported(infos[0],
202 infos[1],
203 infos[2],
204 *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
205 reasonIfUnsupported);
206 case LayerType::Input:
207 return IsInputSupported(infos[0], reasonIfUnsupported);
208 case LayerType::InstanceNormalization:
209 return IsInstanceNormalizationSupported(infos[0],
210 infos[1],
211 *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
212 (&descriptor)),
213 reasonIfUnsupported);
214 case LayerType::L2Normalization:
215 return IsL2NormalizationSupported(infos[0],
216 infos[1],
217 *(PolymorphicDowncast<const L2NormalizationDescriptor*>
218 (&descriptor)),
219 reasonIfUnsupported);
220 case LayerType::LogicalBinary:
221 return IsLogicalBinarySupported(infos[0],
222 infos[1],
223 infos[2],
224 *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
225 reasonIfUnsupported);
226 case LayerType::LogSoftmax:
227 return IsLogSoftmaxSupported(infos[0],
228 infos[1],
229 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
230 reasonIfUnsupported);
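        // Note: Lstm, QLstm and UnidirectionalSequenceLstm below also require 'lstmParamsInfo' to be
        // set, and QuantizedLstm requires 'quantizedLstmParamsInfo'; value() is called on these
        // Optionals without a check, so queries for these layer types must supply the parameter info.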
231 case LayerType::Lstm:
232 return IsLstmSupported(infos[0],
233 infos[1],
234 infos[2],
235 infos[3],
236 infos[4],
237 infos[5],
238 infos[6],
239 *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
240 lstmParamsInfo.value(),
241 reasonIfUnsupported);
242 case LayerType::QLstm:
243 return IsQLstmSupported(infos[0],
244 infos[1],
245 infos[2],
246 infos[3],
247 infos[4],
248 infos[5],
249 *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
250 lstmParamsInfo.value(),
251 reasonIfUnsupported);
252 case LayerType::Map:
253 return true;
254 case LayerType::Maximum:
255 return IsMaximumSupported(infos[0],
256 infos[1],
257 infos[2],
258 reasonIfUnsupported);
259 case LayerType::Mean:
260 return IsMeanSupported(infos[0],
261 infos[1],
262 *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
263 reasonIfUnsupported);
264 case LayerType::MemCopy:
265             return IsMemCopySupported(infos[0],
266                                       infos[1],
267 reasonIfUnsupported);
268 case LayerType::MemImport:
269 return IsMemImportSupported(infos[0],
270 infos[1],
271 reasonIfUnsupported);
272 case LayerType::Merge:
273 return IsMergeSupported(infos[0],
274 infos[1],
275 infos[2],
276 reasonIfUnsupported);
277 case LayerType::Minimum:
278 return IsMinimumSupported(infos[0],
279 infos[1],
280 infos[2],
281 reasonIfUnsupported);
282 case LayerType::Multiplication:
283 return IsMultiplicationSupported(infos[0],
284 infos[1],
285 infos[2],
286 reasonIfUnsupported);
287 case LayerType::Normalization:
288 return IsNormalizationSupported(infos[0],
289 infos[1],
290 *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
291 reasonIfUnsupported);
292 case LayerType::Output:
293 return IsOutputSupported(infos[0], reasonIfUnsupported);
294 case LayerType::Pad:
295 return IsPadSupported(infos[0],
296 infos[1],
297 *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
298 reasonIfUnsupported);
299 case LayerType::Permute:
300 return IsPermuteSupported(infos[0],
301 infos[1],
302 *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
303 reasonIfUnsupported);
304 case LayerType::Pooling2d:
305 return IsPooling2dSupported(infos[0],
306 infos[1],
307 *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
308 reasonIfUnsupported);
309 case LayerType::PreCompiled:
310 return IsPreCompiledSupported(infos[0],
311 *(PolymorphicDowncast<const PreCompiledDescriptor*>(&descriptor)),
312 reasonIfUnsupported);
313 case LayerType::Prelu:
314 return IsPreluSupported(infos[0],
315 infos[1],
316 infos[2],
317 reasonIfUnsupported);
318 case LayerType::Quantize:
319 return IsQuantizeSupported(infos[0],
320 infos[1],
321 reasonIfUnsupported);
322 case LayerType::QuantizedLstm:
323 return IsQuantizedLstmSupported(infos[0],
324 infos[1],
325 infos[2],
326 infos[3],
327 infos[4],
328 quantizedLstmParamsInfo.value(),
329 reasonIfUnsupported);
330 case LayerType::Reshape:
331 return IsReshapeSupported(infos[0],
332 infos[1],
333 *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
334 reasonIfUnsupported);
335 case LayerType::Rank:
336 return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
337 case LayerType::Resize:
338 return IsResizeSupported(infos[0],
339 infos[1],
340 *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
341 reasonIfUnsupported);
342 case LayerType::Reduce:
343 return IsReduceSupported(infos[0],
344 infos[1],
345 *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
346 reasonIfUnsupported);
347 case LayerType::Slice:
348 return IsSliceSupported(infos[0],
349 infos[1],
350 *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
351 reasonIfUnsupported);
352 case LayerType::Softmax:
353 return IsSoftmaxSupported(infos[0],
354 infos[1],
355 *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
356 reasonIfUnsupported);
357 case LayerType::SpaceToBatchNd:
358 return IsSpaceToBatchNdSupported(infos[0],
359 infos[1],
360 *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
361 reasonIfUnsupported);
362 case LayerType::SpaceToDepth:
363 return IsSpaceToDepthSupported(infos[0],
364 infos[1],
365 *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
366 reasonIfUnsupported);
367 case LayerType::Splitter:
368 {
369 std::vector<TensorInfo> outputInfos;
370 for (uint32_t i = 1; i < infos.size(); i++)
371 {
372 outputInfos.push_back(infos[i]);
373 }
374 return IsSplitterSupported(infos[0],
375 {outputInfos.begin(), outputInfos.end()},
376 *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
377 reasonIfUnsupported);
378 }
379 case LayerType::Stack:
380 {
381 std::vector<const TensorInfo*> inputInfos;
382 for (uint32_t i = 0; i < infos.size() - 1; i++)
383 {
384 inputInfos.push_back(&infos[i]);
385 }
386 return IsStackSupported(inputInfos,
387 infos[infos.size() - 1],
388 *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
389 reasonIfUnsupported);
390 }
391 case LayerType::StandIn:
392 {
393 auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));
394
395 if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
396 {
397 throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
398 "the combined number of input and output slots assigned "
399 "to the StandIn descriptor");
400 }
401
402 std::vector<const TensorInfo*> inputInfos;
403 for (uint32_t i = 0; i < desc.m_NumInputs; i++)
404 {
405 inputInfos.push_back(&infos[i]);
406 }
407 std::vector<const TensorInfo*> outputInfos;
408 for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
409 {
410 outputInfos.push_back(&infos[i]);
411 }
412
413 return IsStandInSupported(inputInfos,
414 outputInfos,
415 desc,
416 reasonIfUnsupported);
417 }
418 case LayerType::StridedSlice:
419 return IsStridedSliceSupported(infos[0],
420 infos[1],
421 *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
422 reasonIfUnsupported);
423 case LayerType::Subtraction:
424 return IsSubtractionSupported(infos[0],
425 infos[1],
426 infos[2],
427 reasonIfUnsupported);
428 case LayerType::Switch:
429 return IsSwitchSupported(infos[0],
430 infos[1],
431 infos[2],
432 infos[3],
433 reasonIfUnsupported);
434 case LayerType::Transpose:
435 return IsTransposeSupported(infos[0],
436 infos[1],
437 *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
438 reasonIfUnsupported);
439 case LayerType::TransposeConvolution2d:
440 {
441 if (infos.size() != 4)
442 {
443 throw InvalidArgumentException("Invalid number of TransposeConvolution2d "
444 "TensorInfos. TensorInfos should be of format: "
445 "{input, output, weights, biases}.");
446 }
447
448 auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
449 if (infos[3] == TensorInfo())
450 {
451 return IsTransposeConvolution2dSupported(infos[0],
452 infos[1],
453 desc,
454 infos[2],
455 EmptyOptional(),
456 reasonIfUnsupported);
457 }
458 else
459 {
460 return IsTransposeConvolution2dSupported(infos[0],
461 infos[1],
462 desc,
463 infos[2],
464 infos[3],
465 reasonIfUnsupported);
466 }
467 }
468 case LayerType::Unmap:
469 return true;
470 case LayerType::Cast:
471 return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
472 case LayerType::Shape:
473 return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
474 case LayerType::UnidirectionalSequenceLstm:
475 {
476 if (infos.size() != 6)
477 {
478                 throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
479 "should be of format: {input, outputStateIn, cellStateIn, "
480 "hiddenStateOutputVal, cellStateOutputVal, output}");
481 }
482 auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
483 return IsUnidirectionalSequenceLstmSupported(infos[0],
484 infos[1],
485 infos[2],
486 infos[3],
487 infos[4],
488 infos[5],
489 desc,
490 lstmParamsInfo.value(),
491 reasonIfUnsupported);
492 }
493 case LayerType::ChannelShuffle:
494 return IsChannelShuffleSupported(infos[0],
495 infos[1],
496 *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
497 reasonIfUnsupported);
498 case LayerType::Convolution3d:
499 {
500 if (infos.size() != 4)
501 {
502 throw InvalidArgumentException("Invalid number of Convolution3d "
503 "TensorInfos. TensorInfos should be of format: "
504 "{input, output, weights, biases}.");
505 }
506
507 auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
508 if (infos[3] == TensorInfo())
509 {
510 return IsConvolution3dSupported(infos[0],
511 infos[1],
512 desc,
513 infos[2],
514 EmptyOptional(),
515 reasonIfUnsupported);
516 }
517 else
518 {
519 return IsConvolution3dSupported(infos[0],
520 infos[1],
521 desc,
522 infos[2],
523 infos[3],
524 reasonIfUnsupported);
525 }
526 }
527 case LayerType::Pooling3d:
528 return IsPooling3dSupported(infos[0],
529 infos[1],
530 *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
531 reasonIfUnsupported);
532 default:
533 return false;
534 }
535 }
536
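// The remaining methods are the deprecated per-layer virtuals. Each default implementation ignores
// its arguments and reports the layer as unsupported; a backend overrides the queries it can satisfy
// in its own layer-support class. Hedged sketch of such an override (hypothetical backend class and
// illustrative support policy, not part of this file's API):
//
//     bool MyBackendLayerSupport::IsActivationSupported(const TensorInfo& input,
//                                                       const TensorInfo& output,
//                                                       const ActivationDescriptor& descriptor,
//                                                       Optional<std::string&> reasonIfUnsupported) const
//     {
//         if (descriptor.m_Function != ActivationFunction::ReLu)   // e.g. accept ReLU only
//         {
//             if (reasonIfUnsupported.has_value())
//             {
//                 reasonIfUnsupported.value() = "Only ReLU activations are supported";
//             }
//             return false;
//         }
//         return input.GetDataType() == DataType::Float32 && input.GetShape() == output.GetShape();
//     }
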
537 bool ILayerSupport::IsActivationSupported(const TensorInfo& input,
538 const TensorInfo& output,
539 const ActivationDescriptor& descriptor,
540 Optional<std::string&> reasonIfUnsupported) const
541 {
542 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
543 return false;
544 }
545
546 bool ILayerSupport::IsAdditionSupported(const TensorInfo& input0,
547 const TensorInfo& input1,
548 const TensorInfo& output,
549 Optional<std::string&> reasonIfUnsupported) const
550 {
551 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
552 return false;
553 }
554
555 bool ILayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
556 const TensorInfo& output,
557 const ArgMinMaxDescriptor& descriptor,
558 Optional<std::string&> reasonIfUnsupported) const
559 {
560 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
561 return false;
562 }
563
564 bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
565 const TensorInfo& output,
566 const TensorInfo& mean,
567 const TensorInfo& var,
568 const TensorInfo& beta,
569 const TensorInfo& gamma,
570 const BatchNormalizationDescriptor& descriptor,
571 Optional<std::string&> reasonIfUnsupported) const
572 {
573 IgnoreUnused(input, output, mean, var, beta, gamma, descriptor, reasonIfUnsupported);
574 return false;
575 }
576
577 bool ILayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
578 const TensorInfo& output,
579 const BatchToSpaceNdDescriptor& descriptor,
580 Optional<std::string&> reasonIfUnsupported) const
581 {
582 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
583 return false;
584 }
585
586 bool ILayerSupport::IsCastSupported(const TensorInfo& input,
587 const TensorInfo& output,
588 Optional<std::string&> reasonIfUnsupported) const
589 {
590 IgnoreUnused(input, output, reasonIfUnsupported);
591 return false;
592 }
593
594 bool ILayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
595 const TensorInfo& output,
596 const ChannelShuffleDescriptor& descriptor,
597 Optional<std::string&> reasonIfUnsupported) const
598 {
599 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
600 return false;
601 }
602
603 bool ILayerSupport::IsComparisonSupported(const TensorInfo& input0,
604 const TensorInfo& input1,
605 const TensorInfo& output,
606 const ComparisonDescriptor& descriptor,
607 Optional<std::string&> reasonIfUnsupported) const
608 {
609 IgnoreUnused(input0, input1, output, descriptor, reasonIfUnsupported);
610 return false;
611 }
612
613 bool ILayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
614 const TensorInfo& output,
615 const OriginsDescriptor& descriptor,
616 Optional<std::string&> reasonIfUnsupported) const
617 {
618 IgnoreUnused(inputs, output, descriptor, reasonIfUnsupported);
619 return false;
620 }
621
622 bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
623 Optional<std::string&> reasonIfUnsupported) const
624 {
625 IgnoreUnused(output, reasonIfUnsupported);
626 return false;
627 }
628
629 bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
630 const TensorInfo& output,
631 Optional<std::string&> reasonIfUnsupported) const
632 {
633 IgnoreUnused(input, output, reasonIfUnsupported);
634 return false;
635 }
636
637 bool ILayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
638 const TensorInfo& output,
639 Optional<std::string&> reasonIfUnsupported) const
640 {
641 IgnoreUnused(input, output, reasonIfUnsupported);
642 return false;
643 }
644
645 bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
646 const TensorInfo& output,
647 const Convolution2dDescriptor& descriptor,
648 const TensorInfo& weights,
649 const Optional<TensorInfo>& biases,
650 Optional<std::string&> reasonIfUnsupported) const
651 {
652 IgnoreUnused(input, output, descriptor, weights, biases, reasonIfUnsupported);
653 return false;
654 }
655
656 bool ILayerSupport::IsConvolution3dSupported(const TensorInfo& input,
657 const TensorInfo& output,
658 const Convolution3dDescriptor& descriptor,
659 const TensorInfo& weights,
660 const Optional<TensorInfo>& biases,
661 Optional<std::string&> reasonIfUnsupported) const
662 {
663 IgnoreUnused(input, output, descriptor, weights, biases, reasonIfUnsupported);
664 return false;
665 }
666
667 bool ILayerSupport::IsDebugSupported(const TensorInfo& input,
668 const TensorInfo& output,
669 Optional<std::string&> reasonIfUnsupported) const
670 {
671 IgnoreUnused(input, output, reasonIfUnsupported);
672 return false;
673 }
674
675 bool ILayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
676 const TensorInfo& output,
677 const DepthToSpaceDescriptor& descriptor,
678 Optional<std::string&> reasonIfUnsupported) const
679 {
680 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
681 return false;
682 }
683
684 bool ILayerSupport::IsDepthwiseConvolutionSupported(
685 const TensorInfo& input,
686 const TensorInfo& output,
687 const DepthwiseConvolution2dDescriptor& descriptor,
688 const TensorInfo& weights,
689 const Optional<TensorInfo>& biases,
690 Optional<std::string&> reasonIfUnsupported) const
691 {
692 IgnoreUnused(input,
693 output,
694 descriptor,
695 weights,
696 biases,
697 reasonIfUnsupported);
698 return false;
699 }
700
701 bool ILayerSupport::IsDequantizeSupported(const TensorInfo& input,
702 const TensorInfo& output,
703 Optional<std::string&> reasonIfUnsupported) const
704 {
705 IgnoreUnused(input, output, reasonIfUnsupported);
706 return false;
707 }
708
709 bool ILayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
710 const TensorInfo& scores,
711 const TensorInfo& anchors,
712 const TensorInfo& detectionBoxes,
713 const TensorInfo& detectionClasses,
714 const TensorInfo& detectionScores,
715 const TensorInfo& numDetections,
716 const DetectionPostProcessDescriptor& descriptor,
717 Optional<std::string&> reasonIfUnsupported) const
718 {
719 IgnoreUnused(boxEncodings,
720 scores,
721 anchors,
722 detectionBoxes,
723 detectionClasses,
724 detectionScores,
725 numDetections,
726 descriptor,
727 reasonIfUnsupported);
728 return false;
729 }
730
731 bool ILayerSupport::IsDilatedDepthwiseConvolutionSupported(
732 const TensorInfo& input,
733 const TensorInfo& output,
734 const DepthwiseConvolution2dDescriptor& descriptor,
735 const TensorInfo& weights,
736 const Optional<TensorInfo>& biases,
737 Optional<std::string&> reasonIfUnsupported) const
738 {
739 IgnoreUnused(input, output, descriptor, weights, biases, reasonIfUnsupported);
740 return false;
741 }
742
743 bool ILayerSupport::IsDivisionSupported(const TensorInfo& input0,
744 const TensorInfo& input1,
745 const TensorInfo& output,
746 Optional<std::string&> reasonIfUnsupported) const
747 {
748 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
749 return false;
750 }
751
752 bool ILayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
753 const TensorInfo& output,
754 const ElementwiseUnaryDescriptor& descriptor,
755 Optional<std::string&> reasonIfUnsupported) const
756 {
757 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
758 return false;
759 }
760
761 bool ILayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
762 const FakeQuantizationDescriptor& descriptor,
763 Optional<std::string&> reasonIfUnsupported) const
764 {
765 IgnoreUnused(input, descriptor, reasonIfUnsupported);
766 return false;
767 }
768
769 bool ILayerSupport::IsFillSupported(const TensorInfo& input,
770 const TensorInfo& output,
771 const FillDescriptor& descriptor,
772 Optional<std::string&> reasonIfUnsupported) const
773 {
774 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
775 return false;
776 }
777
778 bool ILayerSupport::IsFloorSupported(const TensorInfo& input,
779 const TensorInfo& output,
780 Optional<std::string&> reasonIfUnsupported) const
781 {
782 IgnoreUnused(input, output, reasonIfUnsupported);
783 return false;
784 }
785
786 bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
787 const TensorInfo& output,
788 const TensorInfo& weights,
789 const TensorInfo& biases,
790 const FullyConnectedDescriptor& descriptor,
791 Optional<std::string&> reasonIfUnsupported) const
792 {
793 IgnoreUnused(input, output, weights, biases, descriptor, reasonIfUnsupported);
794 return false;
795 }
796
797 bool ILayerSupport::IsGatherSupported(const TensorInfo& input0,
798 const TensorInfo& input1,
799 const TensorInfo& output,
800 const GatherDescriptor& descriptor,
801 Optional<std::string&> reasonIfUnsupported) const
802 {
803 IgnoreUnused(input0, input1, output, descriptor, reasonIfUnsupported);
804 return false;
805 }
806
807 bool ILayerSupport::IsInputSupported(const TensorInfo& input,
808 Optional<std::string&> reasonIfUnsupported) const
809 {
810 IgnoreUnused(input, reasonIfUnsupported);
811 return false;
812 }
813
814 bool ILayerSupport::IsInstanceNormalizationSupported(
815 const TensorInfo& input,
816 const TensorInfo& output,
817 const InstanceNormalizationDescriptor& descriptor,
818 Optional<std::string&> reasonIfUnsupported) const
819 {
820 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
821 return false;
822 }
823
824 bool ILayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
825 const TensorInfo& output,
826 const L2NormalizationDescriptor& descriptor,
827 Optional<std::string&> reasonIfUnsupported) const
828 {
829 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
830 return false;
831 }
832
833 bool ILayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
834 const TensorInfo& input1,
835 const TensorInfo& output,
836 const LogicalBinaryDescriptor& descriptor,
837 Optional<std::string&> reasonIfUnsupported) const
838 {
839 IgnoreUnused(input0, input1, output, descriptor, reasonIfUnsupported);
840 return false;
841 }
842
843 bool ILayerSupport::IsLogicalUnarySupported(const TensorInfo& input,
844 const TensorInfo& output,
845 const ElementwiseUnaryDescriptor& descriptor,
846 Optional<std::string&> reasonIfUnsupported) const
847 {
848 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
849 return false;
850 }
851
852 bool ILayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
853 const TensorInfo& output,
854 const LogSoftmaxDescriptor& descriptor,
855 Optional<std::string&> reasonIfUnsupported) const
856 {
857 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
858 return false;
859 }
860
861 bool ILayerSupport::IsLstmSupported(const TensorInfo& input,
862 const TensorInfo& outputStateIn,
863 const TensorInfo& cellStateIn,
864 const TensorInfo& scratchBuffer,
865 const TensorInfo& outputStateOut,
866 const TensorInfo& cellStateOut,
867 const TensorInfo& output,
868 const LstmDescriptor& descriptor,
869 const LstmInputParamsInfo& paramsInfo,
870 Optional<std::string&> reasonIfUnsupported) const
871 {
872 IgnoreUnused(input,
873 outputStateIn,
874 cellStateIn,
875 scratchBuffer,
876 outputStateOut,
877 cellStateOut,
878 output,
879 descriptor,
880 paramsInfo,
881 reasonIfUnsupported);
882 return false;
883 }
884
885 bool ILayerSupport::IsMaximumSupported(const TensorInfo& input0,
886 const TensorInfo& input1,
887 const TensorInfo& output,
888 Optional<std::string&> reasonIfUnsupported) const
889 {
890 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
891 return false;
892 }
893
894 bool ILayerSupport::IsMeanSupported(const TensorInfo& input,
895 const TensorInfo& output,
896 const MeanDescriptor& descriptor,
897 Optional<std::string&> reasonIfUnsupported) const
898 {
899 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
900 return false;
901 }
902
903 bool ILayerSupport::IsMemCopySupported(const TensorInfo& input,
904 const TensorInfo& output,
905 Optional<std::string&> reasonIfUnsupported) const
906 {
907 IgnoreUnused(input, output, reasonIfUnsupported);
908 return false;
909 }
910
911 bool ILayerSupport::IsMemImportSupported(const TensorInfo& input,
912 const TensorInfo& output,
913 Optional<std::string&> reasonIfUnsupported) const
914 {
915 IgnoreUnused(input, output, reasonIfUnsupported);
916 return false;
917 }
918
919 bool ILayerSupport::IsMergeSupported(const TensorInfo& input0,
920 const TensorInfo& input1,
921 const TensorInfo& output,
922 Optional<std::string&> reasonIfUnsupported) const
923 {
924 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
925 return false;
926 }
927
928 bool ILayerSupport::IsMinimumSupported(const TensorInfo& input0,
929 const TensorInfo& input1,
930 const TensorInfo& output,
931 Optional<std::string&> reasonIfUnsupported) const
932 {
933 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
934 return false;
935 }
936
937 bool ILayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
938 const TensorInfo& input1,
939 const TensorInfo& output,
940 Optional<std::string&> reasonIfUnsupported) const
941 {
942 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
943 return false;
944 }
945
946 bool ILayerSupport::IsNormalizationSupported(const TensorInfo& input,
947 const TensorInfo& output,
948 const NormalizationDescriptor& descriptor,
949 Optional<std::string&> reasonIfUnsupported) const
950 {
951 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
952 return false;
953 }
954
955 bool ILayerSupport::IsOutputSupported(const TensorInfo& output,
956 Optional<std::string&> reasonIfUnsupported) const
957 {
958 IgnoreUnused(output, reasonIfUnsupported);
959 return false;
960 }
961
962 bool ILayerSupport::IsPadSupported(const TensorInfo& input,
963 const TensorInfo& output,
964 const PadDescriptor& descriptor,
965 Optional<std::string&> reasonIfUnsupported) const
966 {
967 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
968 return false;
969 }
970
971 bool ILayerSupport::IsPermuteSupported(const TensorInfo& input,
972 const TensorInfo& output,
973 const PermuteDescriptor& descriptor,
974 Optional<std::string&> reasonIfUnsupported) const
975 {
976 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
977 return false;
978 }
979
980 bool ILayerSupport::IsPooling2dSupported(const TensorInfo& input,
981 const TensorInfo& output,
982 const Pooling2dDescriptor& descriptor,
983 Optional<std::string&> reasonIfUnsupported) const
984 {
985 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
986 return false;
987 }
988
989 bool ILayerSupport::IsPooling3dSupported(const TensorInfo& input,
990 const TensorInfo& output,
991 const Pooling3dDescriptor& descriptor,
992 Optional<std::string&> reasonIfUnsupported) const
993 {
994 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
995 return false;
996 }
997
998 bool ILayerSupport::IsPreCompiledSupported(const TensorInfo& input,
999 const PreCompiledDescriptor& descriptor,
1000 Optional<std::string&> reasonIfUnsupported) const
1001 {
1002 IgnoreUnused(input, descriptor, reasonIfUnsupported);
1003 return false;
1004 }
1005
1006 bool ILayerSupport::IsPreluSupported(const TensorInfo& input,
1007 const TensorInfo& alpha,
1008 const TensorInfo& output,
1009 Optional<std::string&> reasonIfUnsupported) const
1010 {
1011 IgnoreUnused(input, alpha, output, reasonIfUnsupported);
1012 return false;
1013 }
1014
1015 bool ILayerSupport::IsQuantizeSupported(const TensorInfo& input,
1016 const TensorInfo& output,
1017 Optional<std::string&> reasonIfUnsupported) const
1018 {
1019 IgnoreUnused(input, output, reasonIfUnsupported);
1020 return false;
1021 }
1022
1023 bool ILayerSupport::IsQLstmSupported(const TensorInfo& input,
1024 const TensorInfo& previousOutputIn,
1025 const TensorInfo& previousCellStateIn,
1026 const TensorInfo& outputStateOut,
1027 const TensorInfo& cellStateOut,
1028 const TensorInfo& output,
1029 const QLstmDescriptor& descriptor,
1030 const LstmInputParamsInfo& paramsInfo,
1031 Optional<std::string&> reasonIfUnsupported) const
1032 {
1033 IgnoreUnused(input,
1034 previousOutputIn,
1035 previousCellStateIn,
1036 outputStateOut,
1037 cellStateOut,
1038 output,
1039 descriptor,
1040 paramsInfo,
1041 reasonIfUnsupported);
1042 return false;
1043 }
1044
1045 bool ILayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
1046 const TensorInfo& previousCellStateIn,
1047 const TensorInfo& previousOutputIn,
1048 const TensorInfo& cellStateOut,
1049 const TensorInfo& output,
1050 const QuantizedLstmInputParamsInfo& paramsInfo,
1051 Optional<std::string&> reasonIfUnsupported) const
1052 {
1053 IgnoreUnused(input,
1054 previousCellStateIn,
1055 previousOutputIn,
1056 cellStateOut,
1057 output,
1058 paramsInfo,
1059 reasonIfUnsupported);
1060 return false;
1061 }
1062
1063 bool ILayerSupport::IsRankSupported(const TensorInfo& input,
1064 const TensorInfo& output,
1065 Optional<std::string&> reasonIfUnsupported) const
1066 {
1067 IgnoreUnused(input, output, reasonIfUnsupported);
1068 return false;
1069 }
1070
1071 bool ILayerSupport::IsReduceSupported(const TensorInfo& input,
1072 const TensorInfo& output,
1073 const ReduceDescriptor& descriptor,
1074 Optional<std::string&> reasonIfUnsupported) const
1075 {
1076 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1077 return false;
1078 }
1079
1080 bool ILayerSupport::IsReshapeSupported(const TensorInfo& input,
1081 const TensorInfo& output,
1082 const ReshapeDescriptor& descriptor,
1083 Optional<std::string&> reasonIfUnsupported) const
1084 {
1085 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1086 return false;
1087 }
1088
1089 bool ILayerSupport::IsResizeSupported(const TensorInfo& input,
1090 const TensorInfo& output,
1091 const ResizeDescriptor& descriptor,
1092 Optional<std::string&> reasonIfUnsupported) const
1093 {
1094 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1095 return false;
1096 }
1097
1098 bool ILayerSupport::IsShapeSupported(const TensorInfo& input,
1099 const TensorInfo& output,
1100 Optional<std::string&> reasonIfUnsupported) const
1101 {
1102 IgnoreUnused(input, output, reasonIfUnsupported);
1103 return false;
1104 }
1105
1106 bool ILayerSupport::IsSliceSupported(const TensorInfo& input,
1107 const TensorInfo& output,
1108 const SliceDescriptor& descriptor,
1109 Optional<std::string&> reasonIfUnsupported) const
1110 {
1111 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1112 return false;
1113 }
1114
1115 bool ILayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1116 const TensorInfo& output,
1117 const SoftmaxDescriptor& descriptor,
1118 Optional<std::string&> reasonIfUnsupported) const
1119 {
1120 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1121 return false;
1122 }
1123
1124 bool ILayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1125 const TensorInfo& output,
1126 const SpaceToBatchNdDescriptor& descriptor,
1127 Optional<std::string&> reasonIfUnsupported) const
1128 {
1129 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1130 return false;
1131 }
1132
1133 bool ILayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1134 const TensorInfo& output,
1135 const SpaceToDepthDescriptor& descriptor,
1136 Optional<std::string&> reasonIfUnsupported) const
1137 {
1138 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1139 return false;
1140 }
1141
1142 bool ILayerSupport::IsSplitterSupported(const TensorInfo& input,
1143 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1144 const ViewsDescriptor& descriptor,
1145 Optional<std::string&> reasonIfUnsupported) const
1146 {
1147 IgnoreUnused(input, outputs, descriptor, reasonIfUnsupported);
1148 return false;
1149 }
1150
1151 bool ILayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1152 const TensorInfo& output,
1153 const StackDescriptor& descriptor,
1154 Optional<std::string&> reasonIfUnsupported) const
1155 {
1156 IgnoreUnused(inputs, output, descriptor, reasonIfUnsupported);
1157 return false;
1158 }
1159
1160 bool ILayerSupport::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
1161 const std::vector<const TensorInfo*>& outputs,
1162 const StandInDescriptor& descriptor,
1163 Optional<std::string&> reasonIfUnsupported) const
1164 {
1165 IgnoreUnused(inputs, outputs, descriptor, reasonIfUnsupported);
1166 return false;
1167 }
1168
1169 bool ILayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1170 const TensorInfo& output,
1171 const StridedSliceDescriptor& descriptor,
1172 Optional<std::string&> reasonIfUnsupported) const
1173 {
1174 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1175 return false;
1176 }
1177
1178 bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1179 const TensorInfo& input1,
1180 const TensorInfo& output,
1181 Optional<std::string&> reasonIfUnsupported) const
1182 {
1183 IgnoreUnused(input0, input1, output, reasonIfUnsupported);
1184 return false;
1185 }
1186
1187 bool ILayerSupport::IsSwitchSupported(const TensorInfo& input0,
1188 const TensorInfo& input1,
1189 const TensorInfo& output0,
1190 const TensorInfo& output1,
1191 Optional<std::string&> reasonIfUnsupported) const
1192 {
1193 IgnoreUnused(input0, input1, output0, output1, reasonIfUnsupported);
1194 return false;
1195 }
1196
1197 bool ILayerSupport::IsTransposeConvolution2dSupported(
1198 const TensorInfo& input,
1199 const TensorInfo& output,
1200 const TransposeConvolution2dDescriptor& descriptor,
1201 const TensorInfo& weights,
1202 const Optional<TensorInfo>& biases,
1203 Optional<std::string&> reasonIfUnsupported) const
1204 {
1205 IgnoreUnused(input, output, descriptor, weights, biases, reasonIfUnsupported);
1206 return false;
1207 }
1208
1209 bool ILayerSupport::IsTransposeSupported(const TensorInfo& input,
1210 const TensorInfo& output,
1211 const TransposeDescriptor& descriptor,
1212 Optional<std::string&> reasonIfUnsupported) const
1213 {
1214 IgnoreUnused(input, output, descriptor, reasonIfUnsupported);
1215 return false;
1216 }
1217
1218 bool ILayerSupport::IsUnidirectionalSequenceLstmSupported(
1219 const TensorInfo& input,
1220 const TensorInfo& outputStateIn,
1221 const TensorInfo& cellStateIn,
1222 const TensorInfo& outputStateOut,
1223 const TensorInfo& cellStateOut,
1224 const TensorInfo& output,
1225 const LstmDescriptor& descriptor,
1226 const LstmInputParamsInfo& paramsInfo,
1227 Optional<std::string&> reasonIfUnsupported) const
1228 {
1229 IgnoreUnused(input,
1230 outputStateIn,
1231 cellStateIn,
1232 outputStateOut,
1233 cellStateOut,
1234 output,
1235 descriptor,
1236 paramsInfo,
1237 reasonIfUnsupported);
1238 return false;
1239 }
1240 ARMNN_NO_DEPRECATE_WARN_END
1241 }
1242