//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkProgramOptions.hpp"
#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"

#include <armnn/BackendRegistry.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/StringUtils.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

#include <algorithm> // std::remove, used by RemoveDuplicateDevices
#include <iostream>  // std::cout, used when printing help text
#include <sstream>   // std::stringstream, used when parsing input tensor shapes

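/// Returns true if 'option' is a valid option name that appears in the parse
/// result. CheckOptionDependency() below combines this with has_default() to
/// single out options the user supplied explicitly.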
bool CheckOption(const cxxopts::ParseResult& result,
                 const char* option)
{
    // Check that the given option is valid.
    if (option == nullptr)
    {
        return false;
    }

    // Check whether 'option' is provided.
    return result.count(option) > 0;
}

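/// Throws a cxxopts::OptionParseException if 'option' was set explicitly on
/// the command line while 'required' was not. Values that merely carry their
/// cxxopts default do not count on either side of the dependency.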
void CheckOptionDependency(const cxxopts::ParseResult& result,
                           const char* option,
                           const char* required)
{
    // Check that the given options are valid.
    if (option == nullptr || required == nullptr)
    {
        throw cxxopts::OptionParseException("Invalid option to check dependency for");
    }

    // Check that if 'option' is provided, 'required' is also provided.
    if (CheckOption(result, option) && !result[option].has_default())
    {
        if (!CheckOption(result, required) || result[required].has_default())
        {
            throw cxxopts::OptionParseException(
                    std::string("Option '") + option + "' requires option '" + required + "'.");
        }
    }
}

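/// Central place to register inter-option dependencies; currently only
/// 'tuning-level' requires 'tuning-path'.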
void CheckOptionDependencies(const cxxopts::ParseResult& result)
{
    CheckOptionDependency(result, "tuning-level", "tuning-path");
}

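/// Deduplicates 'computeDevices' while preserving the order of first
/// occurrence, e.g. {CpuAcc, GpuAcc, CpuAcc} becomes {CpuAcc, GpuAcc}.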
void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
{
    // Mark the duplicate devices as 'Undefined'.
    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
    {
        for (auto j = std::next(i); j != computeDevices.end(); ++j)
        {
            if (*j == *i)
            {
                *j = armnn::Compute::Undefined;
            }
        }
    }

    // Remove 'Undefined' devices.
    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
                         computeDevices.end());
}

/// Takes a vector of backend strings and returns a vector of backendIDs.
/// Removes duplicate entries.
/// Can handle backend strings that contain multiple backends separated by comma, e.g. "CpuRef,CpuAcc".
std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStringsVec)
{
    std::vector<armnn::BackendId> backendIDs;
    for (const auto& backendStrings : backendStringsVec)
    {
        // Each entry might contain multiple backends separated by comma, e.g. "CpuRef,CpuAcc".
        std::vector<std::string> backendStringVec = ParseStringList(backendStrings, ",");
        for (const auto& b : backendStringVec)
        {
            backendIDs.push_back(armnn::BackendId(b));
        }
    }

    RemoveDuplicateDevices(backendIDs);

    return backendIDs;
}

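// For example (illustrative only): GetBackendIDs({"GpuAcc,CpuAcc", "CpuRef", "CpuAcc"})
// yields {GpuAcc, CpuAcc, CpuRef}; the repeated "CpuAcc" is dropped as a duplicate.
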
/// Provides a segfault-safe way to get cxxopts option values by checking if the option was defined.
/// If the option wasn't defined it returns a default-constructed (empty) object.
template<typename optionType>
optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
{
    optionType out{};    // value-initialized so the fallback is well-defined for any type
    if (result.count(optionName) > 0)
    {
        out = result[optionName].as<optionType>();
    }
    return out;
}

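// Typical use (illustrative only): GetOptionValue<std::string>("tuning-path", result)
// returns an empty string when the option is absent instead of throwing.
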
void LogAndThrowFatal(std::string errorMessage)
{
    throw armnn::InvalidArgumentException(errorMessage);
}

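/// Ensures every option in the "a) Required" group was supplied. All missing
/// options are logged before throwing, so the user sees the complete list in
/// one run rather than one failure at a time.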
void CheckRequiredOptions(const cxxopts::ParseResult& result)
{
    // For each option in option-group "a) Required".
    std::vector<std::string> requiredOptions{"compute",
                                             "model-path"};

    bool requiredMissing = false;
    for (auto const& str : requiredOptions)
    {
        if (result.count(str) == 0)
        {
            ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
            requiredMissing = true;
        }
    }
    if (requiredMissing)
    {
        throw armnn::InvalidArgumentException("Some required arguments are missing");
    }
}

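/// Warns about each deprecated option present in 'result'. Deprecated options
/// still parse, so existing invocations keep working until they are removed.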
void CheckForDeprecatedOptions(const cxxopts::ParseResult& result)
{
    if (result.count("armnn-tflite-delegate") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'armnn-tflite-delegate' is deprecated and will be "
                              "removed soon. Please use the option 'tflite-executor' instead.";
    }
    if (result.count("concurrent") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'concurrent' is deprecated and will be "
                              "removed soon. Please use the option '\"P, thread-pool-size\"' instead.";
    }
    if (result.count("input-type") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-type' is deprecated and will be "
                              "removed soon. The input-types are now automatically set.";
    }
    if (result.count("input-name") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-name' is deprecated and will be "
                              "removed soon. The input-names are now automatically set.";
    }
    if (result.count("output-type") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-type' is deprecated and will be "
                              "removed soon. The output-types are now automatically set.";
    }
    if (result.count("output-name") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-name' is deprecated and will be "
                              "removed soon. The output-names are now automatically set.";
    }
    if (result.count("model-format") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'model-format' is deprecated and will be "
                              "removed soon. The model-format is now automatically set.";
    }
}

void ProgramOptions::ValidateExecuteNetworkParams()
{
    m_ExNetParams.ValidateParams();
}

void ProgramOptions::ValidateRuntimeOptions()
{
    if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
        !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
    {
        LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
    }
}

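/// Registers all command line options with cxxopts. Parsing, validation and
/// post-processing are deferred to ParseOptions(), so a ProgramOptions can be
/// default-constructed first and parsed later.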
ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
                                                "Executes a neural network model using the provided input "
                                                "tensor. Prints the resulting output tensor."}
{
    try
    {
        // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
        // separate function CheckRequiredOptions() for that.
        m_CxxOptions.add_options("a) Required")
                ("c,compute",
                 "Which device to run layers on by default. If a single device doesn't support all layers in the model "
                 "you can specify a second or third to fall back on. Possible choices: "
                 + armnn::BackendRegistryInstance().GetBackendIdsAsString()
                 + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
                   "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c CpuAcc -c CpuRef'. "
                   "Duplicates are ignored.",
                 cxxopts::value<std::vector<std::string>>())

                ("f,model-format",
                 "armnn-binary, onnx-binary, onnx-text, tflite-binary. "
                 "DEPRECATED: The program option 'model-format' is deprecated and will be "
                 "removed soon. The model-format is now automatically set.",
                 cxxopts::value<std::string>())

                ("m,model-path",
                 "Path to model file, e.g. .armnn, .prototxt, .tflite, .onnx",
                 cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))

                ("i,input-name",
                 "Identifier of the input tensors in the network separated by comma. "
                 "This option is not required, but can be used to set the order of inputs.",
                 cxxopts::value<std::string>())

                ("o,output-name",
                 "Identifier of the output tensors in the network separated by comma. "
                 "This option is not required, but can be used to set the order of outputs.",
                 cxxopts::value<std::string>());

        m_CxxOptions.add_options("b) General")
                ("b,dynamic-backends-path",
                 "Path where to load any available dynamic backend from. "
                 "If left empty (the default), dynamic backends will not be used.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))

                ("P, thread-pool-size",
                 "Run the network using the Arm NN thread pool with the number of threads provided.",
                 cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))

                ("n,concurrent",
                 "This option is for Arm NN internal asynchronous testing purposes. "
                 "False by default. If set to true, will use std::launch::async or the Arm NN thread pool, "
                 "if 'thread-pool-size' is greater than 0, for asynchronous execution. "
                 "DEPRECATED: The program option 'concurrent' is deprecated and will be "
                 "removed soon. Please use the option '\"P, thread-pool-size\"' instead.",
                 cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))

                ("d,input-tensor-data",
                 "Path to files containing the input data as a flat array separated by whitespace. "
                 "Several paths can be passed by separating them with a comma if the network has multiple inputs "
                 "or you wish to run the model multiple times with different input data using the 'iterations' option. "
                 "If not specified, the network will be run with dummy data (useful for profiling).",
                 cxxopts::value<std::string>()->default_value(""))

                ("h,help", "Display usage information")

                ("infer-output-shape",
                 "Infers output tensor shape from input tensor shape and validates where applicable (where supported by "
                 "parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))

                ("allow-expanded-dims",
                 "If true, will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
                 "still match. This is an experimental parameter that is incompatible with infer-output-shape. "
                 "This parameter may be removed in a later update.",
                 cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
                         ->implicit_value("true"))

                ("I,iterations",
                 "Number of iterations to run the network for; the default is 1. "
                 "If you wish to run the model with different input data for every execution you can do so by "
                 "supplying more input file paths to the 'input-tensor-data' option. "
                 "Note: The number of input files provided must be divisible by the number of inputs of the model. "
                 "e.g. Your model has 2 inputs and you supply 4 input files. If you set 'iterations' to 6 the first "
                 "run will consume the first two inputs, the second the next two and the last will begin from the "
                 "start and use the first two inputs again. "
                 "Note: If the 'concurrent' option is enabled all iterations will be run asynchronously.",
                 cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))

                ("l,dequantize-output",
                 "If this option is enabled, all quantized outputs will be dequantized to float. "
                 "If unset, defaults to not dequantized. "
                 "Accepted values (true or false)"
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))

                ("p,print-intermediate-layers",
                 "If this option is enabled, the output of every graph layer will be printed.",
                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
                         ->implicit_value("true"))

                ("F,print-intermediate-layers-to-file",
                 "If this option is enabled, the output of every graph layer will be printed within separate files.",
                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediateOutputsToFile)->default_value("false")
                         ->implicit_value("true"))

                ("parse-unsupported",
                 "Add unsupported operators as stand-in layers (where supported by parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))

                ("N,do-not-print-output",
                 "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
                 "This behaviour can be changed by adding this flag to your command.",
                 cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))

                ("q,quantize-input",
                 "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
                 "If unset, defaults to not quantized. Accepted values (true or false)"
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))

                ("r,threshold-time",
                 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
                 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
                 "time is used.",
                 cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))

                ("s,input-tensor-shape",
                 "The shape of the input tensors in the network as a flat array of integers separated by comma. "
                 "Several shapes can be passed by separating them with a colon (:).",
                 cxxopts::value<std::string>())

                ("v,visualize-optimized-model",
                 "Enables the optimized model visualizer. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
                         ->implicit_value("true"))

                ("w,write-outputs-to-file",
                 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
                 "If left empty (the default), the output tensors will not be written to a file.",
                 cxxopts::value<std::string>())

                ("x,subgraph-number",
                 "Id of the subgraph to be executed. Defaults to 0."
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))

                ("y,input-type",
                 "The type of the input tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined inputs. "
                 "Accepted values (float, int, qasymms8 or qasymmu8). "
                 "DEPRECATED: The program option 'input-type' is deprecated and will be "
                 "removed soon. The input-types are now automatically set.",
                 cxxopts::value<std::string>())

                ("z,output-type",
                 "The type of the output tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined outputs. "
                 "Accepted values (float, int, qasymms8 or qasymmu8). "
                 "DEPRECATED: The program option 'output-type' is deprecated and will be "
                 "removed soon. The output-types are now automatically set.",
                 cxxopts::value<std::string>())

                ("T,tflite-executor",
                 "Set the executor for the tflite model: parser, delegate, tflite. "
                 "parser is the ArmNNTfLiteParser, "
                 "delegate is the ArmNNTfLiteDelegate, "
                 "tflite is the TfliteInterpreter",
                 cxxopts::value<std::string>()->default_value("parser"))

                ("C, compare-output",
                 "Perform a per-byte root mean square error calculation of the inference output with an output"
                 " file that has been previously produced by running a network through ExecuteNetwork."
                 " See --write-outputs-to-file to produce an output file for an execution.",
                 cxxopts::value<std::string>(m_ExNetParams.m_ComparisonFile))

                ("B, compare-output-with-backend",
                 "Perform a per-byte root mean square error calculation of the output of the inference with a"
                 " different backend.",
                 cxxopts::value<std::vector<std::string>>())

                ("A, compare-with-tflite",
                 "Perform a per-byte root mean square error calculation of the output of the inference with"
                 " the tflite ref model.",
                 cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
                         ->implicit_value("true"));

        m_CxxOptions.add_options("c) Optimization")
                ("bf16-turbo-mode",
                 "This option is no longer being used. In order to use bf16 please set enable-fast-math "
                 "to true.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
                         ->default_value("false")->implicit_value("true"))

                ("enable-fast-math",
                 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
                 "performance improvements but may result in reduced or different precision.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))

                ("number-of-threads",
                 "Assign the number of threads used by the CpuAcc backend. "
                 "Input value must be between 1 and 64. "
                 "Default is set to 0 (the backend will decide the number of threads to use).",
                 cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))

                ("save-cached-network",
                 "Enables saving of the cached network to a file given with the cached-network-filepath option. "
                 "See also --cached-network-filepath",
                 cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
                         ->default_value("false")->implicit_value("true"))

                ("cached-network-filepath",
                 "If non-empty, the given file will be used to load/save the cached network. "
                 "If save-cached-network is given then the cached network will be saved to the given file. "
                 "To save the cached network a file must already exist. "
                 "If save-cached-network is not given then the cached network will be loaded from the given file. "
                 "This will remove initial compilation time of kernels and speed up the first execution.",
                 cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))

                ("fp16-turbo-mode",
                 "If this option is enabled, FP32 layers, "
                 "weights and biases will be converted to FP16 where the backend supports it.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
                         ->default_value("false")->implicit_value("true"))

                ("tuning-level",
                 "Sets the tuning level, which enables a tuning run that will update/create a tuning file. "
                 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
                 "Requires tuning-path to be set. The default is 0 (no tuning run).",
                 cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))

                ("tuning-path",
                 "Path to tuning file. Enables use of CL tuning.",
                 cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))

                ("MLGOTuningFilePath",
                 "Path to tuning file. Enables use of CL MLGO tuning.",
                 cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))

                ("R, reuse-buffers",
                 "If enabled, the IO buffers will be reused for each inference.",
                 cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));

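        // Note: 'tuning-level' only takes effect together with 'tuning-path';
        // CheckOptionDependencies() enforces that pairing when the command line is parsed.
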
        m_CxxOptions.add_options("d) Profiling")
                ("a,enable-external-profiling",
                 "If enabled, external profiling will be switched on.",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
                         ->default_value("false")->implicit_value("true"))

                ("e,event-based-profiling",
                 "Enables the built-in profiler. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))

                ("g,file-only-external-profiling",
                 "If enabled, the 'file-only' test mode of external profiling will be enabled.",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
                         ->default_value("false")->implicit_value("true"))

                ("file-format",
                 "If profiling is enabled, specifies the output file format.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))

                ("j,outgoing-capture-file",
                 "If specified, the outgoing external profiling packets will be captured in this binary file.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))

                ("k,incoming-capture-file",
                 "If specified, the incoming external profiling packets will be captured in this binary file.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))

                ("timeline-profiling",
                 "If enabled, timeline profiling will be switched on; requires external profiling.",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
                         ->default_value("false")->implicit_value("true"))

                ("u,counter-capture-period",
                 "If profiling is enabled in 'file-only' mode, this is the capture period that will be used in the test.",
                 cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))

                ("output-network-details",
                 "Outputs layer tensor infos and descriptors to stdout along with profiling events. Defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
                         ->implicit_value("true"))

                ("output-network-details-only",
                 "Outputs layer tensor infos and descriptors to stdout without profiling events. Defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
                         ->implicit_value("true"))

                ("import-inputs-if-aligned",
                 "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
                 cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
                         ->implicit_value("true"));
    }
    catch (const std::exception& e)
    {
        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
        exit(EXIT_FAILURE);
    }
}

ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}

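/// Parses the command line, runs the required/dependency/deprecation checks,
/// and post-processes options that cxxopts cannot bind directly (backend
/// lists, tensor shapes, the TfLite executor and CL tuning parameters).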
void ProgramOptions::ParseOptions(int ac, const char* av[])
{
    // Parses the command-line.
    m_CxxResult = m_CxxOptions.parse(ac, av);

    if (m_CxxResult.count("help") || ac <= 1)
    {
        std::cout << m_CxxOptions.help() << std::endl;
        exit(EXIT_SUCCESS);
    }

    CheckRequiredOptions(m_CxxResult);
    CheckOptionDependencies(m_CxxResult);
    CheckForDeprecatedOptions(m_CxxResult);

    if ((m_ExNetParams.m_OutputDetailsToStdOut ||
         m_ExNetParams.m_OutputDetailsOnlyToStdOut) &&
        !m_ExNetParams.m_EnableProfiling)
    {
        throw cxxopts::OptionParseException("You must enable profiling if you would like to output layer details");
    }

    // Some options can't be assigned directly because they need some post-processing:
    auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
    m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
    m_ExNetParams.m_InputNames =
            ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTensorDataFilePaths =
            ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
    m_ExNetParams.m_OutputNames =
            ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
    m_ExNetParams.m_OutputTensorFiles =
            ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
    m_ExNetParams.m_GenerateTensorData = m_ExNetParams.m_InputTensorDataFilePaths.empty();
    m_ExNetParams.m_DynamicBackendsPath = m_RuntimeOptions.m_DynamicBackendsPath;

    m_RuntimeOptions.m_EnableGpuProfiling = m_ExNetParams.m_EnableProfiling;

    std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);

    if (tfliteExecutor.empty() || tfliteExecutor == "parser")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser;
    }
    else if (tfliteExecutor == "delegate")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }
    else if (tfliteExecutor == "tflite")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter;
    }
    else
    {
        ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
        throw armnn::InvalidArgumentException("Invalid tflite-executor option");
    }

    // For backwards compatibility when deprecated options are used.
    if (m_ExNetParams.m_EnableDelegate)
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }

    // The deprecated 'concurrent' flag maps to a thread pool of size 1, and a
    // non-zero thread-pool-size in turn enables concurrent execution.
    if (m_ExNetParams.m_Concurrent)
    {
        m_ExNetParams.m_ThreadPoolSize = 1;
    }

    if (m_ExNetParams.m_ThreadPoolSize > 0)
    {
        m_ExNetParams.m_Concurrent = true;
    }

    // Parse input tensor shape from the string we got from the command-line.
    std::vector<std::string> inputTensorShapesVector =
            ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");

    if (!inputTensorShapesVector.empty())
    {
        m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());

        for (const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            m_ExNetParams.m_InputTensorShapes.push_back(
                    armnn::TensorShape{static_cast<unsigned int>(dims.size()), dims.data()});
        }
    }

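    // For example (illustrative only): --input-tensor-shape "1,3,224,224:1,10"
    // produces two TensorShapes, [1,3,224,224] and [1,10].
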
    // We have to validate ExecuteNetworkParams first so that the tuning path and level are validated.
    ValidateExecuteNetworkParams();

    // Pass CL tuning parameters through to the runtime options.
    if (!m_ExNetParams.m_TuningPath.empty())
    {
        m_RuntimeOptions.m_BackendOptions.emplace_back(
            armnn::BackendOptions
            {
                "GpuAcc",
                {
                    {"TuningLevel", m_ExNetParams.m_TuningLevel},
                    {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
                    {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
                    {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
                }
            }
        );
    }

    ValidateRuntimeOptions();

    auto comparisonComputeDevices = GetOptionValue<std::vector<std::string>>("compare-output-with-backend", m_CxxResult);

    if (!comparisonComputeDevices.empty())
    {
        m_ExNetParams.m_ComparisonComputeDevices = GetBackendIDs(comparisonComputeDevices);
    }
}