//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "../InferenceTestImage.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/TContainer.hpp>
#include <armnnUtils/Permute.hpp>

#include <algorithm>
#include <array>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Parameters used in normalizing images
struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ { 0.0, 0.0, 0.0 } };
    std::array<float, 3> stddev{ { 1.0, 1.0, 1.0 } };
};

enum class SupportedFrontend
{
    TFLite     = 0,
};

/** Get normalization parameters.
 * Note that different flavours of models and different model data types have different normalization methods.
 * This tool currently only supports TFLite models.
 *
 * @param[in] modelFormat   One of the supported frontends
 * @param[in] outputType    Output type of the image tensor, also the type of the intended model
 */
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                   const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QAsymmU8:
                    break;
                case armnn::DataType::QAsymmS8:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                default:
                    break;
            }
            break;
    }
    return normParams;
}

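// Illustrative usage (a sketch, not part of the original tool). The parameters returned here are
// consumed by InferenceTestImage::Resize in PrepareImageTensor below, which is assumed to apply
// ((pixel / scale) - mean[c]) / stddev[c] per channel c; under that assumption the TFLite Float32
// settings (scale = 127.5, mean = {1, 1, 1}) map pixel values from [0, 255] into [-1, 1].
//
//     const NormalizationParameters params =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QAsymmU8);
//     // For QAsymmU8 the defaults are kept (scale = 1, mean = {0, 0, 0}), so raw 0-255 values pass through.
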
/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
 *
 * @param[in] imagePath     Path to the image file
 * @param[in] newWidth      The new width of the output image tensor
 * @param[in] newHeight     The new height of the output image tensor
 * @param[in] normParams    Normalization parameters for the normalization of the image
 * @param[in] batchSize     Batch size
 * @param[in] outputLayout  Data layout of the output image tensor
 */
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize                = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

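// Note: only the explicit specializations below (float, int, uint8_t and int8_t) are defined in
// this header; the primary template above is declaration-only, so instantiating it with any other
// element type is expected to fail at link time.
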
// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height, or keep the original dimensions if the new
    // width and height are specified as 0. Centre/normalise the image.
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
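    // Note (added for clarity): armnn's PermutationVector is interpreted as "source dimension i
    // moves to destination dimension mappings[i]", so { 0, 2, 3, 1 } below sends N->0, H->2, W->3
    // and C->1, rearranging the NHWC buffer produced above into NCHW order.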
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}

// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymmu8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

// Prepare qasymms8 image tensor
template <>
std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
                                               unsigned int newWidth,
                                               unsigned int newHeight,
                                               const NormalizationParameters& normParams,
                                               unsigned int batchSize,
                                               const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<int8_t> imageDataQasymms8;
    imageDataQasymms8.reserve(imageDataFloat.size());
    // Convert to int8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
                   [](float val) { return static_cast<int8_t>(val); });
    return imageDataQasymms8;
}

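// Note: for the two quantized specializations above, the normalization parameters returned by
// GetNormalizationParameters are expected to keep the float values inside the target integer range
// (0..255 for QAsymmU8; -128..127 for QAsymmS8 via the 128.0 mean shift), so a plain static_cast,
// which truncates rather than rounds, is sufficient here; no per-tensor quantization scale or
// offset is applied.
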
/** Write image tensor to ofstream
 *
 * @param[in] imageData         Image tensor data
 * @param[in] imageTensorFile   Output filestream (ofstream) to which the image tensor data is written
 */
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}

// For uint8_t image tensors, cast the elements to int before writing to prevent the data being
// written as characters instead of numerical values
template <>
void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}

// For int8_t image tensors, cast the elements to int before writing to prevent the data being
// written as characters instead of numerical values
template <>
void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
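
// End-to-end usage sketch (illustrative only; the file names, the 224x224 size and the QAsymmU8
// choice are assumptions, not part of this header):
//
//     const NormalizationParameters params =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QAsymmU8);
//     const std::vector<uint8_t> tensor =
//         PrepareImageTensor<uint8_t>("cat.jpg", 224, 224, params, 1, armnn::DataLayout::NHWC);
//     std::ofstream outFile("cat.raw");
//     WriteImageTensorImpl<uint8_t>(tensor, outFile);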