//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefNormalizationWorkload.hpp"

#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <Profiling.hpp>

#include <cmath>

#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"

using namespace armnn;
using namespace armnnUtils;

namespace
{

// Helper function to compute "Within" normalization using Krizhevsky 2012: Local Brightness Normalization.
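// A minimal sketch of the per-element computation (the notation below is ours, not
// from the paper): for each position (n, c, h, w),
//
//     out = in / (kappa + alpha * accumulated_scale)^beta
//
// where accumulated_scale sums the squared inputs over a norm_size x norm_size
// spatial window centred on (h, w) within the same channel, clipped at the edges.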
void NormalizeWithinUsingLbr(Decoder<float>& inputData,
                             Encoder<float>& outputData,
                             const TensorShape& tensorShape,
                             uint32_t norm_size,
                             float alpha,
                             float beta,
                             float kappa)
{
    const unsigned int batchSize = tensorShape[0];
    const unsigned int depth = tensorShape[1];
    const unsigned int rows = tensorShape[2];
    const unsigned int cols = tensorShape[3];

    // norm_size is assumed to be odd: the truncating integer division then yields
    // the half-width of the window, e.g. norm_size 5 gives a radius of 2.
    int radius = armnn::numeric_cast<int>(norm_size / 2u);

    for (unsigned int n = 0; n < batchSize; n++)
    {
        for (unsigned int c = 0; c < depth; c++)
        {
            for (unsigned int h = 0; h < rows; h++)
            {
                for (unsigned int w = 0; w < cols; w++)
                {
                    float accumulated_scale = 0.0f;
                    for (int y = -radius; y <= radius; y++)
                    {
                        for (int x = -radius; x <= radius; x++)
                        {
                            int i = armnn::numeric_cast<int>(w) + x;
                            int j = armnn::numeric_cast<int>(h) + y;

                            // Skip window positions that fall outside the tensor.
                            if ((i < 0) || (i >= armnn::numeric_cast<int>(cols)))
                            {
                                continue;
                            }

                            if ((j < 0) || (j >= armnn::numeric_cast<int>(rows)))
                            {
                                continue;
                            }

                            // NCHW offset of the neighbouring element.
                            unsigned int inputIndex = n * cols * rows * depth +
                                                      c * cols * rows +
                                                      armnn::numeric_cast<unsigned int>(j) * cols +
                                                      armnn::numeric_cast<unsigned int>(i);

                            // operator[] repositions the decoder; Get() then reads the element there.
                            inputData[inputIndex];
                            float inval = inputData.Get();

                            accumulated_scale += inval * inval;
                        }
                    }

                    unsigned int index = n * cols * rows * depth +
                                         c * cols * rows +
                                         h * cols +
                                         w;
                    // Reposition both views on the current element before reading and writing.
                    inputData[index];
                    outputData[index];
                    outputData.Set(inputData.Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
                }
            }
        }
    }
}

// Helper function to compute "Across" normalization using Krizhevsky 2012: Local Brightness Normalization.
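// A minimal sketch of the per-element computation (notation as above): for each
// position (n, c, h, w),
//
//     out = in * (kappa + alpha * accumulated_scale)^(-beta)
//
// where accumulated_scale sums the squared inputs over the norm_size channels
// centred on c at the same spatial position, clipped at the channel boundaries.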
void NormalizeAcrossUsingLbr(Decoder<float>& inputData,
                             Encoder<float>& outputData,
                             const TensorShape& tensorShape,
                             uint32_t norm_size,
                             float alpha,
                             float beta,
                             float kappa,
                             DataLayout dataLayout)
{
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    const unsigned int batchSize = tensorShape[0];
    const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
    const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
    const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];

    // norm_size is assumed to be odd: the truncating integer division then yields
    // the half-width of the window, e.g. norm_size 5 gives a radius of 2.
    int radius = armnn::numeric_cast<int>(norm_size / 2u);

    for (unsigned int n = 0; n < batchSize; n++)
    {
        for (unsigned int c = 0; c < depth; c++)
        {
            for (unsigned int h = 0; h < rows; h++)
            {
                for (unsigned int w = 0; w < cols; w++)
                {
                    float accumulated_scale = 0.0f;
                    for (int z = -radius; z <= radius; z++)
                    {
                        int k = armnn::numeric_cast<int>(c) + z;

                        // Skip neighbouring channels that fall outside the tensor.
                        if ((k < 0) || (k >= armnn::numeric_cast<int>(depth)))
                        {
                            continue;
                        }

                        unsigned int inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
                                                                             n,
                                                                             armnn::numeric_cast<unsigned int>(k),
                                                                             h,
                                                                             w);

                        // operator[] repositions the decoder; Get() then reads the element there.
                        inputData[inputIndex];
                        float inval = inputData.Get();

                        accumulated_scale += inval * inval;
                    }

                    float scale = kappa + (accumulated_scale * alpha);
                    scale = powf(scale, -beta);

                    unsigned int index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);

                    // Reposition both views on the current element before reading and writing.
                    inputData[index];
                    outputData[index];
                    outputData.Set(scale * inputData.Get());
                }
            }
        }
    }
}

} // Anonymous namespace

namespace armnn
{

RefNormalizationWorkload::RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info)
    : RefBaseWorkload(descriptor, info)
{}

void RefNormalizationWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
    // The asynchronous path reads its tensor handles from the working-memory
    // descriptor supplied by the caller rather than from the workload's own m_Data.
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);

    auto inputDecoder  = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(inputInfo, outputs[0]->Map());
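    // Note: the output is assumed to share the input's shape and data type, which is
    // why inputInfo is used for both views. Decoder and Encoder abstract over the
    // stored element type: the decoder converts each element to float on Get(), and
    // the encoder converts the float result back (re-quantizing where needed) on
    // Set(), so this single float implementation serves every supported data type.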

    if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
    {
        if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
        {
            NormalizeWithinUsingLbr(*inputDecoder,
                                    *outputEncoder,
                                    inputInfo.GetShape(),
                                    m_Data.m_Parameters.m_NormSize,
                                    m_Data.m_Parameters.m_Alpha,
                                    m_Data.m_Parameters.m_Beta,
                                    m_Data.m_Parameters.m_K);
        }
        else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
        {
            NormalizeAcrossUsingLbr(*inputDecoder,
                                    *outputEncoder,
                                    inputInfo.GetShape(),
                                    m_Data.m_Parameters.m_NormSize,
                                    m_Data.m_Parameters.m_Alpha,
                                    m_Data.m_Parameters.m_Beta,
                                    m_Data.m_Parameters.m_K,
                                    m_Data.m_Parameters.m_DataLayout);
        }
        else
        {
            ARMNN_LOG(warning) << "Illegal NORMALIZATION channel type in RefNormalizationWorkload";
            return;
        }
    }
    else
    {
        ARMNN_LOG(warning) << "LocalContrast method (Jarrett 2009: Local Contrast Normalization) not supported yet.";
        return;
    }
}

} // namespace armnn