/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "src/core/NEON/kernels/arm_conv/addressing.hpp"
#include "depthwise_strategies_common.hpp"
#include "working_space.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

#include <limits>

namespace arm_conv {
namespace depthwise {

template <typename TInput, typename TWeight, typename TOutput, typename TAccum,
          typename OutputStage>
class DepthwiseDepthfirstStrategyCommon
  : public DepthfirstStrategy<TInput, TWeight, TOutput, TAccum, OutputStage>
{
  protected:
  unsigned int m_output_rows, m_output_cols;
  unsigned int m_kernel_rows, m_kernel_cols;
  unsigned int m_stride_rows, m_stride_cols;

  public:
  DepthwiseDepthfirstStrategyCommon(
    unsigned int output_rows, unsigned int output_cols,
    unsigned int kernel_rows, unsigned int kernel_cols,
    unsigned int stride_rows=1, unsigned int stride_cols=1
  ) : m_output_rows(output_rows), m_output_cols(output_cols),
      m_kernel_rows(kernel_rows), m_kernel_cols(kernel_cols),
      m_stride_rows(stride_rows), m_stride_cols(stride_cols)
  {
  }

  DepthwiseDepthfirstStrategyCommon(unsigned int output_size, unsigned int kernel_size, unsigned int stride=1)
  : DepthwiseDepthfirstStrategyCommon(output_size, output_size, kernel_size, kernel_size, stride, stride)
  {
  }

  virtual ~DepthwiseDepthfirstStrategyCommon() {}

  unsigned int get_output_rows() const override { return m_output_rows; }
  unsigned int get_output_cols() const override { return m_output_cols; }

  unsigned int get_kernel_rows() const override { return m_kernel_rows; }
  unsigned int get_kernel_cols() const override { return m_kernel_cols; }

  unsigned int get_stride_rows() const override { return m_stride_rows; }
  unsigned int get_stride_cols() const override { return m_stride_cols; }
};

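/* Strategy interface for the standard (non-quantized) depth-first kernels.
 *
 * Implementations expose two kernel entry points: an indirect kernel which
 * consumes per-point arrays of input and output pointers (used when a tile
 * may touch padding), and a direct kernel which walks a grid of unpadded
 * tiles from base pointers and row/column strides.
 */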
template <typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage=typename DefaultOutputStage<TOutput>::Type>
class DepthwiseDepthfirstStrategy : public DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>
{
  using Parent = DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>;

  public:
  using Parent::Parent;

  typedef void (*IndirectKernelType)(
    const TInput *const *input_ptrs,
    TOutput *const *output_ptrs,
    const void *params,
    unsigned int n_channels,
    const TAccum activation_min,
    const TAccum activation_max
  );
  virtual IndirectKernelType get_indirect_kernel(void) const = 0;

  typedef void (*DirectKernelType)(
    const unsigned int n_tile_rows, const unsigned int n_tile_cols,
    const TInput *inptr_base, int64_t ld_input_row, int64_t ld_input_col,
    TOutput *outptr_base, int64_t ld_output_row, int64_t ld_output_col,
    const void *params, unsigned int n_channels,
    const TAccum activation_min,
    const TAccum activation_max
  );
  virtual DirectKernelType get_direct_kernel(void) const = 0;
};

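/* Specialisation of the strategy for int32_t accumulation with an
 * arm_gemm::Requantize32 output stage.
 *
 * A single kernel entry point receives the packed weights, the bias and the
 * per-channel requantisation multipliers and shifts; parameter packing and
 * storage sizing are delegated to the generic interleaving helpers.
 */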
template <typename TInput, typename TWeight, typename TOutput>
class DepthwiseDepthfirstStrategy<TInput, TWeight, TOutput, int32_t>
: public DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>
{
  using Parent = DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, int32_t, arm_gemm::Requantize32>;

  protected:
  interleaves::PackingArguments get_packing_args(void) const
  {
    return interleaves::PackingArguments(
      this->get_kernel_rows(), this->get_kernel_cols(), sizeof(TWeight),
      false, sizeof(int32_t),  // Don't pack the bias
      this->get_vl_type(), sizeof(int32_t), this->get_accumulator_depth_vl(),
      [this] (unsigned int idx, unsigned int &x, unsigned int &y) -> bool
      { return this->get_kernel_packing_point(idx, x, y); }
    );
  }

  public:
  using Parent::Parent;

  typedef void (*KernelType)(
    unsigned int,  // n_channels
    const TInput *const *,  // inptrs
    const TWeight *,  // weights
    const int32_t *,  // bias
    const arm_gemm::Requantize32 &,
    const int32_t *, const int32_t *,  // requant_muls and requant_shifts
    TOutput *const *  // outptrs
  );
  virtual KernelType get_kernel() const = 0;

  size_t get_storage_size(const DepthwiseArgs &args) const override
  {
    return interleaves::get_storage_size_generic(get_packing_args(), args);
  }

  void pack_parameters(
    const DepthwiseArgs &args, void *buffer,
    const void *biases, const arm_gemm::Requantize32 &,
    const void *weights, size_t ld_weight_col, size_t ld_weight_row
  ) const override
  {
    interleaves::pack_parameters_generic(
      get_packing_args(), args, buffer, biases, weights, ld_weight_col, ld_weight_row);
  }
};

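/* Common driver for depth-first depthwise convolution.
 *
 * Stores the output stage (activation bounds or requantisation parameters)
 * alongside the arguments held by DepthfirstDriver, and forwards storage-size
 * and parameter-packing queries to the strategy.
 */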
template <typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage>
class DepthwiseDepthfirstCommon : public DepthfirstDriver<TInput, TWeight, TOutput>
{
  using StratType = DepthwiseDepthfirstStrategyCommon<TInput, TWeight, TOutput, TAccum, OutputStage>;
  OutputStage m_os;

  protected:
  inline OutputStage &get_output_stage(void) { return m_os; }
  inline const OutputStage &get_output_stage(void) const { return m_os; }

  public:
  DepthwiseDepthfirstCommon(StratType *const strat, const DepthwiseArgs &args, const OutputStage &os)
  : DepthfirstDriver<TInput, TWeight, TOutput>(strat, args), m_os(os)
  {
  }

  DepthwiseDepthfirstCommon(DepthwiseDepthfirstCommon &) = delete;
  DepthwiseDepthfirstCommon &operator=(DepthwiseDepthfirstCommon &) = delete;

  size_t get_storage_size(void) const override
  {
    return reinterpret_cast<const StratType *>(this->m_strat.get())->
      get_storage_size(this->m_args);
  }

  void pack_parameters(void *buffer, const void *biases, const void *weights, size_t ld_weight_col, size_t ld_weight_row) override
  {
    reinterpret_cast<const StratType *>(this->m_strat.get())->
      pack_parameters(this->m_args, buffer, biases, m_os, weights, ld_weight_col, ld_weight_row);
  }
};

namespace depthwise_depthfirst {

/* Workspace Element for an array of input pointers as consumed by the
 * specialised depthwise kernels.
 */
template <typename T>
class InputArrayElement
{
  public:
  struct Workspace
  {
    const T **inptr_array;
  };

  template <class OutputStage>
  static size_t get_element_size(const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    return sizeof(T **) * args.strategy->get_input_rows() * args.strategy->get_input_cols();
  }

  template <class WorkspaceType, class OutputStage>
  static void *initialise(WorkspaceType *ws, void *buffer, const WorkspaceArgs<IDepthfirstStrategy, OutputStage> &args)
  {
    ws->inptr_array = reinterpret_cast<const T**>(buffer);
    return reinterpret_cast<char *>(buffer) + get_element_size(args);
  }
};

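/* Selects the final element of the working space: activation bounds in the
 * general case, or the requantisation parameters (bias, multipliers and
 * shifts) when accumulating into int32_t for a Requantize32 output stage.
 */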
template <typename TAccum, typename OutputStage, bool IsDot=false>
struct WorkspaceFinalElement
{
  using Element = ActivationsElement<TAccum, OutputStage>;
};

template <>
struct WorkspaceFinalElement<int32_t, arm_gemm::Requantize32, false>
{
  using Element = RequantizationParametersElement;
};

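/* Dispatch helper which hides the difference between the kernel interfaces.
 *
 * The generic case can call either the indirect (pointer-array) kernel or the
 * direct (strided tile-grid) kernel. The Requantize32 specialisation only
 * provides an indirect form, so `supports_direct_kernel` is false and its
 * `direct` method is an unreachable stub.
 */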
template <typename TInput, typename TWeight, typename TOutput, typename TAccum, typename OutputStage>
struct Invoke
{
  constexpr static bool supports_direct_kernel = true;

  template <typename Strat, typename Workspace>
  static inline void indirect(const Strat *strat, const Workspace *ws, const OutputStage &, const void *params, const TAccum *, unsigned int n_channels)
  {
    strat->get_indirect_kernel()(
      ws->inptr_array,
      ws->outptr_array,
      params, n_channels,
      ws->activation_min, ws->activation_max
    );
  }

  template <typename Strat, typename Workspace>
  static void direct(
    const Strat *strat, const Workspace *ws, const OutputStage &,
    unsigned int n_tile_rows, unsigned int n_tile_cols,
    const TInput *inptr, size_t ld_in_row, size_t ld_in_col,
    TOutput *outptr, size_t ld_out_row, size_t ld_out_col,
    const void *params, unsigned int n_channels
  )
  {
    strat->get_direct_kernel()(
      n_tile_rows, n_tile_cols,
      inptr, ld_in_row, ld_in_col,
      outptr, ld_out_row, ld_out_col,
      params, n_channels, ws->activation_min, ws->activation_max
    );
  }
};

template <typename TInput, typename TWeight, typename TOutput, typename TAccum>
struct Invoke<TInput, TWeight, TOutput, TAccum, arm_gemm::Requantize32>
{
  constexpr static bool supports_direct_kernel = false;

  template <typename Strat, typename Workspace>
  static inline void indirect(const Strat *strat, const Workspace *ws, const arm_gemm::Requantize32 &qp, const void *params, const TAccum *, unsigned int n_channels)
  {
    strat->get_kernel()(
      n_channels, ws->inptr_array,
      reinterpret_cast<const TWeight *>(params), ws->bias,
      qp, ws->requant_muls, ws->requant_shifts,
      ws->outptr_array
    );
  }

  template <typename Strat, typename Workspace>
  static inline void direct(
    const Strat *, const Workspace *, const arm_gemm::Requantize32 &,
    unsigned int, unsigned int,  // n_tile_rows, n_tile_cols
    const TInput *, size_t, size_t,  // Input pointer, row stride, column stride
    TOutput *, size_t, size_t,  // Output pointer, row stride, column stride
    const void *, unsigned int  // Parameters, number of channels
  )
  {
    // Do nothing - this should never be reached because entry to it is guarded
    // by an `if` on a `constexpr static bool`.
  }
};

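/* `stash_bias` stores the bias pointer into the output stage where the output
 * stage needs it (the Requantize32 case); for every other output stage it is
 * a no-op.
 */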
namespace
{

template <typename OutputStage>
inline void stash_bias(OutputStage &, const void *) {}

template <>
inline void stash_bias(arm_gemm::Requantize32 &qp, const void *bias) __attribute__ ((unused));

template <>
inline void stash_bias(arm_gemm::Requantize32 &qp, const void *bias)
{
  qp.bias = reinterpret_cast<const int32_t *>(bias);
}

}

}  // namespace depthwise_depthfirst

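/* Depth-first depthwise convolution.
 *
 * Combines a strategy with the generic driver logic: padded tiles are handled
 * through per-point input/output pointer arrays (the indirect kernel), while
 * fully unpadded regions can use the direct, strided kernel where the
 * strategy supports it.
 *
 * Illustrative usage sketch only -- the strategy type named below is a
 * placeholder and is not defined in this header:
 *
 *   SomeDepthfirstStrategy *strat = ...;  // hypothetical strategy implementation
 *   DepthwiseDepthfirst<float> conv(strat, args);
 *
 *   std::vector<uint8_t> storage(conv.get_storage_size());
 *   conv.pack_parameters(storage.data(), bias, weights, ld_weight_col, ld_weight_row);
 *
 *   std::vector<uint8_t> thread_ws(conv.get_working_size_per_thread(n_channels));
 *   conv.initialise_working_space(thread_ws.data(), n_channels);
 */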
template <typename TInput,
          typename TWeight=TInput,
          typename TOutput=TInput,
          typename TAccum=typename DefaultTAccum<TInput>::Type,
          typename OutputStage=typename DefaultOutputStage<TOutput>::Type>
class DepthwiseDepthfirst
: public DepthwiseDepthfirstCommon<TInput, TWeight, TOutput, TAccum, OutputStage>
{
  using StratType = DepthwiseDepthfirstStrategy<TInput, TWeight, TOutput, TAccum>;
  using Parent = DepthwiseDepthfirstCommon<TInput, TWeight, TOutput, TAccum, OutputStage>;
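  // The per-thread working space is composed of the elements below, packed
  // into a single buffer; each element's `initialise` claims its slice of the
  // buffer and returns a pointer just past it (as InputArrayElement does
  // above).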
  using WorkspaceManager = Workspace<
    OutputArrayElement<TOutput>,
    depthwise_depthfirst::InputArrayElement<TInput>,
    InputBufferElement<TInput>,
    typename depthwise_depthfirst::WorkspaceFinalElement<TAccum, OutputStage>::Element
  >;
  using WorkingSpace = typename WorkspaceManager::WorkspaceType;

  // We keep a copy of the bias pointer; the output stage itself is held by
  // the parent class.
  const TAccum *m_bias;

  public:
  DepthwiseDepthfirst(StratType *const strat, const DepthwiseArgs &args, const OutputStage &os = {})
  : Parent(strat, args, os), m_bias(nullptr)
  {
  }

  DepthwiseDepthfirst(DepthwiseDepthfirst &) = delete;
  DepthwiseDepthfirst &operator=(DepthwiseDepthfirst &) = delete;

  void pack_parameters(void *buffer, const void *biases, const void *weights, size_t ld_weight_col, size_t ld_weight_row) override
  {
    reinterpret_cast<const StratType *>(this->m_strat.get())->pack_parameters(
      this->m_args, buffer, biases, this->get_output_stage(),
      weights, ld_weight_col, ld_weight_row
    );
    m_bias = reinterpret_cast<const TAccum *>(biases);
    depthwise_depthfirst::stash_bias(this->get_output_stage(), biases);
  }

  size_t get_working_size_per_thread(const unsigned int n_input_channels) const override
  {
    DepthwiseArgs args(this->m_args);
    args.input_channels = n_input_channels;
    return WorkspaceManager::get_sizeof_workspace(
      WorkspaceArgs<IDepthfirstStrategy, OutputStage>(this->m_strat.get(), args, this->get_output_stage())
    );
  }

  void initialise_working_space(void *buffer, unsigned int n_input_channels) const override
  {
    DepthwiseArgs args(this->m_args);
    args.input_channels = n_input_channels;
    WorkspaceManager::initialise(
      buffer, WorkspaceArgs<IDepthfirstStrategy, OutputStage>(this->m_strat.get(), args, this->get_output_stage())
    );
  }

  protected:
  void compute_tile_padded(
    unsigned int output_i, unsigned int output_j,
    unsigned int output_channel_start, unsigned int output_channel_end,
    const TensorSpec<const TInput *> &input,
    const TensorSpec<TOutput *> &output,
    const void *parameters,
    void *working_space_raw
  ) const override
  {
    // Get the working space
    auto ws = reinterpret_cast<WorkingSpace *>(working_space_raw);

    // Compute the input pointer array
    const auto input_channel_start = output_channel_start / this->m_args.channel_multiplier;

    const int ii = static_cast<int>(output_i * this->m_args.stride_rows) - this->m_args.padding.top;
    const auto input_pad_top = static_cast<unsigned int>(ii < 0 ? -ii : 0);
    const auto input_i = static_cast<unsigned int>(ii < 0 ? 0 : ii);

    const int ij = static_cast<int>(output_j * this->m_args.stride_cols) - this->m_args.padding.left;
    const auto input_pad_left = static_cast<unsigned int>(ij < 0 ? -ij : 0);
    const auto input_j = static_cast<unsigned int>(ij < 0 ? 0 : ij);
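    // Worked example (illustrative numbers only): with stride_rows == 1,
    // padding.top == 1 and output_i == 0, ii == -1, so the tile sees one row
    // of implicit top padding (input_pad_top == 1) and reads from input row 0
    // (input_i == 0). For output_i == 4 the same arithmetic gives ii == 3,
    // i.e. no padding and input_i == 3.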

    fill_pointer_array<const TInput>(
      ws->inptr_array, this->m_strat->get_input_rows(), this->m_strat->get_input_cols(),
      input.base + input_i*input.ld_row + input_j*input.ld_col + input_channel_start,
      input.ld_row, input.ld_col,
      ws->input_buffer,
      input_pad_top, this->m_args.input_rows - input_i,
      input_pad_left, this->m_args.input_cols - input_j
    );

    // Compute the output pointer array
    fill_pointer_array(
      ws->outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
      output.base + output_i*output.ld_row + output_j*output.ld_col + output_channel_start,
      output.ld_row, output.ld_col,
      ws->output_buffer,
      0, this->m_args.output_rows - output_i,  // Top padding, # valid rows
      0, this->m_args.output_cols - output_j  // Left padding, # valid columns
    );

    // Execute the kernel
    depthwise_depthfirst::Invoke<TInput, TWeight, TOutput, TAccum, OutputStage>::indirect(
      reinterpret_cast<const StratType *>(this->m_strat.get()),
      ws, this->get_output_stage(), parameters, m_bias, output_channel_end - output_channel_start
    );
  }

  void compute_row_padded_tile_row(
    const unsigned int output_i, unsigned int output_j, unsigned int n_tile_cols,
    const unsigned int output_channel_start, const unsigned int output_channel_end,
    const TensorSpec<const TInput *> &input,
    const TensorSpec<TOutput *> &output,
    const void *parameters,
    void *working_space
  ) const override
  {
    using Invoker = depthwise_depthfirst::Invoke<TInput, TWeight, TOutput, TAccum, OutputStage>;
    auto ws = reinterpret_cast<WorkingSpace *>(working_space);
    const auto strat = reinterpret_cast<const StratType *>(this->m_strat.get());
    const auto os = this->get_output_stage();

    // Compute top and bottom padding; hence fill in the initial pointer arrays.
    const auto input_channel_start = output_channel_start / this->m_args.channel_multiplier;
    const int ii = static_cast<int>(output_i * this->m_args.stride_rows) - this->m_args.padding.top;
    const auto input_pad_top = static_cast<unsigned int>(ii < 0 ? -ii : 0);

    const auto input_i = static_cast<unsigned int>(ii < 0 ? 0 : ii);
    const auto input_j = output_j * this->m_args.stride_cols - this->m_args.padding.left;

    // The number of valid input rows is the smaller of the strategy's input
    // rows which aren't top padding and the number of rows remaining in the
    // input tensor; likewise for the valid output rows.
    const auto valid_input_rows = std::min(strat->get_input_rows() - input_pad_top, this->m_args.input_rows - input_i);
    const auto valid_output_rows = std::min(strat->get_output_rows(), this->m_args.output_rows - output_i);

    const auto input_point_stride = input.ld_col * this->m_strat->get_output_cols() * this->m_args.stride_cols;
    const auto output_point_stride = output.ld_col * this->m_strat->get_output_cols();
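    // Moving one tile to the right advances each input pointer by
    // (output_cols * stride_cols) input columns and each output pointer by
    // output_cols columns; these strides are applied to the pointer arrays
    // after each kernel invocation below.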

    fill_pointer_array<const TInput>(
      ws->inptr_array, this->m_strat->get_input_rows(), this->m_strat->get_input_cols(),
      input.base + input_i*input.ld_row + input_j*input.ld_col + input_channel_start,
      input.ld_row, input.ld_col,
      ws->input_buffer,
      input_pad_top, this->m_args.input_rows - input_i,
      0, this->m_args.input_cols - input_j  // No left padding
    );

    fill_pointer_array(
      ws->outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
      output.base + output_i*output.ld_row + output_j*output.ld_col + output_channel_start,
      output.ld_row, output.ld_col,
      ws->output_buffer,
      0, this->m_args.output_rows - output_i,  // Top padding, # valid rows
      0, this->m_args.output_cols - output_j  // Left padding, # valid columns
    );

    for (; n_tile_cols; n_tile_cols--)
    {
      // Execute the kernel
      Invoker::indirect(
        strat, ws, os, parameters, m_bias, output_channel_end - output_channel_start
      );

      // Update all unpadded pointers
      {
        auto ptr = ws->inptr_array + strat->get_input_cols() * input_pad_top;
        for (auto n = input_pad_top; n < (valid_input_rows + input_pad_top); n++)
        {
          for (auto m = 0u; m < strat->get_input_cols(); m++)
          {
            *(ptr++) += input_point_stride;
          }
        }
      }
      {
        auto ptr = ws->outptr_array;
        for (auto n = 0u; n < valid_output_rows * strat->get_output_cols(); n++)
        {
          *(ptr++) += output_point_stride;
        }
      }
    }
  }

  void compute_tiles_unpadded(
    unsigned int output_i, const unsigned int output_j,
    unsigned int n_tile_rows, unsigned int n_tile_cols,
    unsigned int output_channel_start, unsigned int output_channel_end,
    const TensorSpec<const TInput *> &input,
    const TensorSpec<TOutput *> &output,
    const void *parameters,
    void *working_space_raw
  ) const override
  {
    using Invoker = depthwise_depthfirst::Invoke<TInput, TWeight, TOutput, TAccum, OutputStage>;
    auto ws = reinterpret_cast<WorkingSpace *>(working_space_raw);
    const auto strat = reinterpret_cast<const StratType *>(this->m_strat.get());
    const auto os = this->get_output_stage();

    if (Invoker::supports_direct_kernel)
    {
      // If the direct kernel is supported, then use it.
      // Compute the base pointers we'll use in the tile.
      auto outptr = output.base + output_channel_start + output_i * output.ld_row + output_j * output.ld_col;
      const int start_input_i = output_i * this->m_args.stride_rows - this->m_args.padding.top;
      const int start_input_j = output_j * this->m_args.stride_cols - this->m_args.padding.left;
      auto inptr = input.base + output_channel_start + start_input_i * input.ld_row + start_input_j * input.ld_col;

      // Execute the kernel
      Invoker::direct(
        strat, ws, os,
        n_tile_rows, n_tile_cols,
        inptr, input.ld_row, input.ld_col,
        outptr, output.ld_row, output.ld_col,
        parameters, output_channel_end - output_channel_start
      );
    }
    else
    {
      // Otherwise, we repeatedly call the padded kernel but use our knowledge
      // of the tensor structure to avoid recomputing the pointer array.
      const auto input_channel_start = output_channel_start / this->m_args.channel_multiplier;

      const auto n_input_pointers = this->m_strat->get_input_rows() * this->m_strat->get_input_cols();
      const auto input_point_stride = input.ld_col * this->m_strat->get_output_cols() * this->m_args.stride_cols;
      const auto n_output_pointers = this->m_strat->get_output_rows() * this->m_strat->get_output_cols();
      const auto output_point_stride = output.ld_col * this->m_strat->get_output_cols();

      // For each tile row, initialise the input and output pointer arrays. For
      // each subsequent tile we simply update the pointers.
      for (unsigned int tile_i = 0; tile_i < n_tile_rows; tile_i++)
      {
        const int input_i = static_cast<int>(output_i * this->m_args.stride_rows) - this->m_args.padding.top;
        const int input_j = static_cast<int>(output_j * this->m_args.stride_cols) - this->m_args.padding.left;

        fill_pointer_array<const TInput>(
          ws->inptr_array, this->m_strat->get_input_rows(), this->m_strat->get_input_cols(),
          input.base + input_i*input.ld_row + input_j*input.ld_col + input_channel_start,
          input.ld_row, input.ld_col,
          ws->input_buffer,
          0, this->m_args.input_rows,
          0, this->m_args.input_cols
        );

        // Compute the output pointer array
        fill_pointer_array(
          ws->outptr_array, this->m_strat->get_output_rows(), this->m_strat->get_output_cols(),
          output.base + output_i*output.ld_row + output_j*output.ld_col + output_channel_start,
          output.ld_row, output.ld_col,
          ws->output_buffer,
          0, this->m_args.output_rows,
          0, this->m_args.output_cols
        );

        for (unsigned int tile_j = 0; tile_j < n_tile_cols; tile_j++)
        {
          // Invoke the indirect kernel for this tile
          depthwise_depthfirst::Invoke<TInput, TWeight, TOutput, TAccum, OutputStage>::indirect(
            strat, ws, os, parameters, m_bias, output_channel_end - output_channel_start
          );

          // Progress the pointers
          for (auto i = 0u; i < n_input_pointers; i++)
          {
            ws->inptr_array[i] += input_point_stride;
          }
          for (auto i = 0u; i < n_output_pointers; i++)
          {
            ws->outptr_array[i] += output_point_stride;
          }
        }

        output_i += this->m_strat->get_output_rows();
      }
    }
  }
};

}  // namespace depthwise
}  // namespace arm_conv