xref: /aosp_15_r20/external/ComputeLibrary/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "activation_float_helpers.h"
#include "helpers.h"
#include "tile_helpers.h"
// *INDENT-OFF*
// clang-format off
#if defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the depthwise convolution for floating-point data types (F32/F16)
 *
 * @note Data layout supported: NHWC
 * @note Data type supported: F32/F16
 * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
 * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
 * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
 * @note The convolution dilations must be passed at compile time using -DDILATION_X and -DDILATION_Y (e.g. -DDILATION_X=2, -DDILATION_Y=2)
 * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
 * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
 * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
 * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
 * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
 * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
 * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
 * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
 * @note The number of output rows along the width (M0) processed per work-item must be passed at compile time using -DM0 (e.g. -DM0=2)
 * @note The number of output channels (N0) processed per work-item must be passed at compile time using -DN0 (e.g. -DN0=2)
 * @note The size of the partial store block in the first dimension must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
 * @note Only the following configurations of M0 and N0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, ..., n (M0 != 1 is supported only with STRIDE_X == 1 && DILATION_X == 1)
 *  - N0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
 * @note The number of rows to read from the src tensor must be passed at compile time using -DM0_A (e.g., -DM0_A=3). M0_A must be equal to WEI_WIDTH + (M0 - 1)
 * @note The number of columns to read from the src tensor must be passed at compile time using -DN0_A. It can either be 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
 * @note An illustrative, non-normative example of build options is sketched in the comment block right after this documentation
 *
 * @param[in]  src_img                           (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data type: F16/F32
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_stride_w                      Stride of the source tensor in W dimension (in bytes)
 * @param[in]  src_c                             The size of the channels dimension of the source tensor
 * @param[in]  src_w                             The size of the width dimension of the source tensor
 * @param[in]  src_h                             The size of the height dimension of the source tensor
 * @param[in]  src_n                             The size of the batches dimension of the source tensor
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[out] dst_img                           (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data type: same as @p src_ptr
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_c                             The size of the channels dimension of the destination tensor
 * @param[in]  dst_w                             The size of the width dimension of the destination tensor
 * @param[in]  dst_h                             The size of the height dimension of the destination tensor
 * @param[in]  dst_n                             The size of the batches dimension of the destination tensor
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in]  wei_img                           (Optional) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
 * @param[in]  wei_ptr                           Pointer to the weights tensor. Supported data type: same as @p src_ptr
 * @param[in]  wei_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in]  wei_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in]  wei_stride_w                      Stride of the weights tensor in W dimension (in bytes)
 * @param[in]  wei_c                             The size of the channels dimension of the weights tensor
 * @param[in]  wei_w                             The size of the width dimension of the weights tensor
 * @param[in]  wei_h                             The size of the height dimension of the weights tensor
 * @param[in]  wei_n                             The size of the batches dimension of the weights tensor
 * @param[in]  wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in]  bia_ptr                           (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
 * @param[in]  bia_stride_x                      (Optional) Stride of the bias tensor in X dimension (in bytes)
 * @param[in]  bia_step_x                        (Optional) bia_stride_x * number of elements along X processed per work item (in bytes)
 * @param[in]  bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
 */
//! @endcond
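// Illustrative example only (an assumption for documentation purposes, not options emitted by
// the library): a plausible set of build options for a 3x3 depthwise convolution with unit
// stride and dilation, 1-pixel padding, FP16 data, BUFFER tensors and DEPTH_MULTIPLIER == 1.
// The actual options and values are chosen by the host-side code when this kernel is compiled,
// and the list below is not exhaustive (the activation macros, SRC_WIDTH/SRC_HEIGHT/DST_WIDTH
// and the optional HAS_BIAS/BATCHED_EXECUTION defines are also expected, depending on the
// configuration):
//
//   -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DPAD_LEFT=1 -DPAD_TOP=1
//   -DSTRIDE_X=1 -DSTRIDE_Y=1 -DDILATION_X=1 -DDILATION_Y=1
//   -DM0=2 -DN0=4 -DPARTIAL_N0=0 -DM0_A=4 -DN0_A=4 -DDEPTH_MULTIPLIER=1
//   -DSRC_TENSOR_TYPE=BUFFER -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER
//   -DSRC_DATA_TYPE=half -DWEI_DATA_TYPE=half -DDST_DATA_TYPE=half -DACC_DATA_TYPE=half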
__kernel void dwc_native_fp_nhwc(
    TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
    TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
    TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bia)
#endif // defined(HAS_BIAS)
)
{
    // Only the weight tensor dimensions are passed at compile time.
    // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
#define _IWEI_WIDTH WEI_WIDTH
#define _IWEI_HEIGHT WEI_HEIGHT
#define _IM0_A M0_A        // _IWEI_WIDTH + (M0 - 1) Rows tile A (If M0 != 1, the tiles overlap by 1 element on the X dimension)
#define _IN0_A N0_A        // Cols tile A. It can be either 1 (for DEPTH_MULTIPLIER > 1) or N0 (for DEPTH_MULTIPLIER == 1)
#define _IM0_B _IWEI_WIDTH // Rows tile B
#define _IN0_B N0          // Cols tile B
#define _IBOUNDARY_CHECK (!((WEI_WIDTH == 1 && WEI_HEIGHT == 1 && PAD_LEFT == 0 && PAD_TOP == 0 && M0 == 1)))
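    // _IBOUNDARY_CHECK evaluates to 0 only for 1x1 kernels with no padding and M0 == 1, where
    // every source access is guaranteed to be in bounds; in all other cases the source loads
    // below are performed with out-of-bound checks enabled.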

    const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
    const int xo   = GET_SPATIAL_IDX(1, M0, 0);          // WIDTH
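    // When BATCHED_EXECUTION is defined, the height and batch indices are folded into the
    // third dispatch dimension: e.g. with dst_h = 16, a global index of 35 along dimension 2
    // yields yo = 35 % 16 = 3 and bout = 35 / 16 = 2.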
#if defined(BATCHED_EXECUTION)
    const int yo   = GET_SPATIAL_IDX(2, 1, 0) % dst_h; // HEIGHT
    const int bout = GET_SPATIAL_IDX(2, 1, 0) / dst_h; // BATCH SIZE IDX
#else                                                  // defined(BATCHED_EXECUTION)
    const int yo   = GET_SPATIAL_IDX(2, 1, 0); // HEIGHT
    const int bout = 0;                        // BATCH SIZE IDX
#endif                                                 // defined(BATCHED_EXECUTION)

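    // Map the output coordinate (xo, yo) to the top-left input coordinate of its receptive
    // field. The result can be negative inside the padded region: e.g. with STRIDE_X = 2 and
    // PAD_LEFT = 1, xo = 0 maps to xi = -1 and xo = 3 maps to xi = 5. Out-of-range positions
    // are left as zeros by the boundary-checked loads below.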
    int xi = xo * STRIDE_X;
    int yi = yo * STRIDE_Y;
    xi -= PAD_LEFT;
    yi -= PAD_TOP;

    TILE(ACC_DATA_TYPE, M0, N0, c);

    // Reset accumulators
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c[i].v = 0;
    })

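    // The loop over the kernel height is fully unrolled only for small kernels
    // (_IWEI_HEIGHT < 5); taller kernels fall back to a runtime loop, presumably to keep the
    // amount of unrolled code in check.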
#if _IWEI_HEIGHT < 5
    LOOP_UNROLLING(int, yk, 0, 1, _IWEI_HEIGHT,
#else  // _IWEI_HEIGHT < 5
    for(int yk = 0; yk < _IWEI_HEIGHT; ++yk)
#endif // _IWEI_HEIGHT < 5
    {
        TILE(SRC_DATA_TYPE, _IM0_A, _IN0_A, a);

        LOOP_UNROLLING(int, i, 0, 1, _IM0_A,
        {
            a[i].v = 0;
        })

        // Load tile from the src tensor (TILE A)
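        // When DEPTH_MULTIPLIER > 1, the N0 output channels processed by this work-item all
        // read from the same input channel (cout / DEPTH_MULTIPLIER), so a single column is
        // loaded and later broadcast across the N0 accumulators.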
        T_LOAD_NHWC_WITH_DILATION(SRC_DATA_TYPE, 1, _IM0_A, _IN0_A, SRC_TENSOR_TYPE, src, bout, yi + yk * DILATION_Y, xi, (cout / DEPTH_MULTIPLIER), SRC_WIDTH, SRC_HEIGHT, DILATION_X, 1, _IBOUNDARY_CHECK, a);

        TILE(WEI_DATA_TYPE, _IM0_B, _IN0_B, b);

        // Load tile from the weights tensor (TILE B)
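        // The weights for the current output channels are laid out with the kernel's spatial
        // positions flattened along the tensor's Y dimension, so kernel row yk starts at
        // y = yk * _IM0_B, with the N0 channels contiguous along X starting at cout.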
        T_LOAD(WEI_DATA_TYPE, _IM0_B, _IN0_B, WEI_TENSOR_TYPE, wei, cout, yk * _IM0_B, 1, wei_stride_y, b);

        // Optimized path for STRIDE_X == 1
        // If M0 != 1, the input values shared by adjacent kernel applications along the X (WIDTH) dimension are loaded only once
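        // For each of the M0 output width positions this accumulates a 1D correlation along
        // the kernel width, element-wise on the N0 channels:
        //   c[m0] += a[xk + m0] * b[xk]   for xk in [0, _IWEI_WIDTH)
        // Adjacent m0 values reuse the overlapping rows of tile A, which is why M0 > 1 is
        // restricted to STRIDE_X == 1 and DILATION_X == 1.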
        LOOP_UNROLLING(int, m0, 0, 1, M0,
        {
            LOOP_UNROLLING(int, xk, 0, 1, _IWEI_WIDTH,
            {
#if GPU_ARCH == GPU_ARCH_MIDGARD
                c[m0].v += a[xk + m0].v * b[xk].v;
#else  // GPU_ARCH == GPU_ARCH_MIDGARD
                c[m0].v = fma(a[xk + m0].v, b[xk].v, c[m0].v);
#endif // GPU_ARCH == GPU_ARCH_MIDGARD
            })
        })
    }
#if _IWEI_HEIGHT < 5
                      )
#endif // _IWEI_HEIGHT < 5

#if defined(HAS_BIAS)
    TILE(BIA_DATA_TYPE, 1, N0, bias0);

    T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 0, 0, bias0);

    // c = c + bias[broadcasted]
    T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);
#endif // defined(HAS_BIAS)

    T_ACTIVATION(ACC_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);

    TILE(uint, M0, 1, dst_indirect_y);

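    // When the number of output channels is not a multiple of N0, PARTIAL_N0 holds the
    // remainder and the first work-item along dimension 0 stores only PARTIAL_N0 values to
    // avoid writing out of bounds. The x coordinate is clamped to DST_WIDTH - 1, and the
    // stores are issued from the highest x downwards so the in-range value is written last.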
    bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;

    if(x_cond)
    {
        LOOP_UNROLLING(int, m0, 0, 1, M0,
        {
            int xi_out = min(xo + M0 - 1 - m0, (int)(DST_WIDTH) - 1);
            VSTORE_PARTIAL(N0, PARTIAL_N0)
            (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
        })
    }
    else
    {
        LOOP_UNROLLING(int, m0, 0, 1, M0,
        {
            int xi_out = min(xo + M0 - 1 - m0, (int)(DST_WIDTH) - 1);
            VSTORE(N0)
            (c[M0 - 1 - m0].v, 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + cout * sizeof(DST_DATA_TYPE) + (uint)xi_out * dst_stride_y + (uint)yo * dst_stride_z + (uint)bout * dst_stride_w));
        })
    }
}
#endif // defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP)
// *INDENT-ON*
// clang-format on