1R"( 2 3 4 5#ifndef ARM_COMPUTE_HELPER_H 6#define ARM_COMPUTE_HELPER_H 7 8 9 10 11#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 12 VSTORE(N0) \ 13 (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 14 15#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 16 STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 17 VSTORE(N0) \ 18 (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 19 20#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 21 STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 22 VSTORE(N0) \ 23 (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 24 25#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 26 STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 27 VSTORE(N0) \ 28 (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 29 30#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 31 STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 32 VSTORE(N0) \ 33 (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 34 35#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 36 STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 37 VSTORE(N0) \ 38 (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 39 40#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 41 STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 42 VSTORE(N0) \ 43 (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 44 45#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 46 STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 47 VSTORE(N0) \ 48 (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 49 50#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 51 STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 52 VSTORE(N0) \ 53 (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 54 55#define 
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 56 STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 57 VSTORE(N0) \ 58 (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 59 60#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 61 STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 62 VSTORE(N0) \ 63 (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 64 65#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 66 STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 67 VSTORE(N0) \ 68 (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 69 70#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 71 STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 72 VSTORE(N0) \ 73 (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 74 75#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 76 STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 77 VSTORE(N0) \ 78 (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 79 80#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 81 STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 82 VSTORE(N0) \ 83 (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 84 85#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 86 STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 87 VSTORE(N0) \ 88 (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 89 90 91 92#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 93 VSTORE(N0) \ 94 (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 95 96#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 97 CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 98 VSTORE(N0) \ 99 (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE 
*)(PTR + 1 * STRIDE_Y + Z##1)); 100 101#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 102 CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 103 VSTORE(N0) \ 104 (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 105 106#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 107 CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 108 VSTORE(N0) \ 109 (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 110 111#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 112 CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 113 VSTORE(N0) \ 114 (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 115 116#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 117 CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 118 VSTORE(N0) \ 119 (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 120 121#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 122 CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 123 VSTORE(N0) \ 124 (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 125 126#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 127 CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 128 VSTORE(N0) \ 129 (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 130 131#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 132 CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 133 VSTORE(N0) \ 134 (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 
* STRIDE_Y + Z##8)); 135 136/* Convert-saturate each row vector to DATA_TYPE and store rows 10..16. Fix: parameter renamed DATA -> DATA_TYPE so CONVERT_STORE_ROW_10 uses its own type argument instead of silently relying on a globally #defined DATA_TYPE, consistent with CONVERT_STORE_ROW_1..9 and _11..16. */ #define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 137 CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 138 VSTORE(N0) \ 139 (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 140 141#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 142 CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 143 VSTORE(N0) \ 144 (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 145 146#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 147 CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 148 VSTORE(N0) \ 149 (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 150 151#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 152 CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 153 VSTORE(N0) \ 154 (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 155 156#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 157 CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 158 VSTORE(N0) \ 159 (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 160 161#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 162 CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 163 VSTORE(N0) \ 164 (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 165 166#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 167 CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 168 VSTORE(N0) \ 169 (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 
15 * STRIDE_Y + Z##F)); 170 171 172 173 174#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 175#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 176 177 178 179#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 180#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 181 182 183 184#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 185 VSTORE_PARTIAL(N0, STORE_N0) \ 186 (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 187 188#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 189 STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 190 VSTORE_PARTIAL(N0, STORE_N0) \ 191 (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 192 193#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 194 STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 195 VSTORE_PARTIAL(N0, STORE_N0) \ 196 (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 197 198#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 199 STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 200 VSTORE_PARTIAL(N0, STORE_N0) \ 201 (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 202 203#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 204 STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 205 VSTORE_PARTIAL(N0, STORE_N0) \ 206 (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 207 208#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, 
PTR, STRIDE_Y, Z) \ 209 STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 210 VSTORE_PARTIAL(N0, STORE_N0) \ 211 (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 212 213#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 214 STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 215 VSTORE_PARTIAL(N0, STORE_N0) \ 216 (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 217 218#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 219 STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 220 VSTORE_PARTIAL(N0, STORE_N0) \ 221 (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 222 223#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 224 STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 225 VSTORE_PARTIAL(N0, STORE_N0) \ 226 (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 227 228#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 229 STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 230 VSTORE_PARTIAL(N0, STORE_N0) \ 231 (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 232 233#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 234 STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 235 VSTORE_PARTIAL(N0, STORE_N0) \ 236 (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 237 238#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 239 STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 240 VSTORE_PARTIAL(N0, STORE_N0) \ 241 (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 242 243#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 244 STORE_ROW_PARTIAL_12(N0, 
STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 245 VSTORE_PARTIAL(N0, STORE_N0) \ 246 (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 247 248#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 249 STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 250 VSTORE_PARTIAL(N0, STORE_N0) \ 251 (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 252 253#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 254 STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 255 VSTORE_PARTIAL(N0, STORE_N0) \ 256 (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 257 258#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 259 STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 260 VSTORE_PARTIAL(N0, STORE_N0) \ 261 (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 262 263 264 265#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 266#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 267 268#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 269 if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ 270 { \ 271 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 272 } \ 273 else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ 274 { \ 275 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 276 } \ 277 else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ 278 { \ 279 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 280 } 
\ 281 else \ 282 { \ 283 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 284 } 285 286#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ 287 if(!(PARTIAL_COND_X)) \ 288 { \ 289 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 290 } \ 291 else \ 292 { \ 293 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 294 } 295 296#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ 297 if(!(PARTIAL_COND_Y)) \ 298 { \ 299 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 300 } \ 301 else \ 302 { \ 303 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 304 } 305 306 307#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) 308 309 310#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 311 312#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 313 STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 314 315#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 316 317#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 318 STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) 319 320#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 321 322#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 323 STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) 324 325#else 326 327#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, 
PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 328 STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) 329 330#endif 331 332#endif 333 334 335#if defined(PARTIAL_STORE_M0) 336 337#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 338 ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) 339#else 340#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 341 ((uint)(y * M0)) 342#endif 343 344 345 346#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ 347 STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) 348 349 350#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 351#pragma OPENCL EXTENSION cl_khr_fp16 : enable 352#endif 353 354#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) 355#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable 356#endif 357 358#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) 359#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable 360#endif 361 362#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) 363#pragma OPENCL EXTENSION cl_arm_printf : enable 364#endif 365 366#define GPU_ARCH_MIDGARD 0x100 367#define GPU_ARCH_BIFROST 0x200 368#define GPU_ARCH_VALHALL 0x300 369 370 371#define CONCAT(a, b) a##b 372 373 374#define EXPAND(x) x 375 376 377#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) 378 379 380#define REV1(x) ((x)) 381#define REV2(x) ((x).s10) 382#define REV3(x) ((x).s210) 383#define REV4(x) ((x).s3210) 384#define REV8(x) ((x).s76543210) 385#define REV16(x) ((x).sFEDCBA9876543210) 386 387 388 389#define REVERSE_STR(x, s) REV##s((x)) 390#define REVERSE(x, s) REVERSE_STR(x, s) 391 392 393 394#define ROT1_0(x) ((x)) 395#define ROT1_1(x) ((x)) 396 397#define ROT2_0(x) ((x)) 398#define 
ROT2_1(x) ((x).s10) 399#define ROT2_2(x) ((x)) 400 401#define ROT3_0(x) ((x)) 402#define ROT3_1(x) ((x).s201) 403#define ROT3_2(x) ((x).s120) 404#define ROT3_3(x) ((x)) 405 406#define ROT4_0(x) ((x)) 407#define ROT4_1(x) ((x).s3012) 408#define ROT4_2(x) ((x).s2301) 409#define ROT4_3(x) ((x).s1230) 410#define ROT4_4(x) ((x)) 411 412#define ROT8_0(x) ((x)) 413#define ROT8_1(x) ((x).s70123456) 414#define ROT8_2(x) ((x).s67012345) 415#define ROT8_3(x) ((x).s56701234) 416#define ROT8_4(x) ((x).s45670123) 417#define ROT8_5(x) ((x).s34567012) 418#define ROT8_6(x) ((x).s23456701) 419#define ROT8_7(x) ((x).s12345670) 420#define ROT8_8(x) ((x)) 421 422#define ROT16_0(x) ((x)) 423#define ROT16_1(x) ((x).sF0123456789ABCDE) 424#define ROT16_2(x) ((x).sEF0123456789ABCD) 425#define ROT16_3(x) ((x).sDEF0123456789ABC) 426#define ROT16_4(x) ((x).sCDEF0123456789AB) 427#define ROT16_5(x) ((x).sBCDEF0123456789A) 428#define ROT16_6(x) ((x).sABCDEF0123456789) 429#define ROT16_7(x) ((x).s9ABCDEF012345678) 430#define ROT16_8(x) ((x).s89ABCDEF01234567) 431#define ROT16_9(x) ((x).s789ABCDEF0123456) 432#define ROT16_10(x) ((x).s6789ABCDEF012345) 433#define ROT16_11(x) ((x).s56789ABCDEF01234) 434#define ROT16_12(x) ((x).s456789ABCDEF0123) 435#define ROT16_13(x) ((x).s3456789ABCDEF012) 436#define ROT16_14(x) ((x).s23456789ABCDEF01) 437#define ROT16_15(x) ((x).s123456789ABCDEF0) 438#define ROT16_16(x) ((x)) 439 440 441 442#define ROTATE_STR(x, s, n) ROT##s##_##n(x) 443#define ROTATE(x, s, n) ROTATE_STR(x, s, n) 444 445 446 447#define V_OFFS1(dt) (dt##1)(0) 448#define V_OFFS2(dt) (dt##2)(0, 1) 449#define V_OFFS3(dt) (dt##3)(0, 1, 2) 450#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) 451#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) 452#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) 453 454 455 456#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) 457#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) 458 459 460#define VLOAD_STR(size) vload##size 461#define VLOAD(size) 
VLOAD_STR(size) 462 463 464#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size 465#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) 466 467#define NO_LOAD(data, offs, ptr) \ 468 { \ 469 } 470 471 472#define vload_partial_1_0 NO_LOAD 473#define vload_partial_1_1 vload1 474#define vload_partial_1_2 NO_LOAD 475#define vload_partial_1_3 NO_LOAD 476#define vload_partial_1_4 NO_LOAD 477#define vload_partial_1_5 NO_LOAD 478#define vload_partial_1_6 NO_LOAD 479#define vload_partial_1_7 NO_LOAD 480#define vload_partial_1_8 NO_LOAD 481#define vload_partial_1_9 NO_LOAD 482#define vload_partial_1_10 NO_LOAD 483#define vload_partial_1_11 NO_LOAD 484#define vload_partial_1_12 NO_LOAD 485#define vload_partial_1_13 NO_LOAD 486#define vload_partial_1_14 NO_LOAD 487#define vload_partial_1_15 NO_LOAD 488#define vload_partial_1_16 NO_LOAD 489 490#define vload_partial_2_0 NO_LOAD 491#define vload_partial_2_1 vload_partial_1 492#define vload_partial_2_2 vload_partial_2 493#define vload_partial_2_3 NO_LOAD 494#define vload_partial_2_4 NO_LOAD 495#define vload_partial_2_5 NO_LOAD 496#define vload_partial_2_6 NO_LOAD 497#define vload_partial_2_7 NO_LOAD 498#define vload_partial_2_8 NO_LOAD 499#define vload_partial_2_9 NO_LOAD 500#define vload_partial_2_10 NO_LOAD 501#define vload_partial_2_11 NO_LOAD 502#define vload_partial_2_12 NO_LOAD 503#define vload_partial_2_13 NO_LOAD 504#define vload_partial_2_14 NO_LOAD 505#define vload_partial_2_15 NO_LOAD 506#define vload_partial_2_16 NO_LOAD 507 508#define vload_partial_3_0 NO_LOAD 509#define vload_partial_3_1 vload_partial_1 510#define vload_partial_3_2 vload_partial_2 511#define vload_partial_3_3 vload_partial_3 512#define vload_partial_3_4 NO_LOAD 513#define vload_partial_3_5 NO_LOAD 514#define vload_partial_3_6 NO_LOAD 515#define vload_partial_3_7 NO_LOAD 516#define vload_partial_3_8 NO_LOAD 517#define vload_partial_3_9 NO_LOAD 518#define vload_partial_3_10 NO_LOAD 519#define 
vload_partial_3_11 NO_LOAD 520#define vload_partial_3_12 NO_LOAD 521#define vload_partial_3_13 NO_LOAD 522#define vload_partial_3_14 NO_LOAD 523#define vload_partial_3_15 NO_LOAD 524#define vload_partial_3_16 NO_LOAD 525 526#define vload_partial_4_0 NO_LOAD 527#define vload_partial_4_1 vload_partial_1 528#define vload_partial_4_2 vload_partial_2 529#define vload_partial_4_3 vload_partial_3 530#define vload_partial_4_4 vload_partial_4 531#define vload_partial_4_5 NO_LOAD 532#define vload_partial_4_6 NO_LOAD 533#define vload_partial_4_7 NO_LOAD 534#define vload_partial_4_8 NO_LOAD 535#define vload_partial_4_9 NO_LOAD 536#define vload_partial_4_10 NO_LOAD 537#define vload_partial_4_11 NO_LOAD 538#define vload_partial_4_12 NO_LOAD 539#define vload_partial_4_13 NO_LOAD 540#define vload_partial_4_14 NO_LOAD 541#define vload_partial_4_15 NO_LOAD 542#define vload_partial_4_16 NO_LOAD 543 544#define vload_partial_8_0 NO_LOAD 545#define vload_partial_8_1 vload_partial_1 546#define vload_partial_8_2 vload_partial_2 547#define vload_partial_8_3 vload_partial_3 548#define vload_partial_8_4 vload_partial_4 549#define vload_partial_8_5 vload_partial_5 550#define vload_partial_8_6 vload_partial_6 551#define vload_partial_8_7 vload_partial_7 552#define vload_partial_8_8 vload_partial_8 553#define vload_partial_8_9 NO_LOAD 554#define vload_partial_8_10 NO_LOAD 555#define vload_partial_8_11 NO_LOAD 556#define vload_partial_8_12 NO_LOAD 557#define vload_partial_8_13 NO_LOAD 558#define vload_partial_8_14 NO_LOAD 559#define vload_partial_8_15 NO_LOAD 560#define vload_partial_8_16 NO_LOAD 561 562#define vload_partial_16_0 NO_LOAD 563#define vload_partial_16_1 vload_partial_1 564#define vload_partial_16_2 vload_partial_2 565#define vload_partial_16_3 vload_partial_3 566#define vload_partial_16_4 vload_partial_4 567#define vload_partial_16_5 vload_partial_5 568#define vload_partial_16_6 vload_partial_6 569#define vload_partial_16_7 vload_partial_7 570#define vload_partial_16_8 
vload_partial_8 571#define vload_partial_16_9 vload_partial_9 572#define vload_partial_16_10 vload_partial_10 573#define vload_partial_16_11 vload_partial_11 574#define vload_partial_16_12 vload_partial_12 575#define vload_partial_16_13 vload_partial_13 576#define vload_partial_16_14 vload_partial_14 577#define vload_partial_16_15 vload_partial_15 578#define vload_partial_16_16 vload_partial_16 579 580 581#define vload_partial_1(DATA, OFFSET, PTR) \ 582 DATA.s0 = vload1(OFFSET, PTR); 583 584#define vload_partial_2(DATA, OFFSET, PTR) \ 585 DATA.s01 = vload2(OFFSET, PTR); 586 587#define vload_partial_3(DATA, OFFSET, PTR) \ 588 DATA.s012 = vload3(OFFSET, PTR); 589 590#define vload_partial_4(DATA, OFFSET, PTR) \ 591 DATA.s0123 = vload4(OFFSET, PTR); 592 593#define vload_partial_5(DATA, OFFSET, PTR) \ 594 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 595 DATA.s4 = vload1(OFFSET, PTR + 4); 596 597#define vload_partial_6(DATA, OFFSET, PTR) \ 598 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 599 vload_partial_2(DATA.s45, OFFSET, PTR + 4); 600 601#define vload_partial_7(DATA, OFFSET, PTR) \ 602 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 603 vload_partial_3(DATA.s456, OFFSET, PTR + 4); 604 605#define vload_partial_8(DATA, OFFSET, PTR) \ 606 DATA.s01234567 = vload8(OFFSET, PTR); 607 608#define vload_partial_9(DATA, OFFSET, PTR) \ 609 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 610 DATA.s8 = vload1(OFFSET, PTR + 8); 611 612#define vload_partial_10(DATA, OFFSET, PTR) \ 613 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 614 vload_partial_2(DATA.s89, OFFSET, PTR + 8); 615 616#define vload_partial_11(DATA, OFFSET, PTR) \ 617 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 618 vload_partial_3(DATA.s89A, OFFSET, PTR + 8); 619 620#define vload_partial_12(DATA, OFFSET, PTR) \ 621 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 622 vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); 623 624#define vload_partial_13(DATA, OFFSET, PTR) \ 625 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 626 
vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); 627 628#define vload_partial_14(DATA, OFFSET, PTR) \ 629 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 630 vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); 631 632#define vload_partial_15(DATA, OFFSET, PTR) \ 633 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 634 vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); 635 636#define vload_partial_16(DATA, OFFSET, PTR) \ 637 DATA = vload16(OFFSET, PTR); 638 639 640 641#define PIXEL_UNIT4 1 642#define PIXEL_UNIT8 2 643#define PIXEL_UNIT16 4 644 645 646#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size 647#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) 648 649 650#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); 651#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); 652#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); 653 654#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 655#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); 656#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); 657#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); 658#endif 659 660#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); 661#define 
write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); 662#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); 663 664#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 665#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); 666#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); 667#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); 668#endif 669 670 671#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) 672#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) 673 674 675#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) 676#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) 677 678#define VSTORE_STR(size) vstore##size 679#define VSTORE(size) VSTORE_STR(size) 680 681#define float1 float 682#define half1 half 683#define char1 char 684#define uchar1 uchar 685#define short1 short 686#define ushort1 ushort 687#define int1 int 688#define uint1 uint 
/*
 * Partial vector store helpers (OpenCL C).
 *
 * OpenCL provides vstore2/3/4/8/16 but no 1-element variant, so vstore1
 * (and the long1/ulong1/double1 type aliases) emulate it with a plain
 * pointer write.  VSTORE_PARTIAL(size, store_size) dispatches at
 * preprocessing time to vstore_partial_<size>_<store_size>, which stores
 * only the first <store_size> lanes of a <size>-wide vector.  Combinations
 * where <store_size> is 0 or exceeds <size> resolve to NO_STORE.
 */

/* 1-element "vector" type aliases so VEC_DATA_TYPE(type, 1) expands to a valid type. */
#define long1 long
#define ulong1 ulong
#define double1 double

/* 1-element load/store: a plain dereference, mirroring the vloadN/vstoreN call shape. */
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA

/* Two-level expansion so the size arguments are macro-expanded before token pasting. */
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)

/* Store nothing; expands to an empty block so it stays statement-safe. */
#define NO_STORE(data, offs, ptr) \
    { \
    }

/* Dispatch table: vstore_partial_<width>_<n> stores n of width lanes. */
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE

#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE

#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE

#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE

#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE

#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16

/* Implementations: 1/2/3/4/8/16 map directly to vstoreN; the remaining
 * widths are composed as a power-of-two store plus a smaller partial store
 * at the tail.  Note the 13/14/15 cases pass DATA.s89abcdef and let the
 * callee's swizzle (.s0123 / .s4 etc.) select the lanes it needs. */
#define vstore_partial_1(DATA, OFFSET, PTR) \
    vstore1(DATA.s0, OFFSET, PTR);

#define vstore_partial_2(DATA, OFFSET, PTR) \
    vstore2(DATA.s01, OFFSET, PTR);

#define vstore_partial_3(DATA, OFFSET, PTR) \
    vstore3(DATA.s012, OFFSET, PTR);

#define vstore_partial_4(DATA, OFFSET, PTR) \
    vstore4(DATA.s0123, OFFSET, PTR);

#define vstore_partial_5(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore1(DATA.s4, OFFSET, PTR + 4);

#define vstore_partial_6(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vstore_partial_7(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vstore_partial_8(DATA, OFFSET, PTR) \
    vstore8(DATA.s01234567, OFFSET, PTR);

#define vstore_partial_9(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore1(DATA.s8, OFFSET, PTR + 8);

#define vstore_partial_10(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vstore_partial_11(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);

#define vstore_partial_12(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);

#define vstore_partial_13(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_14(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_15(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_16(DATA, OFFSET, PTR) \
    vstore16(DATA, OFFSET, PTR);

/* Saturating-convert aliases for float/half: floating-point conversions have
 * no _sat variant in OpenCL, so they alias the plain converts.
 * NOTE(review): convert_half_sat maps to convert_float (not convert_half) --
 * looks intentional upstream but worth confirming. */
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_float
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16

/* 1-element convert aliases so CONVERT(x, type1) resolves to the scalar form. */
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double

#define convert_char1_sat convert_char_sat
/* 1-element saturating-convert aliases (scalar forms of convert_<type>N_sat).
 * The uchar2..16 entries are identity defines kept for table completeness. */
#define convert_uchar1_sat convert_uchar_sat
#define convert_uchar2_sat convert_uchar2_sat
#define convert_uchar3_sat convert_uchar3_sat
#define convert_uchar4_sat convert_uchar4_sat
#define convert_uchar8_sat convert_uchar8_sat
#define convert_uchar16_sat convert_uchar16_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat

/* VEC_DATA_TYPE(float, 4) -> float4.  Two-level expansion so macro
 * arguments are expanded before token pasting. */
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)

/* CONVERT(x, int4) -> (convert_int4((x))), and the saturating / rounded variants. */
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)

#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)

/* Result type of a vector select()/comparison for each element type: an
 * integer vector of the same element size (half -> short, float -> int). */
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size

#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)

/* Signed integer vector of the same element size as the given type. */
#define signed_int_vec_dt_uchar(size) char##size
#define signed_int_vec_dt_char(size) char##size
#define signed_int_vec_dt_ushort(size) short##size
#define signed_int_vec_dt_short(size) short##size
#define signed_int_vec_dt_half(size) short##size
#define signed_int_vec_dt_uint(size) int##size
#define signed_int_vec_dt_int(size) int##size
#define signed_int_vec_dt_float(size) int##size
#define signed_int_vec_dt_ulong(size) long##size
#define signed_int_vec_dt_long(size) long##size

#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)

/* Horizontal reductions over a vector's lanes, built by recursive halving.
 * Only widths 1/2/3/4/8/16 are provided. */
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)

#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)

#define prod_reduce_1(x) (x)
#define prod_reduce_2(x) ((x).s0) * ((x).s1)
#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)

#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)

#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))

#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)

/* Kernel-parameter expansion macros: each declares the pointer, per-dimension
 * strides/steps and the byte offset of the first element for a tensor of the
 * given rank, with all names prefixed by `name##_`. */
#define VECTOR_DECLARATION(name)     \
    __global uchar *name##_ptr,      \
    uint name##_stride_x,            \
    uint name##_step_x,              \
    uint name##_offset_first_element_in_bytes

#define IMAGE_DECLARATION(name)      \
    __global uchar *name##_ptr,      \
    uint name##_stride_x,            \
    uint name##_step_x,              \
    uint name##_stride_y,            \
    uint name##_step_y,              \
    uint name##_offset_first_element_in_bytes

#define TENSOR3D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint name##_stride_x,            \
    uint name##_step_x,              \
    uint name##_stride_y,            \
    uint name##_step_y,              \
    uint name##_stride_z,            \
    uint name##_step_z,              \
    uint name##_offset_first_element_in_bytes

#define TENSOR4D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint name##_stride_x,            \
    uint name##_step_x,              \
    uint name##_stride_y,            \
    uint name##_step_y,              \
    uint name##_stride_z,            \
    uint name##_step_z,              \
    uint name##_stride_w,            \
    uint name##_step_w,              \
    uint name##_offset_first_element_in_bytes

#define TENSOR5D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint name##_stride_x,            \
    uint name##_step_x,              \
    uint name##_stride_y,            \
    uint name##_step_y,              \
    uint name##_stride_z,            \
    uint name##_step_z,              \
    uint name##_stride_w,            \
    uint name##_step_w,              \
    uint name##_stride_v,            \
    uint name##_step_v,              \
    uint name##_offset_first_element_in_bytes

/* Build a Vector/Image/Tensor3D struct from the expanded kernel parameters.
 * The _NO_STEP variants pass 0 steps so the work-item id does not advance
 * the pointer. */
#define CONVERT_TO_VECTOR_STRUCT(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)

#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)

#define CONVERT_TO_IMAGE_STRUCT(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)

#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)

/* NOTE(review): the original file defined CONVERT_TENSOR3D_TO_IMAGE_STRUCT
 * twice with an identical body; the redundant second definition was removed. */
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)

/* NOTE(review): unlike the other _NO_STEP variants this keeps name##_step_z
 * instead of 0 -- matches upstream, but confirm it is intentional. */
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT(name)                                                                                                           \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)

#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size)                                                                                                 \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)

#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name)                                                                                       \
    tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                           name##_stride_z, name##_step_z)

/* A 1-D tensor view: base pointer, first-element byte offset, byte stride. */
typedef struct Vector
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
} Vector;

/* A 2-D tensor view. */
typedef struct Image
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
} Image;

/* A 3-D tensor view. */
typedef struct Tensor3D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
} Tensor3D;

/* A 4-D tensor view. */
typedef struct Tensor4D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
    int stride_w;
} Tensor4D;

/* Build a Vector and advance its pointer to this work-item's element
 * (global id 0 scaled by step_x, in bytes). */
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
    Vector vector =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
    };
    vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
    return vector;
}

/* Build an Image and advance its pointer using global ids 0 (x) and 1 (y). */
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
    return img;
}

/* View a 3-D tensor as an Image: the z step still moves the pointer, but the
 * returned struct only carries x/y strides. */
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return img;
}

/* Build a Tensor3D and advance its pointer using global ids 0/1/2. */
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return tensor;
}

/* Build a Tensor3D without applying any work-item offset; step arguments are
 * accepted for signature parity but unused. */
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    return tensor;
}

/* Build a Tensor4D; global id 2 is split into z (id % mod_size) and
 * w (id / mod_size) components. */
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
                                             uint step_w,
                                             uint mod_size)
{
    Tensor4D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z,
        .stride_w                      = stride_w
    };

    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
    return tensor;
}

/* Byte address of element (x) relative to the vector's current pointer. */
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}

/* Byte address of element (x, y) relative to the image's current pointer. */
inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}

/* Byte address of element (x, y, z) relative to the tensor's current pointer. */
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}

/* Byte address of element (x, y, z, w) relative to the tensor's current pointer. */
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}

/* Convert a flat element index into an absolute byte address, decomposing the
 * index as index = (z * height + y) * width + x. */
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}

#endif /* ARM_COMPUTE_HELPER_H */

/* Working vector type for the activation kernels below; DATA_TYPE and
 * VEC_SIZE are expected as -D build options. */
#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)

/* Default the output quantization scale/offset to the input's when only one
 * side is provided at build time. */
#if defined(S1_VAL) && !defined(S2_VAL)
#define S2_VAL S1_VAL
#endif
#if defined(O1_VAL) && !defined(O2_VAL)
#define O2_VAL O1_VAL
#endif

/* Rectified linear: max(0, x). */
inline TYPE relu_op(TYPE x)
{
    return max((TYPE)CONST_0, x);
}

/* Bounded ReLU: min(A, max(0, x)). */
inline TYPE brelu_op(TYPE x)
{
    return min((TYPE)A_VAL, max((TYPE)CONST_0, x));
}

/* Lower/upper bounded ReLU: clamp x to [B, A]. */
inline TYPE lu_brelu_op(TYPE x)
{
    return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
}

/* Hard swish: x * relu6(x + 3) / 6 (1/6 approximated as 0.166666667f). */
inline TYPE hard_swish_op(TYPE x)
{
    return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
}

/* Identity pass-through.  The misspelled name is load-bearing: ACTIVATION_OP
 * pastes the caller-supplied token, so callers pass `identiy`. */
inline TYPE identiy_op(TYPE x)
{
    return x;
}

/* ACTIVATION_OP(relu, x) -> relu_op(x); two levels so `op` is expanded first. */
#define ACTIVATION_OP2(op, x) op##_op(x)
#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)

/* Apply the activation, then requantize in float:
 * out = round((in - O1) * S1/S2 + O2), saturating back to DATA_TYPE.
 * Falls back to scale-only or activation-only when offsets/scales are absent. */
#if defined(S1_VAL) && defined(S2_VAL)
#if defined(O1_VAL) && defined(O2_VAL)
#define PERFORM_ACTIVATION_QUANT(act, data)                                                       \
    ({                                                                                            \
        data = ACTIVATION_OP(act, data);                                                          \
        \
        VEC_DATA_TYPE(float, VEC_SIZE)                                                            \
        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));                                    \
        \
        fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));                           \
    })
#else
#define PERFORM_ACTIVATION_QUANT(act, data)                             \
    ({                                                                  \
        data = ACTIVATION_OP(act, data);                                \
        \
        VEC_DATA_TYPE(float, VEC_SIZE)                                  \
        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));          \
        \
        fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL));       \
        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
    })
#endif
#else
#define PERFORM_ACTIVATION_QUANT(act, data)  \
    ({                                       \
        data = ACTIVATION_OP(act, data);     \
    })
#endif

#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)

#if defined(FLOAT_DOMAIN)

/* Second copy of the common helper header.  The include guard was already
 * defined by the first copy at the top of this file, so the preprocessor
 * skips everything inside this #ifndef; it is kept for source parity. */
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H

/* STORE_ROW_n: store rows 0..n-1 of an M0xN0 block (BASENAME##row vectors)
 * at PTR + row * STRIDE_Y + Z##row, recursively reusing STORE_ROW_(n-1).
 * Rows 10..15 use hex digits A..F in the variable suffix. */
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0)                                                 \
    (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)      \
    VSTORE(N0)                                                  \
    (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));

/* CONVERT_STORE_ROW_n: same as STORE_ROW_n but saturating-converts each row
 * vector to DATA_TYPE before storing. */
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

/* FIX: the second parameter was declared as `DATA` while the body used
 * DATA_TYPE, so the body silently bound to a global -DDATA_TYPE build define
 * instead of the macro argument.  Renamed to DATA_TYPE so the argument is
 * actually used, matching every other CONVERT_STORE_ROW_n. */
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)      \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));

/* STORE_BLOCK(M0, ...) dispatches to STORE_ROW_<M0>; two levels so M0 is
 * expanded before pasting. */
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

1500#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 1501#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 1502 1503 1504 1505#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1506 VSTORE_PARTIAL(N0, STORE_N0) \ 1507 (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 1508 1509#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1510 STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1511 VSTORE_PARTIAL(N0, STORE_N0) \ 1512 (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 1513 1514#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1515 STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1516 VSTORE_PARTIAL(N0, STORE_N0) \ 1517 (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 1518 1519#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1520 STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1521 VSTORE_PARTIAL(N0, STORE_N0) \ 1522 (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 1523 1524#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1525 STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1526 VSTORE_PARTIAL(N0, STORE_N0) \ 1527 (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 1528 1529#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1530 STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1531 VSTORE_PARTIAL(N0, STORE_N0) \ 1532 (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 1533 1534#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, 
STRIDE_Y, Z) \ 1535 STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1536 VSTORE_PARTIAL(N0, STORE_N0) \ 1537 (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 1538 1539#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1540 STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1541 VSTORE_PARTIAL(N0, STORE_N0) \ 1542 (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 1543 1544#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1545 STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1546 VSTORE_PARTIAL(N0, STORE_N0) \ 1547 (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 1548 1549#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1550 STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1551 VSTORE_PARTIAL(N0, STORE_N0) \ 1552 (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 1553 1554#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1555 STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1556 VSTORE_PARTIAL(N0, STORE_N0) \ 1557 (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 1558 1559#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1560 STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1561 VSTORE_PARTIAL(N0, STORE_N0) \ 1562 (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 1563 1564#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1565 STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1566 VSTORE_PARTIAL(N0, STORE_N0) \ 1567 (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 1568 1569#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 
1570 STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1571 VSTORE_PARTIAL(N0, STORE_N0) \ 1572 (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 1573 1574#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1575 STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1576 VSTORE_PARTIAL(N0, STORE_N0) \ 1577 (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 1578 1579#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1580 STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 1581 VSTORE_PARTIAL(N0, STORE_N0) \ 1582 (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 1583 1584 1585 1586#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 1587#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 1588 1589#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 1590 if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ 1591 { \ 1592 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1593 } \ 1594 else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ 1595 { \ 1596 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1597 } \ 1598 else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ 1599 { \ 1600 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1601 } \ 1602 else \ 1603 { \ 1604 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1605 } 1606 1607#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, 
STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ 1608 if(!(PARTIAL_COND_X)) \ 1609 { \ 1610 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1611 } \ 1612 else \ 1613 { \ 1614 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1615 } 1616 1617#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ 1618 if(!(PARTIAL_COND_Y)) \ 1619 { \ 1620 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1621 } \ 1622 else \ 1623 { \ 1624 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 1625 } 1626 1627 1628#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) 1629 1630 1631#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 1632 1633#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 1634 STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 1635 1636#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 1637 1638#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 1639 STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) 1640 1641#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 1642 1643#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 1644 STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) 1645 1646#else 1647 1648#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 1649 STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, 
PARTIAL_COND_Y, PARTIAL_COND_X) 1650 1651#endif 1652 1653#endif 1654 1655 1656#if defined(PARTIAL_STORE_M0) 1657 1658#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 1659 ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) 1660#else 1661#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 1662 ((uint)(y * M0)) 1663#endif 1664 1665 1666 1667#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ 1668 STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) 1669 1670 1671#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 1672#pragma OPENCL EXTENSION cl_khr_fp16 : enable 1673#endif 1674 1675#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) 1676#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable 1677#endif 1678 1679#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) 1680#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable 1681#endif 1682 1683#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) 1684#pragma OPENCL EXTENSION cl_arm_printf : enable 1685#endif 1686 1687#define GPU_ARCH_MIDGARD 0x100 1688#define GPU_ARCH_BIFROST 0x200 1689#define GPU_ARCH_VALHALL 0x300 1690 1691 1692#define CONCAT(a, b) a##b 1693 1694 1695#define EXPAND(x) x 1696 1697 1698#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) 1699 1700 1701#define REV1(x) ((x)) 1702#define REV2(x) ((x).s10) 1703#define REV3(x) ((x).s210) 1704#define REV4(x) ((x).s3210) 1705#define REV8(x) ((x).s76543210) 1706#define REV16(x) ((x).sFEDCBA9876543210) 1707 1708 1709 1710#define REVERSE_STR(x, s) REV##s((x)) 1711#define REVERSE(x, s) REVERSE_STR(x, s) 1712 1713 1714 1715#define ROT1_0(x) ((x)) 1716#define ROT1_1(x) ((x)) 1717 1718#define ROT2_0(x) ((x)) 1719#define ROT2_1(x) ((x).s10) 1720#define ROT2_2(x) ((x)) 1721 1722#define ROT3_0(x) ((x)) 1723#define 
ROT3_1(x) ((x).s201) 1724#define ROT3_2(x) ((x).s120) 1725#define ROT3_3(x) ((x)) 1726 1727#define ROT4_0(x) ((x)) 1728#define ROT4_1(x) ((x).s3012) 1729#define ROT4_2(x) ((x).s2301) 1730#define ROT4_3(x) ((x).s1230) 1731#define ROT4_4(x) ((x)) 1732 1733#define ROT8_0(x) ((x)) 1734#define ROT8_1(x) ((x).s70123456) 1735#define ROT8_2(x) ((x).s67012345) 1736#define ROT8_3(x) ((x).s56701234) 1737#define ROT8_4(x) ((x).s45670123) 1738#define ROT8_5(x) ((x).s34567012) 1739#define ROT8_6(x) ((x).s23456701) 1740#define ROT8_7(x) ((x).s12345670) 1741#define ROT8_8(x) ((x)) 1742 1743#define ROT16_0(x) ((x)) 1744#define ROT16_1(x) ((x).sF0123456789ABCDE) 1745#define ROT16_2(x) ((x).sEF0123456789ABCD) 1746#define ROT16_3(x) ((x).sDEF0123456789ABC) 1747#define ROT16_4(x) ((x).sCDEF0123456789AB) 1748#define ROT16_5(x) ((x).sBCDEF0123456789A) 1749#define ROT16_6(x) ((x).sABCDEF0123456789) 1750#define ROT16_7(x) ((x).s9ABCDEF012345678) 1751#define ROT16_8(x) ((x).s89ABCDEF01234567) 1752#define ROT16_9(x) ((x).s789ABCDEF0123456) 1753#define ROT16_10(x) ((x).s6789ABCDEF012345) 1754#define ROT16_11(x) ((x).s56789ABCDEF01234) 1755#define ROT16_12(x) ((x).s456789ABCDEF0123) 1756#define ROT16_13(x) ((x).s3456789ABCDEF012) 1757#define ROT16_14(x) ((x).s23456789ABCDEF01) 1758#define ROT16_15(x) ((x).s123456789ABCDEF0) 1759#define ROT16_16(x) ((x)) 1760 1761 1762 1763#define ROTATE_STR(x, s, n) ROT##s##_##n(x) 1764#define ROTATE(x, s, n) ROTATE_STR(x, s, n) 1765 1766 1767 1768#define V_OFFS1(dt) (dt##1)(0) 1769#define V_OFFS2(dt) (dt##2)(0, 1) 1770#define V_OFFS3(dt) (dt##3)(0, 1, 2) 1771#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) 1772#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) 1773#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) 1774 1775 1776 1777#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) 1778#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) 1779 1780 1781#define VLOAD_STR(size) vload##size 1782#define VLOAD(size) VLOAD_STR(size) 1783 1784 1785#define 
VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size 1786#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) 1787 1788#define NO_LOAD(data, offs, ptr) \ 1789 { \ 1790 } 1791 1792 1793#define vload_partial_1_0 NO_LOAD 1794#define vload_partial_1_1 vload1 1795#define vload_partial_1_2 NO_LOAD 1796#define vload_partial_1_3 NO_LOAD 1797#define vload_partial_1_4 NO_LOAD 1798#define vload_partial_1_5 NO_LOAD 1799#define vload_partial_1_6 NO_LOAD 1800#define vload_partial_1_7 NO_LOAD 1801#define vload_partial_1_8 NO_LOAD 1802#define vload_partial_1_9 NO_LOAD 1803#define vload_partial_1_10 NO_LOAD 1804#define vload_partial_1_11 NO_LOAD 1805#define vload_partial_1_12 NO_LOAD 1806#define vload_partial_1_13 NO_LOAD 1807#define vload_partial_1_14 NO_LOAD 1808#define vload_partial_1_15 NO_LOAD 1809#define vload_partial_1_16 NO_LOAD 1810 1811#define vload_partial_2_0 NO_LOAD 1812#define vload_partial_2_1 vload_partial_1 1813#define vload_partial_2_2 vload_partial_2 1814#define vload_partial_2_3 NO_LOAD 1815#define vload_partial_2_4 NO_LOAD 1816#define vload_partial_2_5 NO_LOAD 1817#define vload_partial_2_6 NO_LOAD 1818#define vload_partial_2_7 NO_LOAD 1819#define vload_partial_2_8 NO_LOAD 1820#define vload_partial_2_9 NO_LOAD 1821#define vload_partial_2_10 NO_LOAD 1822#define vload_partial_2_11 NO_LOAD 1823#define vload_partial_2_12 NO_LOAD 1824#define vload_partial_2_13 NO_LOAD 1825#define vload_partial_2_14 NO_LOAD 1826#define vload_partial_2_15 NO_LOAD 1827#define vload_partial_2_16 NO_LOAD 1828 1829#define vload_partial_3_0 NO_LOAD 1830#define vload_partial_3_1 vload_partial_1 1831#define vload_partial_3_2 vload_partial_2 1832#define vload_partial_3_3 vload_partial_3 1833#define vload_partial_3_4 NO_LOAD 1834#define vload_partial_3_5 NO_LOAD 1835#define vload_partial_3_6 NO_LOAD 1836#define vload_partial_3_7 NO_LOAD 1837#define vload_partial_3_8 NO_LOAD 1838#define vload_partial_3_9 NO_LOAD 1839#define vload_partial_3_10 NO_LOAD 
#define vload_partial_3_11 NO_LOAD
#define vload_partial_3_12 NO_LOAD
#define vload_partial_3_13 NO_LOAD
#define vload_partial_3_14 NO_LOAD
#define vload_partial_3_15 NO_LOAD
#define vload_partial_3_16 NO_LOAD

#define vload_partial_4_0 NO_LOAD
#define vload_partial_4_1 vload_partial_1
#define vload_partial_4_2 vload_partial_2
#define vload_partial_4_3 vload_partial_3
#define vload_partial_4_4 vload_partial_4
#define vload_partial_4_5 NO_LOAD
#define vload_partial_4_6 NO_LOAD
#define vload_partial_4_7 NO_LOAD
#define vload_partial_4_8 NO_LOAD
#define vload_partial_4_9 NO_LOAD
#define vload_partial_4_10 NO_LOAD
#define vload_partial_4_11 NO_LOAD
#define vload_partial_4_12 NO_LOAD
#define vload_partial_4_13 NO_LOAD
#define vload_partial_4_14 NO_LOAD
#define vload_partial_4_15 NO_LOAD
#define vload_partial_4_16 NO_LOAD

#define vload_partial_8_0 NO_LOAD
#define vload_partial_8_1 vload_partial_1
#define vload_partial_8_2 vload_partial_2
#define vload_partial_8_3 vload_partial_3
#define vload_partial_8_4 vload_partial_4
#define vload_partial_8_5 vload_partial_5
#define vload_partial_8_6 vload_partial_6
#define vload_partial_8_7 vload_partial_7
#define vload_partial_8_8 vload_partial_8
#define vload_partial_8_9 NO_LOAD
#define vload_partial_8_10 NO_LOAD
#define vload_partial_8_11 NO_LOAD
#define vload_partial_8_12 NO_LOAD
#define vload_partial_8_13 NO_LOAD
#define vload_partial_8_14 NO_LOAD
#define vload_partial_8_15 NO_LOAD
#define vload_partial_8_16 NO_LOAD

#define vload_partial_16_0 NO_LOAD
#define vload_partial_16_1 vload_partial_1
#define vload_partial_16_2 vload_partial_2
#define vload_partial_16_3 vload_partial_3
#define vload_partial_16_4 vload_partial_4
#define vload_partial_16_5 vload_partial_5
#define vload_partial_16_6 vload_partial_6
#define vload_partial_16_7 vload_partial_7
#define vload_partial_16_8 vload_partial_8
#define vload_partial_16_9 vload_partial_9
#define vload_partial_16_10 vload_partial_10
#define vload_partial_16_11 vload_partial_11
#define vload_partial_16_12 vload_partial_12
#define vload_partial_16_13 vload_partial_13
#define vload_partial_16_14 vload_partial_14
#define vload_partial_16_15 vload_partial_15
#define vload_partial_16_16 vload_partial_16

/* vload_partial_n: load the first n elements of DATA from PTR. Sizes with no
 * native vloadN (5,6,7,9..15) are composed from a power-of-two load plus a
 * smaller partial load of the remainder. */
#define vload_partial_1(DATA, OFFSET, PTR) \
    DATA.s0 = vload1(OFFSET, PTR);

#define vload_partial_2(DATA, OFFSET, PTR) \
    DATA.s01 = vload2(OFFSET, PTR);

#define vload_partial_3(DATA, OFFSET, PTR) \
    DATA.s012 = vload3(OFFSET, PTR);

#define vload_partial_4(DATA, OFFSET, PTR) \
    DATA.s0123 = vload4(OFFSET, PTR);

#define vload_partial_5(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    DATA.s4 = vload1(OFFSET, PTR + 4);

#define vload_partial_6(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vload_partial_7(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vload_partial_8(DATA, OFFSET, PTR) \
    DATA.s01234567 = vload8(OFFSET, PTR);

#define vload_partial_9(DATA, OFFSET, PTR)        \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    DATA.s8 = vload1(OFFSET, PTR + 8);

#define vload_partial_10(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vload_partial_11(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_3(DATA.s89A, OFFSET, PTR + 8);

#define vload_partial_12(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);

/* NOTE(review): 13/14/15 pass the full upper-half swizzle .s89ABCDEF to the
 * smaller partial loads, which themselves only touch the low lanes of that
 * swizzle; the net effect is loading elements 8..12/13/14 only. */
#define vload_partial_13(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_14(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_15(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_16(DATA, OFFSET, PTR) \
    DATA = vload16(OFFSET, PTR);

/* Number of RGBA (4-channel) pixels covered by a vector of the given size. */
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4

#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)

/* Read 1/2/4 adjacent RGBA texels and pack them into a float4/8/16. */
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif /* defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) */

/* Write 1/2/4 adjacent RGBA texels from a packed vector. */
#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
#endif /* defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) */

#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)

#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)

#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
2002#define float1 float 2003#define half1 half 2004#define char1 char 2005#define uchar1 uchar 2006#define short1 short 2007#define ushort1 ushort 2008#define int1 int 2009#define uint1 uint 2010#define long1 long 2011#define ulong1 ulong 2012#define double1 double 2013 2014#define vload1(OFFSET, PTR) *(OFFSET + PTR) 2015#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA 2016 2017 2018#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size 2019#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) 2020 2021#define NO_STORE(data, offs, ptr) \ 2022 { \ 2023 } 2024 2025 2026#define vstore_partial_1_0 NO_STORE 2027#define vstore_partial_1_1 vstore1 2028#define vstore_partial_1_2 NO_STORE 2029#define vstore_partial_1_3 NO_STORE 2030#define vstore_partial_1_4 NO_STORE 2031#define vstore_partial_1_5 NO_STORE 2032#define vstore_partial_1_6 NO_STORE 2033#define vstore_partial_1_7 NO_STORE 2034#define vstore_partial_1_8 NO_STORE 2035#define vstore_partial_1_9 NO_STORE 2036#define vstore_partial_1_10 NO_STORE 2037#define vstore_partial_1_11 NO_STORE 2038#define vstore_partial_1_12 NO_STORE 2039#define vstore_partial_1_13 NO_STORE 2040#define vstore_partial_1_14 NO_STORE 2041#define vstore_partial_1_15 NO_STORE 2042#define vstore_partial_1_16 NO_STORE 2043 2044#define vstore_partial_2_0 NO_STORE 2045#define vstore_partial_2_1 vstore_partial_1 2046#define vstore_partial_2_2 vstore_partial_2 2047#define vstore_partial_2_3 NO_STORE 2048#define vstore_partial_2_4 NO_STORE 2049#define vstore_partial_2_5 NO_STORE 2050#define vstore_partial_2_6 NO_STORE 2051#define vstore_partial_2_7 NO_STORE 2052#define vstore_partial_2_8 NO_STORE 2053#define vstore_partial_2_9 NO_STORE 2054#define vstore_partial_2_10 NO_STORE 2055#define vstore_partial_2_11 NO_STORE 2056#define vstore_partial_2_12 NO_STORE 2057#define vstore_partial_2_13 NO_STORE 2058#define vstore_partial_2_14 NO_STORE 2059#define vstore_partial_2_15 NO_STORE 2060#define 
vstore_partial_2_16 NO_STORE 2061 2062#define vstore_partial_3_0 NO_STORE 2063#define vstore_partial_3_1 vstore_partial_1 2064#define vstore_partial_3_2 vstore_partial_2 2065#define vstore_partial_3_3 vstore_partial_3 2066#define vstore_partial_3_4 NO_STORE 2067#define vstore_partial_3_5 NO_STORE 2068#define vstore_partial_3_6 NO_STORE 2069#define vstore_partial_3_7 NO_STORE 2070#define vstore_partial_3_8 NO_STORE 2071#define vstore_partial_3_9 NO_STORE 2072#define vstore_partial_3_10 NO_STORE 2073#define vstore_partial_3_11 NO_STORE 2074#define vstore_partial_3_12 NO_STORE 2075#define vstore_partial_3_13 NO_STORE 2076#define vstore_partial_3_14 NO_STORE 2077#define vstore_partial_3_15 NO_STORE 2078#define vstore_partial_3_16 NO_STORE 2079 2080#define vstore_partial_4_0 NO_STORE 2081#define vstore_partial_4_1 vstore_partial_1 2082#define vstore_partial_4_2 vstore_partial_2 2083#define vstore_partial_4_3 vstore_partial_3 2084#define vstore_partial_4_4 vstore_partial_4 2085#define vstore_partial_4_5 NO_STORE 2086#define vstore_partial_4_6 NO_STORE 2087#define vstore_partial_4_7 NO_STORE 2088#define vstore_partial_4_8 NO_STORE 2089#define vstore_partial_4_9 NO_STORE 2090#define vstore_partial_4_10 NO_STORE 2091#define vstore_partial_4_11 NO_STORE 2092#define vstore_partial_4_12 NO_STORE 2093#define vstore_partial_4_13 NO_STORE 2094#define vstore_partial_4_14 NO_STORE 2095#define vstore_partial_4_15 NO_STORE 2096#define vstore_partial_4_16 NO_STORE 2097 2098#define vstore_partial_8_0 NO_STORE 2099#define vstore_partial_8_1 vstore_partial_1 2100#define vstore_partial_8_2 vstore_partial_2 2101#define vstore_partial_8_3 vstore_partial_3 2102#define vstore_partial_8_4 vstore_partial_4 2103#define vstore_partial_8_5 vstore_partial_5 2104#define vstore_partial_8_6 vstore_partial_6 2105#define vstore_partial_8_7 vstore_partial_7 2106#define vstore_partial_8_8 vstore_partial_8 2107#define vstore_partial_8_9 NO_STORE 2108#define vstore_partial_8_10 NO_STORE 2109#define 
vstore_partial_8_11 NO_STORE 2110#define vstore_partial_8_12 NO_STORE 2111#define vstore_partial_8_13 NO_STORE 2112#define vstore_partial_8_14 NO_STORE 2113#define vstore_partial_8_15 NO_STORE 2114#define vstore_partial_8_16 NO_STORE 2115 2116#define vstore_partial_16_0 NO_STORE 2117#define vstore_partial_16_1 vstore_partial_1 2118#define vstore_partial_16_2 vstore_partial_2 2119#define vstore_partial_16_3 vstore_partial_3 2120#define vstore_partial_16_4 vstore_partial_4 2121#define vstore_partial_16_5 vstore_partial_5 2122#define vstore_partial_16_6 vstore_partial_6 2123#define vstore_partial_16_7 vstore_partial_7 2124#define vstore_partial_16_8 vstore_partial_8 2125#define vstore_partial_16_9 vstore_partial_9 2126#define vstore_partial_16_10 vstore_partial_10 2127#define vstore_partial_16_11 vstore_partial_11 2128#define vstore_partial_16_12 vstore_partial_12 2129#define vstore_partial_16_13 vstore_partial_13 2130#define vstore_partial_16_14 vstore_partial_14 2131#define vstore_partial_16_15 vstore_partial_15 2132#define vstore_partial_16_16 vstore_partial_16 2133 2134 2135#define vstore_partial_1(DATA, OFFSET, PTR) \ 2136 vstore1(DATA.s0, OFFSET, PTR); 2137 2138#define vstore_partial_2(DATA, OFFSET, PTR) \ 2139 vstore2(DATA.s01, OFFSET, PTR); 2140 2141#define vstore_partial_3(DATA, OFFSET, PTR) \ 2142 vstore3(DATA.s012, OFFSET, PTR); 2143 2144#define vstore_partial_4(DATA, OFFSET, PTR) \ 2145 vstore4(DATA.s0123, OFFSET, PTR); 2146 2147#define vstore_partial_5(DATA, OFFSET, PTR) \ 2148 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 2149 vstore1(DATA.s4, OFFSET, PTR + 4); 2150 2151#define vstore_partial_6(DATA, OFFSET, PTR) \ 2152 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 2153 vstore_partial_2(DATA.s45, OFFSET, PTR + 4); 2154 2155#define vstore_partial_7(DATA, OFFSET, PTR) \ 2156 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 2157 vstore_partial_3(DATA.s456, OFFSET, PTR + 4); 2158 2159#define vstore_partial_8(DATA, OFFSET, PTR) \ 2160 vstore8(DATA.s01234567, 
OFFSET, PTR); 2161 2162#define vstore_partial_9(DATA, OFFSET, PTR) \ 2163 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2164 vstore1(DATA.s8, OFFSET, PTR + 8); 2165 2166#define vstore_partial_10(DATA, OFFSET, PTR) \ 2167 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2168 vstore_partial_2(DATA.s89, OFFSET, PTR + 8); 2169 2170#define vstore_partial_11(DATA, OFFSET, PTR) \ 2171 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2172 vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); 2173 2174#define vstore_partial_12(DATA, OFFSET, PTR) \ 2175 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2176 vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); 2177 2178#define vstore_partial_13(DATA, OFFSET, PTR) \ 2179 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2180 vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); 2181 2182#define vstore_partial_14(DATA, OFFSET, PTR) \ 2183 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2184 vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); 2185 2186#define vstore_partial_15(DATA, OFFSET, PTR) \ 2187 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 2188 vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); 2189 2190#define vstore_partial_16(DATA, OFFSET, PTR) \ 2191 vstore16(DATA, OFFSET, PTR); 2192 2193 2194 2195 2196 2197#define convert_float_sat convert_float 2198#define convert_float1_sat convert_float 2199#define convert_float2_sat convert_float2 2200#define convert_float3_sat convert_float3 2201#define convert_float4_sat convert_float4 2202#define convert_float8_sat convert_float8 2203#define convert_float16_sat convert_float16 2204#define convert_half_sat convert_float 2205#define convert_half1_sat convert_half 2206#define convert_half2_sat convert_half2 2207#define convert_half3_sat convert_half3 2208#define convert_half4_sat convert_half4 2209#define convert_half8_sat convert_half8 2210#define convert_half16_sat convert_half16 2211 2212#define convert_float1 convert_float 2213#define convert_half1 convert_half 2214#define convert_char1 
convert_char 2215#define convert_uchar1 convert_uchar 2216#define convert_short1 convert_short 2217#define convert_ushort1 convert_ushort 2218#define convert_int1 convert_int 2219#define convert_uint1 convert_uint 2220#define convert_long1 convert_long 2221#define convert_ulong1 convert_ulong 2222#define convert_double1 convert_double 2223 2224#define convert_char1_sat convert_char_sat 2225#define convert_uchar1_sat convert_uchar_sat 2226#define convert_uchar2_sat convert_uchar2_sat 2227#define convert_uchar3_sat convert_uchar3_sat 2228#define convert_uchar4_sat convert_uchar4_sat 2229#define convert_uchar8_sat convert_uchar8_sat 2230#define convert_uchar16_sat convert_uchar16_sat 2231#define convert_short1_sat convert_short_sat 2232#define convert_ushort1_sat convert_ushort_sat 2233#define convert_int1_sat convert_int_sat 2234#define convert_uint1_sat convert_uint_sat 2235#define convert_long1_sat convert_long_sat 2236#define convert_ulong1_sat convert_ulong_sat 2237#define convert_double1_sat convert_double_sat 2238 2239#define VEC_DATA_TYPE_STR(type, size) type##size 2240#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) 2241 2242#define CONVERT_STR(x, type) (convert_##type((x))) 2243#define CONVERT(x, type) CONVERT_STR(x, type) 2244 2245#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) 2246#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) 2247 2248#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) 2249#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) 2250 2251#define select_vec_dt_uchar(size) uchar##size 2252#define select_vec_dt_char(size) char##size 2253#define select_vec_dt_ushort(size) ushort##size 2254#define select_vec_dt_short(size) short##size 2255#define select_vec_dt_half(size) short##size 2256#define select_vec_dt_uint(size) uint##size 2257#define select_vec_dt_int(size) int##size 2258#define select_vec_dt_float(size) int##size 2259#define 
select_vec_dt_ulong(size) ulong##size 2260#define select_vec_dt_long(size) long##size 2261 2262#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) 2263#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) 2264#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) 2265 2266#define signed_int_vec_dt_uchar(size) char##size 2267#define signed_int_vec_dt_char(size) char##size 2268#define signed_int_vec_dt_ushort(size) short##size 2269#define signed_int_vec_dt_short(size) short##size 2270#define signed_int_vec_dt_half(size) short##size 2271#define signed_int_vec_dt_uint(size) int##size 2272#define signed_int_vec_dt_int(size) int##size 2273#define signed_int_vec_dt_float(size) int##size 2274#define signed_int_vec_dt_ulong(size) long##size 2275#define signed_int_vec_dt_long(size) long##size 2276 2277#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) 2278#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) 2279#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) 2280 2281#define sum_reduce_1(x) (x) 2282#define sum_reduce_2(x) ((x).s0) + ((x).s1) 2283#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) 2284#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) 2285#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) 2286#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) 2287 2288#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) 2289#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) 2290 2291#define prod_reduce_1(x) (x) 2292#define prod_reduce_2(x) ((x).s0) * ((x).s1) 2293#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) 2294#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) 2295#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) 2296#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * 
prod_reduce_8((x).s89ABCDEF) 2297 2298#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) 2299#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) 2300 2301#define max_reduce_1(x) (x) 2302#define max_reduce_2(x) max(((x).s0), ((x).s1)) 2303#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) 2304#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) 2305#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) 2306#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) 2307 2308#define MAX_REDUCE_STR(x, size) max_reduce_##size(x) 2309#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) 2310 2311#define VECTOR_DECLARATION(name) \ 2312 __global uchar *name##_ptr, \ 2313 uint name##_stride_x, \ 2314 uint name##_step_x, \ 2315 uint name##_offset_first_element_in_bytes 2316 2317#define IMAGE_DECLARATION(name) \ 2318 __global uchar *name##_ptr, \ 2319 uint name##_stride_x, \ 2320 uint name##_step_x, \ 2321 uint name##_stride_y, \ 2322 uint name##_step_y, \ 2323 uint name##_offset_first_element_in_bytes 2324 2325#define TENSOR3D_DECLARATION(name) \ 2326 __global uchar *name##_ptr, \ 2327 uint name##_stride_x, \ 2328 uint name##_step_x, \ 2329 uint name##_stride_y, \ 2330 uint name##_step_y, \ 2331 uint name##_stride_z, \ 2332 uint name##_step_z, \ 2333 uint name##_offset_first_element_in_bytes 2334 2335#define TENSOR4D_DECLARATION(name) \ 2336 __global uchar *name##_ptr, \ 2337 uint name##_stride_x, \ 2338 uint name##_step_x, \ 2339 uint name##_stride_y, \ 2340 uint name##_step_y, \ 2341 uint name##_stride_z, \ 2342 uint name##_step_z, \ 2343 uint name##_stride_w, \ 2344 uint name##_step_w, \ 2345 uint name##_offset_first_element_in_bytes 2346 2347#define TENSOR5D_DECLARATION(name) \ 2348 __global uchar *name##_ptr, \ 2349 uint name##_stride_x, \ 2350 uint name##_step_x, \ 2351 uint name##_stride_y, \ 2352 uint name##_step_y, \ 2353 uint name##_stride_z, \ 2354 uint name##_step_z, \ 2355 uint 
name##_stride_w, \
    uint name##_step_w, \
    uint name##_stride_v, \
    uint name##_step_v, \
    uint name##_offset_first_element_in_bytes

// ---------------------------------------------------------------------------
// CONVERT_TO_*_STRUCT helpers
//
// Each macro expands the per-tensor kernel arguments generated by the
// corresponding *_DECLARATION macro (name##_ptr, name##_stride_x, ...) into a
// call that builds one of the accessor structs defined below. The _NO_STEP
// variants pass 0 for the step arguments, so the returned pointer is NOT
// advanced per work-item along those dimensions.
// ---------------------------------------------------------------------------

#define CONVERT_TO_VECTOR_STRUCT(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)

#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)

#define CONVERT_TO_IMAGE_STRUCT(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)

#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)

// NOTE(review): this macro was previously defined twice with an identical
// replacement list; the redundant duplicate definition has been removed.
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)

// NOTE(review): unlike the other _NO_STEP variants, step_z is forwarded
// unchanged here instead of being forced to 0 — kept as-is; confirm this is
// intentional (the Z step is still applied per work-item).
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, \
                                 name##_stride_z, 0)

#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)

#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
    tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                           name##_stride_z, name##_step_z)

/** Structure to hold Vector information */
typedef struct Vector
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< Offset of the first element of the vector, in bytes */
    int             stride_x;                      /**< Stride of the buffer in X dimension, in bytes */
} Vector;

/** Structure to hold Image information */
typedef struct Image
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< Offset of the first element of the image, in bytes */
    int             stride_x;                      /**< Stride of the buffer in X dimension, in bytes */
    int             stride_y;                      /**< Stride of the buffer in Y dimension, in bytes */
} Image;

/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< Offset of the first element of the tensor, in bytes */
    int             stride_x;                      /**< Stride of the buffer in X dimension, in bytes */
    int             stride_y;                      /**< Stride of the buffer in Y dimension, in bytes */
    int             stride_z;                      /**< Stride of the buffer in Z dimension, in bytes */
} Tensor3D;

/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< Offset of the first element of the tensor, in bytes */
    int             stride_x;                      /**< Stride of the buffer in X dimension, in bytes */
    int             stride_y;                      /**< Stride of the buffer in Y dimension, in bytes */
    int             stride_z;                      /**< Stride of the buffer in Z dimension, in bytes */
    int             stride_w;                      /**< Stride of the buffer in W dimension, in bytes */
} Tensor4D;

/** Wrap vector information into a Vector structure, advancing ptr to this
 * work-item's first element: offset_first_element_in_bytes + gid(0) * step_x.
 */
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
    Vector vector =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
    };
    vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
    return vector;
}

/** Wrap image information into an Image structure, advancing ptr per
 * work-item along X (gid 0) and Y (gid 1).
 */
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
    return img;
}

/** Wrap 3D tensor information into an Image structure: ptr is advanced along
 * X/Y/Z (gids 0/1/2), but only the X/Y strides are retained in the struct.
 */
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return img;
}

/** Wrap 3D tensor information into a Tensor3D structure, advancing ptr per
 * work-item along X/Y/Z (gids 0/1/2).
 */
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return tensor;
}

/** Wrap 3D tensor information into a Tensor3D structure WITHOUT updating the
 * pointer for the work-item (step arguments are accepted but unused).
 */
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    return tensor;
}

/** Wrap 4D tensor information into a Tensor4D structure. gid(2) is split into
 * a Z index (gid(2) % mod_size) and a W index (gid(2) / mod_size), i.e. the
 * third dispatch dimension collapses Z and W.
 */
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
                                             uint step_w,
                                             uint mod_size)
{
    Tensor4D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z,
        .stride_w                      = stride_w
    };

    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
    return tensor;
}

/** Address of the element at (x) relative to the vector's current pointer. */
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}

/** Address of the element at (x, y) relative to the image's current pointer. */
inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}

/** Address of the element at (x, y, z) relative to the tensor's current pointer. */
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}

/** Address of the element at (x, y, z, w) relative to the tensor's current pointer. */
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}

/** Convert a flattened linear index into an absolute element address, using
 * the tensor's width/height/depth to recover (x, y, z). Unlike the *_offset
 * helpers above, this also adds offset_first_element_in_bytes.
 */
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}

#endif // ARM_COMPUTE_HELPER_H

// Multiply-accumulate: a + b * c. Both branches compute the same value; on
// Bifrost GPUs an explicit fma() is used (fma(c, b, a) == c * b + a).
#if GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) (fma(c, b, a))
#else
#define MLA(a, b, c) ((b) * (c) + (a))
#endif

// ---------------------------------------------------------------------------
// Activation functions. Each *_op macro evaluates one activation on a vector
// x of VEC_SIZE elements of DATA_TYPE; A_VAL and B_VAL are the two optional
// activation parameters (unused by some activations).
// ---------------------------------------------------------------------------

// Hard-swish: x * clamp(x + 3, 0, 6) / 6 (0.166666667 ~= 1/6)
#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))

// Logistic (sigmoid): 1 / (1 + exp(-x))
#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))

// Scaled hyperbolic tangent: A * tanh(B * x)
#define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))

// ReLU: max(0, x)
#define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x))

// Bounded ReLU: min(A, max(0, x))
#define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))

// Lower/upper bounded ReLU: min(max(x, B), A)
#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))

// Leaky ReLU: x if x > 0 else A * x
#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))

// Soft ReLU (softplus): log(1 + exp(x))
#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x)))

// ELU: x if x >= 0 else A * (exp(x) - 1)
#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))

// Absolute value
#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x))

// Square: x^2
#define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x)

// Square root
#define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x))

// Linear: A * x + B
#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))

// GELU: x * 0.5 * (1 + erf(x / sqrt(2))) (1.41421356237 ~= sqrt(2))
#define gelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237)))

// Identity: pass-through
#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x)

// Dispatch to op##_op; the two-level expansion lets 'op' itself be a macro.
#define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)

#define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)

/** Quantized activation computed in the float32 domain:
 * dequantize (offset O1_VAL, scale S1_VAL) -> ACTIVATION -> requantize
 * (scale S2_VAL, offset O2_VAL) with a saturating convert back to TYPE.
 *
 * Expected compile-time definitions: DATA_TYPE, TYPE, VEC_FLOAT, VEC_SIZE,
 * VEC_SIZE_LEFTOVER, ACT, A_VAL, B_VAL, S1_VAL, S2_VAL; optionally O1_VAL,
 * O2_VAL and IN_PLACE (write back to the input tensor).
 */
__kernel void activation_layer_quant_f32(
    TENSOR3D_DECLARATION(input)
#ifndef IN_PLACE
    ,
    TENSOR3D_DECLARATION(output)
#endif
)
{
    // X offset in bytes, clamped so the leftover (partial) vector of the last
    // work-item re-reads from a valid, aligned position instead of overrunning.
    uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);

    __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
#ifdef IN_PLACE
    __global uchar *output_addr = input_addr;
#else
    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
#endif

    TYPE data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);

    // Dequantize to float
    VEC_FLOAT data_flt = CONVERT(data0, VEC_FLOAT);
#if defined(O1_VAL)
    data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
#else
    data_flt = round(data_flt) * ((float)S1_VAL);
#endif
    data_flt = ACTIVATION(ACT, float, VEC_SIZE, data_flt, A_VAL, B_VAL);

    // Requantize with saturation
#if defined(O2_VAL)
    data0 = CONVERT_SAT(round(data_flt / ((float)S2_VAL)) + (float)O2_VAL, TYPE);
#else
    data0 = CONVERT_SAT(round(data_flt / ((float)S2_VAL)), TYPE);
#endif

    STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}

#else // quantized-domain implementation (matching #if is earlier in the file)

#if defined(ACT)

/** Quantized activation computed directly in the quantized domain via
 * PERFORM_ACTIVATION_QUANT; same addressing scheme as the f32 variant above.
 */
__kernel void activation_layer_quant(
    TENSOR3D_DECLARATION(input)
#ifndef IN_PLACE
    ,
    TENSOR3D_DECLARATION(output)
#endif
)
{
    // X offset in bytes, clamped for the leftover vector of the last work-item.
    uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);

    __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
#ifdef IN_PLACE
    __global uchar *output_addr = input_addr;
#else
    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
#endif

    TYPE data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);

    data0 = PERFORM_ACTIVATION_QUANT(ACT, data0);

    STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
#endif // defined(ACT)
#endif
)"