1R"( 2 3 4#ifndef ARM_COMPUTE_HELPER_H 5#define ARM_COMPUTE_HELPER_H 6 7 8 9 10#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 11 VSTORE(N0) \ 12 (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 13 14#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 15 STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 16 VSTORE(N0) \ 17 (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 18 19#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 20 STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 21 VSTORE(N0) \ 22 (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 23 24#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 25 STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 26 VSTORE(N0) \ 27 (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 28 29#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 30 STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 31 VSTORE(N0) \ 32 (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 33 34#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 35 STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 36 VSTORE(N0) \ 37 (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 38 39#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 40 STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 41 VSTORE(N0) \ 42 (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 43 44#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 45 STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 46 VSTORE(N0) \ 47 (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 48 49#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 50 STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 51 VSTORE(N0) \ 52 (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 53 54#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 55 STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 56 VSTORE(N0) \ 57 (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 58 59#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 60 STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 61 VSTORE(N0) \ 62 (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 63 64#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 65 STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 66 VSTORE(N0) \ 67 (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 68 69#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 70 STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 71 VSTORE(N0) \ 72 (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 73 74#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 75 STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 76 VSTORE(N0) \ 77 (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 78 79#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 80 STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 81 VSTORE(N0) \ 82 (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 83 84#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 85 STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 86 VSTORE(N0) \ 87 (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 88 89 90 91#define 
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 92 VSTORE(N0) \ 93 (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 94 95#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 96 CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 97 VSTORE(N0) \ 98 (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 99 100#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 101 CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 102 VSTORE(N0) \ 103 (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 104 105#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 106 CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 107 VSTORE(N0) \ 108 (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 109 110#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 111 CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 112 VSTORE(N0) \ 113 (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 114 115#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 116 CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 117 VSTORE(N0) \ 118 (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 119 120#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 121 CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 122 VSTORE(N0) \ 123 (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 124 125#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 126 CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 127 VSTORE(N0) \ 128 (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 129 130#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 131 CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 132 VSTORE(N0) \ 133 (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 134 135#define CONVERT_STORE_ROW_10(N0, DATA, BASENAME, PTR, STRIDE_Y, Z) \ 136 CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 137 VSTORE(N0) \ 138 (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 139 140#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 141 CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 142 VSTORE(N0) \ 143 (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 144 145#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 146 CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 147 VSTORE(N0) \ 148 (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 149 150#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 151 CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 152 VSTORE(N0) \ 153 (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, 
(__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 154 155#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 156 CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 157 VSTORE(N0) \ 158 (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 159 160#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 161 CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 162 VSTORE(N0) \ 163 (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 164 165#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 166 CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 167 VSTORE(N0) \ 168 (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 169 170 171 172 173#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 174#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 175 176 177 178#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 179#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 180 181 182 183#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 184 VSTORE_PARTIAL(N0, STORE_N0) \ 185 (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); 186 187#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 188 STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 189 VSTORE_PARTIAL(N0, STORE_N0) \ 190 (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); 191 192#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 193 STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 194 VSTORE_PARTIAL(N0, STORE_N0) \ 195 (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); 196 197#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 198 STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 199 VSTORE_PARTIAL(N0, STORE_N0) \ 200 (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); 201 202#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 203 STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 204 VSTORE_PARTIAL(N0, STORE_N0) \ 205 (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); 206 207#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 208 STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 209 VSTORE_PARTIAL(N0, STORE_N0) \ 210 (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); 211 212#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 213 STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 214 VSTORE_PARTIAL(N0, STORE_N0) \ 215 (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); 216 217#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 218 STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 219 
VSTORE_PARTIAL(N0, STORE_N0) \ 220 (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); 221 222#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 223 STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 224 VSTORE_PARTIAL(N0, STORE_N0) \ 225 (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); 226 227#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 228 STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 229 VSTORE_PARTIAL(N0, STORE_N0) \ 230 (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); 231 232#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 233 STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 234 VSTORE_PARTIAL(N0, STORE_N0) \ 235 (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); 236 237#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 238 STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 239 VSTORE_PARTIAL(N0, STORE_N0) \ 240 (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); 241 242#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 243 STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 244 VSTORE_PARTIAL(N0, STORE_N0) \ 245 (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); 246 247#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 248 STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 249 VSTORE_PARTIAL(N0, STORE_N0) \ 250 (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); 251 252#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 253 STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 254 VSTORE_PARTIAL(N0, STORE_N0) \ 255 (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); 256 257#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 258 STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ 259 VSTORE_PARTIAL(N0, STORE_N0) \ 260 (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); 261 262 263 264#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 265#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 266 267#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 268 if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ 269 { \ 270 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 271 } \ 272 else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ 273 { \ 274 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 275 } \ 276 else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ 277 { \ 278 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 279 } \ 280 else \ 281 { \ 282 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 283 } 284 285#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, 
Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ 286 if(!(PARTIAL_COND_X)) \ 287 { \ 288 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 289 } \ 290 else \ 291 { \ 292 STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 293 } 294 295#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ 296 if(!(PARTIAL_COND_Y)) \ 297 { \ 298 STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 299 } \ 300 else \ 301 { \ 302 STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ 303 } 304 305 306#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) 307 308 309#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 310 311#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 312 STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) 313 314#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 315 316#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 317 STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) 318 319#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 320 321#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 322 STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) 323 324#else 325 326#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ 327 STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) 328 329#endif 330 331#endif 332 333 334#if defined(PARTIAL_STORE_M0) 335 336#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 337 ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) 338#else 339#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ 340 ((uint)(y * M0)) 341#endif 342 343 344 345#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ 346 STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) 347 348 349#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 350#pragma OPENCL EXTENSION cl_khr_fp16 : enable 351#endif 352 353#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) 354#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable 355#endif 356 357#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) 358#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable 359#endif 360 361#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) 362#pragma OPENCL EXTENSION cl_arm_printf : enable 363#endif 364 365#define GPU_ARCH_MIDGARD 0x100 366#define GPU_ARCH_BIFROST 0x200 367#define GPU_ARCH_VALHALL 0x300 368 369 370#define CONCAT(a, b) a##b 371 372 373#define EXPAND(x) x 374 375 376#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) 377 378 379#define REV1(x) ((x)) 380#define REV2(x) ((x).s10) 381#define REV3(x) ((x).s210) 382#define REV4(x) ((x).s3210) 383#define REV8(x) ((x).s76543210) 384#define REV16(x) 
((x).sFEDCBA9876543210) 385 386 387 388#define REVERSE_STR(x, s) REV##s((x)) 389#define REVERSE(x, s) REVERSE_STR(x, s) 390 391 392 393#define ROT1_0(x) ((x)) 394#define ROT1_1(x) ((x)) 395 396#define ROT2_0(x) ((x)) 397#define ROT2_1(x) ((x).s10) 398#define ROT2_2(x) ((x)) 399 400#define ROT3_0(x) ((x)) 401#define ROT3_1(x) ((x).s201) 402#define ROT3_2(x) ((x).s120) 403#define ROT3_3(x) ((x)) 404 405#define ROT4_0(x) ((x)) 406#define ROT4_1(x) ((x).s3012) 407#define ROT4_2(x) ((x).s2301) 408#define ROT4_3(x) ((x).s1230) 409#define ROT4_4(x) ((x)) 410 411#define ROT8_0(x) ((x)) 412#define ROT8_1(x) ((x).s70123456) 413#define ROT8_2(x) ((x).s67012345) 414#define ROT8_3(x) ((x).s56701234) 415#define ROT8_4(x) ((x).s45670123) 416#define ROT8_5(x) ((x).s34567012) 417#define ROT8_6(x) ((x).s23456701) 418#define ROT8_7(x) ((x).s12345670) 419#define ROT8_8(x) ((x)) 420 421#define ROT16_0(x) ((x)) 422#define ROT16_1(x) ((x).sF0123456789ABCDE) 423#define ROT16_2(x) ((x).sEF0123456789ABCD) 424#define ROT16_3(x) ((x).sDEF0123456789ABC) 425#define ROT16_4(x) ((x).sCDEF0123456789AB) 426#define ROT16_5(x) ((x).sBCDEF0123456789A) 427#define ROT16_6(x) ((x).sABCDEF0123456789) 428#define ROT16_7(x) ((x).s9ABCDEF012345678) 429#define ROT16_8(x) ((x).s89ABCDEF01234567) 430#define ROT16_9(x) ((x).s789ABCDEF0123456) 431#define ROT16_10(x) ((x).s6789ABCDEF012345) 432#define ROT16_11(x) ((x).s56789ABCDEF01234) 433#define ROT16_12(x) ((x).s456789ABCDEF0123) 434#define ROT16_13(x) ((x).s3456789ABCDEF012) 435#define ROT16_14(x) ((x).s23456789ABCDEF01) 436#define ROT16_15(x) ((x).s123456789ABCDEF0) 437#define ROT16_16(x) ((x)) 438 439 440 441#define ROTATE_STR(x, s, n) ROT##s##_##n(x) 442#define ROTATE(x, s, n) ROTATE_STR(x, s, n) 443 444 445 446#define V_OFFS1(dt) (dt##1)(0) 447#define V_OFFS2(dt) (dt##2)(0, 1) 448#define V_OFFS3(dt) (dt##3)(0, 1, 2) 449#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) 450#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) 451#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) 452 453 454 455#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) 456#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) 457 458 459#define VLOAD_STR(size) vload##size 460#define VLOAD(size) VLOAD_STR(size) 461 462 463#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size 464#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) 465 466#define NO_LOAD(data, offs, ptr) \ 467 { \ 468 } 469 470 471#define vload_partial_1_0 NO_LOAD 472#define vload_partial_1_1 vload1 473#define vload_partial_1_2 NO_LOAD 474#define vload_partial_1_3 NO_LOAD 475#define vload_partial_1_4 NO_LOAD 476#define vload_partial_1_5 NO_LOAD 477#define vload_partial_1_6 NO_LOAD 478#define vload_partial_1_7 NO_LOAD 479#define vload_partial_1_8 NO_LOAD 480#define vload_partial_1_9 NO_LOAD 481#define vload_partial_1_10 NO_LOAD 482#define vload_partial_1_11 NO_LOAD 483#define vload_partial_1_12 NO_LOAD 484#define vload_partial_1_13 NO_LOAD 485#define vload_partial_1_14 NO_LOAD 486#define vload_partial_1_15 NO_LOAD 487#define vload_partial_1_16 NO_LOAD 488 489#define vload_partial_2_0 NO_LOAD 490#define vload_partial_2_1 vload_partial_1 491#define vload_partial_2_2 vload_partial_2 492#define vload_partial_2_3 NO_LOAD 493#define vload_partial_2_4 NO_LOAD 494#define vload_partial_2_5 NO_LOAD 495#define vload_partial_2_6 NO_LOAD 496#define vload_partial_2_7 NO_LOAD 497#define vload_partial_2_8 NO_LOAD 498#define vload_partial_2_9 NO_LOAD 499#define vload_partial_2_10 NO_LOAD 500#define vload_partial_2_11 
NO_LOAD 501#define vload_partial_2_12 NO_LOAD 502#define vload_partial_2_13 NO_LOAD 503#define vload_partial_2_14 NO_LOAD 504#define vload_partial_2_15 NO_LOAD 505#define vload_partial_2_16 NO_LOAD 506 507#define vload_partial_3_0 NO_LOAD 508#define vload_partial_3_1 vload_partial_1 509#define vload_partial_3_2 vload_partial_2 510#define vload_partial_3_3 vload_partial_3 511#define vload_partial_3_4 NO_LOAD 512#define vload_partial_3_5 NO_LOAD 513#define vload_partial_3_6 NO_LOAD 514#define vload_partial_3_7 NO_LOAD 515#define vload_partial_3_8 NO_LOAD 516#define vload_partial_3_9 NO_LOAD 517#define vload_partial_3_10 NO_LOAD 518#define vload_partial_3_11 NO_LOAD 519#define vload_partial_3_12 NO_LOAD 520#define vload_partial_3_13 NO_LOAD 521#define vload_partial_3_14 NO_LOAD 522#define vload_partial_3_15 NO_LOAD 523#define vload_partial_3_16 NO_LOAD 524 525#define vload_partial_4_0 NO_LOAD 526#define vload_partial_4_1 vload_partial_1 527#define vload_partial_4_2 vload_partial_2 528#define vload_partial_4_3 vload_partial_3 529#define vload_partial_4_4 vload_partial_4 530#define vload_partial_4_5 NO_LOAD 531#define vload_partial_4_6 NO_LOAD 532#define vload_partial_4_7 NO_LOAD 533#define vload_partial_4_8 NO_LOAD 534#define vload_partial_4_9 NO_LOAD 535#define vload_partial_4_10 NO_LOAD 536#define vload_partial_4_11 NO_LOAD 537#define vload_partial_4_12 NO_LOAD 538#define vload_partial_4_13 NO_LOAD 539#define vload_partial_4_14 NO_LOAD 540#define vload_partial_4_15 NO_LOAD 541#define vload_partial_4_16 NO_LOAD 542 543#define vload_partial_8_0 NO_LOAD 544#define vload_partial_8_1 vload_partial_1 545#define vload_partial_8_2 vload_partial_2 546#define vload_partial_8_3 vload_partial_3 547#define vload_partial_8_4 vload_partial_4 548#define vload_partial_8_5 vload_partial_5 549#define vload_partial_8_6 vload_partial_6 550#define vload_partial_8_7 vload_partial_7 551#define vload_partial_8_8 vload_partial_8 552#define vload_partial_8_9 NO_LOAD 553#define vload_partial_8_10 NO_LOAD 554#define vload_partial_8_11 NO_LOAD 555#define vload_partial_8_12 NO_LOAD 556#define vload_partial_8_13 NO_LOAD 557#define vload_partial_8_14 NO_LOAD 558#define vload_partial_8_15 NO_LOAD 559#define vload_partial_8_16 NO_LOAD 560 561#define vload_partial_16_0 NO_LOAD 562#define vload_partial_16_1 vload_partial_1 563#define vload_partial_16_2 vload_partial_2 564#define vload_partial_16_3 vload_partial_3 565#define vload_partial_16_4 vload_partial_4 566#define vload_partial_16_5 vload_partial_5 567#define vload_partial_16_6 vload_partial_6 568#define vload_partial_16_7 vload_partial_7 569#define vload_partial_16_8 vload_partial_8 570#define vload_partial_16_9 vload_partial_9 571#define vload_partial_16_10 vload_partial_10 572#define vload_partial_16_11 vload_partial_11 573#define vload_partial_16_12 vload_partial_12 574#define vload_partial_16_13 vload_partial_13 575#define vload_partial_16_14 vload_partial_14 576#define vload_partial_16_15 vload_partial_15 577#define vload_partial_16_16 vload_partial_16 578 579 580#define vload_partial_1(DATA, OFFSET, PTR) \ 581 DATA.s0 = vload1(OFFSET, PTR); 582 583#define vload_partial_2(DATA, OFFSET, PTR) \ 584 DATA.s01 = vload2(OFFSET, PTR); 585 586#define vload_partial_3(DATA, OFFSET, PTR) \ 587 DATA.s012 = vload3(OFFSET, PTR); 588 589#define vload_partial_4(DATA, OFFSET, PTR) \ 590 DATA.s0123 = vload4(OFFSET, PTR); 591 592#define vload_partial_5(DATA, OFFSET, PTR) \ 593 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 594 DATA.s4 = vload1(OFFSET, PTR + 4); 595 596#define 
vload_partial_6(DATA, OFFSET, PTR) \ 597 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 598 vload_partial_2(DATA.s45, OFFSET, PTR + 4); 599 600#define vload_partial_7(DATA, OFFSET, PTR) \ 601 vload_partial_4(DATA.s0123, OFFSET, PTR); \ 602 vload_partial_3(DATA.s456, OFFSET, PTR + 4); 603 604#define vload_partial_8(DATA, OFFSET, PTR) \ 605 DATA.s01234567 = vload8(OFFSET, PTR); 606 607#define vload_partial_9(DATA, OFFSET, PTR) \ 608 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 609 DATA.s8 = vload1(OFFSET, PTR + 8); 610 611#define vload_partial_10(DATA, OFFSET, PTR) \ 612 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 613 vload_partial_2(DATA.s89, OFFSET, PTR + 8); 614 615#define vload_partial_11(DATA, OFFSET, PTR) \ 616 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 617 vload_partial_3(DATA.s89A, OFFSET, PTR + 8); 618 619#define vload_partial_12(DATA, OFFSET, PTR) \ 620 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 621 vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); 622 623#define vload_partial_13(DATA, OFFSET, PTR) \ 624 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 625 vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); 626 627#define vload_partial_14(DATA, OFFSET, PTR) \ 628 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 629 vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); 630 631#define vload_partial_15(DATA, OFFSET, PTR) \ 632 vload_partial_8(DATA.s01234567, OFFSET, PTR); \ 633 vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); 634 635#define vload_partial_16(DATA, OFFSET, PTR) \ 636 DATA = vload16(OFFSET, PTR); 637 638 639 640#define PIXEL_UNIT4 1 641#define PIXEL_UNIT8 2 642#define PIXEL_UNIT16 4 643 644 645#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size 646#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) 647 648 649#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); 650#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); 651#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); 652 653#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 654#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); 655#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); 656#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); 657#endif 658 659#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); 660#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); 661#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, 
y_coord), values.sCDEF)); 662 663#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) 664#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); 665#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); 666#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); 667#endif 668 669 670#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) 671#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) 672 673 674#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) 675#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) 676 677#define VSTORE_STR(size) vstore##size 678#define VSTORE(size) VSTORE_STR(size) 679 680#define float1 float 681#define half1 half 682#define char1 char 683#define uchar1 uchar 684#define short1 short 685#define ushort1 ushort 686#define int1 int 687#define uint1 uint 688#define long1 long 689#define ulong1 ulong 690#define double1 double 691 692#define vload1(OFFSET, PTR) *(OFFSET + PTR) 693#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA 694 695 696#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size 697#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) 698 699#define NO_STORE(data, offs, ptr) \ 700 { \ 701 } 702 703 704#define vstore_partial_1_0 NO_STORE 705#define vstore_partial_1_1 vstore1 706#define vstore_partial_1_2 NO_STORE 707#define vstore_partial_1_3 NO_STORE 708#define vstore_partial_1_4 NO_STORE 709#define vstore_partial_1_5 NO_STORE 710#define vstore_partial_1_6 NO_STORE 711#define vstore_partial_1_7 NO_STORE 712#define vstore_partial_1_8 NO_STORE 713#define vstore_partial_1_9 NO_STORE 714#define vstore_partial_1_10 NO_STORE 715#define vstore_partial_1_11 NO_STORE 716#define vstore_partial_1_12 NO_STORE 717#define vstore_partial_1_13 NO_STORE 718#define vstore_partial_1_14 NO_STORE 719#define vstore_partial_1_15 NO_STORE 720#define vstore_partial_1_16 NO_STORE 721 722#define vstore_partial_2_0 NO_STORE 723#define vstore_partial_2_1 vstore_partial_1 724#define vstore_partial_2_2 vstore_partial_2 725#define vstore_partial_2_3 NO_STORE 726#define vstore_partial_2_4 NO_STORE 727#define vstore_partial_2_5 NO_STORE 728#define vstore_partial_2_6 NO_STORE 729#define vstore_partial_2_7 NO_STORE 730#define vstore_partial_2_8 NO_STORE 731#define vstore_partial_2_9 NO_STORE 732#define vstore_partial_2_10 NO_STORE 733#define vstore_partial_2_11 NO_STORE 734#define vstore_partial_2_12 NO_STORE 735#define vstore_partial_2_13 NO_STORE 736#define vstore_partial_2_14 NO_STORE 737#define vstore_partial_2_15 NO_STORE 738#define vstore_partial_2_16 NO_STORE 739 740#define vstore_partial_3_0 NO_STORE 741#define vstore_partial_3_1 vstore_partial_1 742#define vstore_partial_3_2 vstore_partial_2 743#define vstore_partial_3_3 vstore_partial_3 744#define vstore_partial_3_4 NO_STORE 745#define 
vstore_partial_3_5 NO_STORE 746#define vstore_partial_3_6 NO_STORE 747#define vstore_partial_3_7 NO_STORE 748#define vstore_partial_3_8 NO_STORE 749#define vstore_partial_3_9 NO_STORE 750#define vstore_partial_3_10 NO_STORE 751#define vstore_partial_3_11 NO_STORE 752#define vstore_partial_3_12 NO_STORE 753#define vstore_partial_3_13 NO_STORE 754#define vstore_partial_3_14 NO_STORE 755#define vstore_partial_3_15 NO_STORE 756#define vstore_partial_3_16 NO_STORE 757 758#define vstore_partial_4_0 NO_STORE 759#define vstore_partial_4_1 vstore_partial_1 760#define vstore_partial_4_2 vstore_partial_2 761#define vstore_partial_4_3 vstore_partial_3 762#define vstore_partial_4_4 vstore_partial_4 763#define vstore_partial_4_5 NO_STORE 764#define vstore_partial_4_6 NO_STORE 765#define vstore_partial_4_7 NO_STORE 766#define vstore_partial_4_8 NO_STORE 767#define vstore_partial_4_9 NO_STORE 768#define vstore_partial_4_10 NO_STORE 769#define vstore_partial_4_11 NO_STORE 770#define vstore_partial_4_12 NO_STORE 771#define vstore_partial_4_13 NO_STORE 772#define vstore_partial_4_14 NO_STORE 773#define vstore_partial_4_15 NO_STORE 774#define vstore_partial_4_16 NO_STORE 775 776#define vstore_partial_8_0 NO_STORE 777#define vstore_partial_8_1 vstore_partial_1 778#define vstore_partial_8_2 vstore_partial_2 779#define vstore_partial_8_3 vstore_partial_3 780#define vstore_partial_8_4 vstore_partial_4 781#define vstore_partial_8_5 vstore_partial_5 782#define vstore_partial_8_6 vstore_partial_6 783#define vstore_partial_8_7 vstore_partial_7 784#define vstore_partial_8_8 vstore_partial_8 785#define vstore_partial_8_9 NO_STORE 786#define vstore_partial_8_10 NO_STORE 787#define vstore_partial_8_11 NO_STORE 788#define vstore_partial_8_12 NO_STORE 789#define vstore_partial_8_13 NO_STORE 790#define vstore_partial_8_14 NO_STORE 791#define vstore_partial_8_15 NO_STORE 792#define vstore_partial_8_16 NO_STORE 793 794#define vstore_partial_16_0 NO_STORE 795#define vstore_partial_16_1 vstore_partial_1 796#define vstore_partial_16_2 vstore_partial_2 797#define vstore_partial_16_3 vstore_partial_3 798#define vstore_partial_16_4 vstore_partial_4 799#define vstore_partial_16_5 vstore_partial_5 800#define vstore_partial_16_6 vstore_partial_6 801#define vstore_partial_16_7 vstore_partial_7 802#define vstore_partial_16_8 vstore_partial_8 803#define vstore_partial_16_9 vstore_partial_9 804#define vstore_partial_16_10 vstore_partial_10 805#define vstore_partial_16_11 vstore_partial_11 806#define vstore_partial_16_12 vstore_partial_12 807#define vstore_partial_16_13 vstore_partial_13 808#define vstore_partial_16_14 vstore_partial_14 809#define vstore_partial_16_15 vstore_partial_15 810#define vstore_partial_16_16 vstore_partial_16 811 812 813#define vstore_partial_1(DATA, OFFSET, PTR) \ 814 vstore1(DATA.s0, OFFSET, PTR); 815 816#define vstore_partial_2(DATA, OFFSET, PTR) \ 817 vstore2(DATA.s01, OFFSET, PTR); 818 819#define vstore_partial_3(DATA, OFFSET, PTR) \ 820 vstore3(DATA.s012, OFFSET, PTR); 821 822#define vstore_partial_4(DATA, OFFSET, PTR) \ 823 vstore4(DATA.s0123, OFFSET, PTR); 824 825#define vstore_partial_5(DATA, OFFSET, PTR) \ 826 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 827 vstore1(DATA.s4, OFFSET, PTR + 4); 828 829#define vstore_partial_6(DATA, OFFSET, PTR) \ 830 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 831 vstore_partial_2(DATA.s45, OFFSET, PTR + 4); 832 833#define vstore_partial_7(DATA, OFFSET, PTR) \ 834 vstore_partial_4(DATA.s0123, OFFSET, PTR); \ 835 vstore_partial_3(DATA.s456, OFFSET, PTR + 4); 836 837#define 
vstore_partial_8(DATA, OFFSET, PTR) \ 838 vstore8(DATA.s01234567, OFFSET, PTR); 839 840#define vstore_partial_9(DATA, OFFSET, PTR) \ 841 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 842 vstore1(DATA.s8, OFFSET, PTR + 8); 843 844#define vstore_partial_10(DATA, OFFSET, PTR) \ 845 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 846 vstore_partial_2(DATA.s89, OFFSET, PTR + 8); 847 848#define vstore_partial_11(DATA, OFFSET, PTR) \ 849 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 850 vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); 851 852#define vstore_partial_12(DATA, OFFSET, PTR) \ 853 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 854 vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); 855 856#define vstore_partial_13(DATA, OFFSET, PTR) \ 857 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 858 vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); 859 860#define vstore_partial_14(DATA, OFFSET, PTR) \ 861 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 862 vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); 863 864#define vstore_partial_15(DATA, OFFSET, PTR) \ 865 vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ 866 vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); 867 868#define vstore_partial_16(DATA, OFFSET, PTR) \ 869 vstore16(DATA, OFFSET, PTR); 870 871 872 873 874 875#define convert_float_sat convert_float 876#define convert_float1_sat convert_float 877#define convert_float2_sat convert_float2 878#define convert_float3_sat convert_float3 879#define convert_float4_sat convert_float4 880#define convert_float8_sat convert_float8 881#define convert_float16_sat convert_float16 882#define convert_half_sat convert_float 883#define convert_half1_sat convert_half 884#define convert_half2_sat convert_half2 885#define convert_half3_sat convert_half3 886#define convert_half4_sat convert_half4 887#define convert_half8_sat convert_half8 888#define convert_half16_sat convert_half16 889 890#define convert_float1 convert_float 891#define convert_half1 convert_half 892#define convert_char1 convert_char 893#define convert_uchar1 convert_uchar 894#define convert_short1 convert_short 895#define convert_ushort1 convert_ushort 896#define convert_int1 convert_int 897#define convert_uint1 convert_uint 898#define convert_long1 convert_long 899#define convert_ulong1 convert_ulong 900#define convert_double1 convert_double 901 902#define convert_char1_sat convert_char_sat 903#define convert_uchar1_sat convert_uchar_sat 904#define convert_uchar2_sat convert_uchar2_sat 905#define convert_uchar3_sat convert_uchar3_sat 906#define convert_uchar4_sat convert_uchar4_sat 907#define convert_uchar8_sat convert_uchar8_sat 908#define convert_uchar16_sat convert_uchar16_sat 909#define convert_short1_sat convert_short_sat 910#define convert_ushort1_sat convert_ushort_sat 911#define convert_int1_sat convert_int_sat 912#define convert_uint1_sat convert_uint_sat 913#define convert_long1_sat convert_long_sat 914#define convert_ulong1_sat convert_ulong_sat 915#define convert_double1_sat convert_double_sat 916 917#define VEC_DATA_TYPE_STR(type, size) type##size 918#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) 919 920#define CONVERT_STR(x, type) (convert_##type((x))) 921#define CONVERT(x, type) CONVERT_STR(x, type) 922 923#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) 924#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) 925 926#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) 927#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) 928 
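// Illustrative note (not in the original header): the *_STR indirection above exists so that
// macro arguments are fully expanded before token pasting. For example, assuming a kernel is
// built with -DDATA_TYPE=uchar and a vector width of 4, the expression
//   CONVERT_SAT(acc, VEC_DATA_TYPE(DATA_TYPE, 4))
// first expands VEC_DATA_TYPE(DATA_TYPE, 4) to uchar4 and then becomes
//   (convert_uchar4_sat((acc)))
// i.e. a saturating conversion of the hypothetical accumulator vector 'acc' to uchar4.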
929#define select_vec_dt_uchar(size) uchar##size 930#define select_vec_dt_char(size) char##size 931#define select_vec_dt_ushort(size) ushort##size 932#define select_vec_dt_short(size) short##size 933#define select_vec_dt_half(size) short##size 934#define select_vec_dt_uint(size) uint##size 935#define select_vec_dt_int(size) int##size 936#define select_vec_dt_float(size) int##size 937#define select_vec_dt_ulong(size) ulong##size 938#define select_vec_dt_long(size) long##size 939 940#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) 941#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) 942#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) 943 944#define signed_int_vec_dt_uchar(size) char##size 945#define signed_int_vec_dt_char(size) char##size 946#define signed_int_vec_dt_ushort(size) short##size 947#define signed_int_vec_dt_short(size) short##size 948#define signed_int_vec_dt_half(size) short##size 949#define signed_int_vec_dt_uint(size) int##size 950#define signed_int_vec_dt_int(size) int##size 951#define signed_int_vec_dt_float(size) int##size 952#define signed_int_vec_dt_ulong(size) long##size 953#define signed_int_vec_dt_long(size) long##size 954 955#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) 956#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) 957#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) 958 959#define sum_reduce_1(x) (x) 960#define sum_reduce_2(x) ((x).s0) + ((x).s1) 961#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) 962#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) 963#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) 964#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) 965 966#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) 967#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) 968 969#define prod_reduce_1(x) (x) 970#define prod_reduce_2(x) ((x).s0) * ((x).s1) 971#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) 972#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) 973#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) 974#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) 975 976#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) 977#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) 978 979#define max_reduce_1(x) (x) 980#define max_reduce_2(x) max(((x).s0), ((x).s1)) 981#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) 982#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) 983#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) 984#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) 985 986#define MAX_REDUCE_STR(x, size) max_reduce_##size(x) 987#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) 988 989#define VECTOR_DECLARATION(name) \ 990 __global uchar *name##_ptr, \ 991 uint name##_stride_x, \ 992 uint name##_step_x, \ 993 uint name##_offset_first_element_in_bytes 994 995#define IMAGE_DECLARATION(name) \ 996 __global uchar *name##_ptr, \ 997 uint name##_stride_x, \ 998 uint name##_step_x, \ 999 uint name##_stride_y, \ 1000 uint name##_step_y, \ 1001 uint name##_offset_first_element_in_bytes 1002 1003#define TENSOR3D_DECLARATION(name) \ 1004 __global uchar *name##_ptr, \ 1005 uint name##_stride_x, \ 1006 uint name##_step_x, \ 1007 uint 
name##_stride_y, \ 1008 uint name##_step_y, \ 1009 uint name##_stride_z, \ 1010 uint name##_step_z, \ 1011 uint name##_offset_first_element_in_bytes 1012 1013#define TENSOR4D_DECLARATION(name) \ 1014 __global uchar *name##_ptr, \ 1015 uint name##_stride_x, \ 1016 uint name##_step_x, \ 1017 uint name##_stride_y, \ 1018 uint name##_step_y, \ 1019 uint name##_stride_z, \ 1020 uint name##_step_z, \ 1021 uint name##_stride_w, \ 1022 uint name##_step_w, \ 1023 uint name##_offset_first_element_in_bytes 1024 1025#define TENSOR5D_DECLARATION(name) \ 1026 __global uchar *name##_ptr, \ 1027 uint name##_stride_x, \ 1028 uint name##_step_x, \ 1029 uint name##_stride_y, \ 1030 uint name##_step_y, \ 1031 uint name##_stride_z, \ 1032 uint name##_step_z, \ 1033 uint name##_stride_w, \ 1034 uint name##_step_w, \ 1035 uint name##_stride_v, \ 1036 uint name##_step_v, \ 1037 uint name##_offset_first_element_in_bytes 1038 1039#define CONVERT_TO_VECTOR_STRUCT(name) \ 1040 update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) 1041 1042#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ 1043 update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) 1044 1045#define CONVERT_TO_IMAGE_STRUCT(name) \ 1046 update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) 1047 1048#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ 1049 update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) 1050 1051#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ 1052 update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) 1053 1054#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ 1055 update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) 1056 1057#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ 1058 update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) 1059 1060#define CONVERT_TO_TENSOR3D_STRUCT(name) \ 1061 update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ 1062 name##_stride_z, name##_step_z) 1063 1064#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ 1065 update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) 1066 1067#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ 1068 update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ 1069 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) 1070 1071#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ 1072 update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) 1073 1074#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ 1075 tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ 1076 name##_stride_z, 
name##_step_z)

// Work-item views of 1D/2D/3D/4D tensors: a byte pointer plus per-dimension byte strides.
typedef struct Vector
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
} Vector;

typedef struct Image
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
} Image;

typedef struct Tensor3D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
} Tensor3D;

typedef struct Tensor4D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
    int stride_w;
} Tensor4D;

// Build a Vector whose pointer is advanced to the element owned by this work-item.
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
    Vector vector =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
    };
    vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
    return vector;
}

// Build an Image whose pointer is advanced using the X/Y global ids.
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
    return img;
}

// Build an Image view of a 3D tensor, advancing the pointer by the X/Y/Z global ids.
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Image img =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return img;
}

// Build a Tensor3D whose pointer is advanced using the X/Y/Z global ids.
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return tensor;
}

// Like update_tensor3D_workitem_ptr, but the pointer is left at the start of the tensor.
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z
    };
    return tensor;
}

// Build a Tensor4D; the Z global id is split with mod_size: (gid2 % mod_size) selects z, (gid2 / mod_size) selects w.
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
                                             uint step_w,
                                             uint mod_size)
{
    Tensor4D tensor =
    {
        .ptr                           = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x                      = stride_x,
        .stride_y                      = stride_y,
        .stride_z                      = stride_z,
        .stride_w                      = stride_w
    };

    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
    return tensor;
}

// Byte-offset helpers into the structures above.
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}

inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}

inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}

inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}

// Convert a linear element index into a byte address inside a width x height x depth tensor.
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}

#endif // ARM_COMPUTE_HELPER_H

// Max unpooling (phase 2): scatter each input value to the output position given by its stored linear index.
__kernel void max_unpooling_layer_2(
    TENSOR3D_DECLARATION(input),
    TENSOR3D_DECLARATION(output),
    TENSOR3D_DECLARATION(indices))
{
    Tensor3D input   = CONVERT_TO_TENSOR3D_STRUCT(input);
    Tensor3D output  = CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(output);
    Tensor3D indices = CONVERT_TO_TENSOR3D_STRUCT(indices);

    unsigned int index = *((__global unsigned int *)indices.ptr);
    DATA_TYPE    value = *((__global DATA_TYPE *)input.ptr);

    *((__global DATA_TYPE *)tensor3D_index2ptr(&output, WIDTH_DST, HEIGHT_DST, DEPTH_DST, index)) = value;
})"