// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>


$_MM256_OP_PS = {
$  "ADD": lambda x: "_mm256_add_ps(%s, vb)" % x,
$  "DIV": lambda x: "_mm256_div_ps(%s, vb)" % x,
$  "RDIV": lambda x: "_mm256_div_ps(vb, %s)" % x,
$  "MAX": lambda x: "_mm256_max_ps(%s, vb)" % x,
$  "MIN": lambda x: "_mm256_min_ps(%s, vb)" % x,
$  "MUL": lambda x: "_mm256_mul_ps(%s, vb)" % x,
$  "SUB": lambda x: "_mm256_sub_ps(%s, vb)" % x,
$  "RSUB": lambda x: "_mm256_sub_ps(vb, %s)" % x,
$  "SQRDIFF": lambda x: "_mm256_sub_ps(%s, vb)" % x,
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f16_default_params", "MINMAX": "xnn_f16_minmax_params"}[ACTIVATION]
void xnn_f16_v${OP.lower()}c${SUFFIX}_ukernel__f16c_x${BATCH_TILE}(
    size_t n,
    const void* restrict a_ptr,
    const void* restrict b_ptr,
    void* restrict y_ptr,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(a_ptr != NULL);
  assert(b_ptr != NULL);
  assert(y_ptr != NULL);

  const uint16_t* a = (const uint16_t*) a_ptr;
  const uint16_t* b = (const uint16_t*) b_ptr;
  uint16_t* y = (uint16_t*) y_ptr;

  $if ACTIVATION == "MINMAX":
    const __m256 vy_min = _mm256_load_ps(params->avx.min);
    const __m256 vy_max = _mm256_load_ps(params->avx.max);

  // The broadcast operand is loaded and converted from f16 to f32 once, outside the loops.
  const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
  $if BATCH_TILE > 8:
    // Main loop: process ${BATCH_TILE} f16 elements per iteration.
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const __m256 va${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
      $for N in range(8, BATCH_TILE, 8):
        const __m256 va${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + ${N})));
      a += ${BATCH_TILE};

      // Compute in f32, then round through f16 to match native f16 arithmetic.
      $for N in range(0, BATCH_TILE, 8):
        __m256 vy${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(${_MM256_OP_PS("va" + ABC[N:N+8])}, _MM_FROUND_NO_EXC));

      $if OP == "SQRDIFF":
        $for N in range(0, BATCH_TILE, 8):
          vy${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy${ABC[N:N+8]}, vy${ABC[N:N+8]}), _MM_FROUND_NO_EXC));

      $if ACTIVATION == "MINMAX":
        $for N in range(0, BATCH_TILE, 8):
          vy${ABC[N:N+8]} = _mm256_max_ps(vy${ABC[N:N+8]}, vy_min);

        $for N in range(0, BATCH_TILE, 8):
          vy${ABC[N:N+8]} = _mm256_min_ps(vy${ABC[N:N+8]}, vy_max);

      _mm_storeu_si128((__m128i*) y, _mm256_cvtps_ph(vy${ABC[0:8]}, _MM_FROUND_NO_EXC));
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (y + ${N}), _mm256_cvtps_ph(vy${ABC[N:N+8]}, _MM_FROUND_NO_EXC));
      y += ${BATCH_TILE};
    }
  // Process blocks of 8 f16 elements.
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    a += 8;

    __m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(${_MM256_OP_PS("va")}, _MM_FROUND_NO_EXC));
    $if OP == "SQRDIFF":
      vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_NO_EXC));

    $if ACTIVATION == "MINMAX":
      vy = _mm256_max_ps(vy, vy_min);
      vy = _mm256_min_ps(vy, vy_max);

    _mm_storeu_si128((__m128i*) y, _mm256_cvtps_ph(vy, _MM_FROUND_NO_EXC));
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    // Tail: compute a full vector of 8 (XNN_OOB_READS permits the over-read),
    // then store only the final 1-7 elements.
    const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));

    __m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(${_MM256_OP_PS("va")}, _MM_FROUND_NO_EXC));
    $if OP == "SQRDIFF":
      vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_NO_EXC));

    $if ACTIVATION == "MINMAX":
      vy = _mm256_max_ps(vy, vy_min);
      vy = _mm256_min_ps(vy, vy_max);

    __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_NO_EXC);
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) y, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      y += 4;
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(y, vh);
      vh = _mm_srli_epi64(vh, 32);
      y += 2;
    }
    if (n & (1 * sizeof(uint16_t))) {
      *y = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}
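
// Illustrative expansion (a sketch, not generator output): instantiating this
// template with OP=ADD, ACTIVATION=MINMAX, and BATCH_TILE=8 should produce a
// kernel along the following lines. With BATCH_TILE == 8 the unrolled main
// loop above drops out, so the 8-element loop does the bulk of the work:
//
//   void xnn_f16_vaddc_minmax_ukernel__f16c_x8(
//       size_t n,
//       const void* restrict a_ptr,
//       const void* restrict b_ptr,
//       void* restrict y_ptr,
//       const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
//   {
//     ...
//     const __m256 vy_min = _mm256_load_ps(params->avx.min);
//     const __m256 vy_max = _mm256_load_ps(params->avx.max);
//
//     const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
//     for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
//       const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
//       a += 8;
//
//       __m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_NO_EXC));
//
//       vy = _mm256_max_ps(vy, vy_min);
//       vy = _mm256_min_ps(vy, vy_max);
//
//       _mm_storeu_si128((__m128i*) y, _mm256_cvtps_ph(vy, _MM_FROUND_NO_EXC));
//       y += 8;
//     }
//     ... (1-7 element tail as above)
//   }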