// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f16_vlrelu_ukernel__f16c_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);

  // The slope is pre-broadcast across all 8 float lanes of the params block.
  const __m256 vslope = _mm256_load_ps(params->avx.slope);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  $if BATCH_TILE > 8:
    // Main loop: process ${BATCH_TILE} half-precision elements per iteration.
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const __m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
      $for N in range(8, BATCH_TILE, 8):
        const __m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N})));
      i += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 8):
        __m256 vacc${ABC[N:N+8]} = _mm256_mul_ps(vx${ABC[N:N+8]}, vslope);

      // BLENDV keys on the sign bit of the mask (vx): negative elements take
      // the scaled value x * slope, non-negative elements pass through as x.
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_blendv_ps(vx${ABC[N:N+8]}, vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});

      _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc${ABC[0:8]}, _MM_FROUND_NO_EXC));
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (o + ${N}), _mm256_cvtps_ph(vacc${ABC[N:N+8]}, _MM_FROUND_NO_EXC));
      o += ${BATCH_TILE};
    }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    __m256 vacc = _mm256_mul_ps(vx, vslope);
    vacc = _mm256_blendv_ps(vx, vacc, vx);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC));
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-7 elements: the full-width load may read past the end of
    // the input (permitted under XNN_OOB_READS), while the stores are split
    // into 4-/2-/1-element pieces so no out-of-bounds write occurs.
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));

    __m256 vacc = _mm256_mul_ps(vx, vslope);
    vacc = _mm256_blendv_ps(vx, vacc, vx);

    __m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = _mm_extract_epi16(vh, 0);
    }
  }
}
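
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the kernel. It targets the x8
// specialization generated from this template
// (xnn_f16_vlrelu_ukernel__f16c_x8) and assumes the params union exposes 8
// pre-broadcast slope floats at params.avx.slope, as the _mm256_load_ps above
// requires; real callers would fill the params through XNNPACK's init helpers
// rather than by hand. The guard macro XNN_F16_VLRELU_EXAMPLE_MAIN is
// hypothetical and exists only to keep this sketch out of normal builds.
// ---------------------------------------------------------------------------
#ifdef XNN_F16_VLRELU_EXAMPLE_MAIN
#include <stdio.h>

int main(void) {
  // Broadcast a slope of 0.125 across all 8 lanes of the AVX params block.
  union xnn_f16_lrelu_params params;
  for (int k = 0; k < 8; k++) {
    params.avx.slope[k] = 0.125f;
  }

  // 8 half-precision inputs spanning negative and non-negative values,
  // built with the F16C scalar conversion intrinsic.
  uint16_t x[8];
  uint16_t y[8];
  for (int n = 0; n < 8; n++) {
    x[n] = _cvtss_sh((float) (n - 4), _MM_FROUND_NO_EXC);
  }

  // batch is given in bytes of input, per the assertions in the kernel.
  xnn_f16_vlrelu_ukernel__f16c_x8(8 * sizeof(uint16_t), x, y, &params);

  // Negative inputs come back scaled by the slope; the rest pass through.
  for (int n = 0; n < 8; n++) {
    printf("lrelu(%g) = %g\n", (double) _cvtsh_ss(x[n]), (double) _cvtsh_ss(y[n]));
  }
  return 0;
}
#endif  // XNN_F16_VLRELU_EXAMPLE_MAIN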