// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 8 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


$ISA = {0: "avx", 3: "fma3"}[FMA]
void xnn_f32_gemm${"inc" if INC else ""}_minmax_ukernel_${MR}x${NR}s4__${ISA}_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

  // Set up one A (input) pointer and one C (output) pointer per row of the
  // ${MR}-row tile. When fewer than ${MR} rows remain, clamp the extra
  // pointers to the previous row so their results are computed but discarded.
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
    $if INC:
      // GEMMINC variant: initialize the accumulators from the acc buffer.
      $for M in range(MR):
        $for N in range(0, NR, 8):
          __m256 vacc${M}x${ABC[N:N+8]} = _mm256_load_ps(acc + ${M*NR+N});
      acc += ${MR*NR};
    $else:
      // Initialize the accumulators with the bias, stored at the start of the
      // packed weights w.
      $for N in range(0, NR, 8):
        __m256 vacc0x${ABC[N:N+8]} = _mm256_load_ps(w + ${N});
      $for M in range(1, MR):
        $for N in range(0, NR, 8):
          __m256 vacc${M}x${ABC[N:N+8]} = vacc0x${ABC[N:N+8]};
      w += ${NR};

    // Main loop: process k in groups of 4. Each va register holds 4 input
    // values duplicated into both 128-bit lanes; between the 4 sub-steps the
    // values are rotated with _mm256_permute_ps so that they line up with the
    // next shuffled group of packed weights.
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      $for M in range(MR):
        __m256 va${M} = _mm256_broadcast_ps((const __m128*) a${M});
        a${M} += 4;

      $for L in range(4):

        $for N in range(0, NR, 8):
          const __m256 vb${ABC[N:N+8]}c${L} = _mm256_load_ps(w + ${L * NR + N});

        $for N in range(0, NR, 8):
          $for M in range(MR):
            $if FMA == 3:
              vacc${M}x${ABC[N:N+8]} = _mm256_fmadd_ps(va${M}, vb${ABC[N:N+8]}c${L}, vacc${M}x${ABC[N:N+8]});
            $else:
              vacc${M}x${ABC[N:N+8]} = _mm256_add_ps(vacc${M}x${ABC[N:N+8]}, _mm256_mul_ps(va${M}, vb${ABC[N:N+8]}c${L}));

        $if L + 1 != 4:
          $for M in range(MR):
            va${M} = _mm256_permute_ps(va${M}, _MM_SHUFFLE(0, 3, 2, 1));

      w += ${4 * NR};
      k -= 4 * sizeof(float);
    }
    // Remainder loop: kc is not a multiple of 4. The broadcasts may read past
    // the end of each A row (hence XNN_OOB_READS); the zero-padded weights are
    // used as a mask (b != 0) to zero out the input lanes beyond k, so that
    // out-of-bounds values cannot corrupt the accumulators.
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        __m256 va${M} = _mm256_broadcast_ps((const __m128*) a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const __m256 vzero = _mm256_setzero_ps();
      $for L in range(4):

        $for N in range(0, NR, 8):
          const __m256 vb${ABC[N:N+8]}c${L} = _mm256_load_ps(w + ${L * NR + N});

        $for N in range(0, NR, 8):
          $for M in range(MR):
            $if FMA == 3:
              vacc${M}x${ABC[N:N+8]} = _mm256_fmadd_ps(_mm256_and_ps(va${M}, _mm256_cmp_ps(vb${ABC[N:N+8]}c${L}, vzero, _CMP_NEQ_OQ)), vb${ABC[N:N+8]}c${L}, vacc${M}x${ABC[N:N+8]});
            $else:
              vacc${M}x${ABC[N:N+8]} = _mm256_add_ps(vacc${M}x${ABC[N:N+8]}, _mm256_mul_ps(_mm256_and_ps(va${M}, _mm256_cmp_ps(vb${ABC[N:N+8]}c${L}, vzero, _CMP_NEQ_OQ)), vb${ABC[N:N+8]}c${L}));

        $if L + 1 != 4:
          $for M in range(MR):
            va${M} = _mm256_permute_ps(va${M}, _MM_SHUFFLE(0, 3, 2, 1));

      w += ${4 * NR};
    }

    // Clamp the accumulators to [min, max] before storing.
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = _mm256_max_ps(vacc${M}x${ABC[N:N+8]}, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = _mm256_min_ps(vacc${M}x${ABC[N:N+8]}, vmax);

    if XNN_LIKELY(nc >= ${NR}) {
      // Full-width store of the ${MR}x${NR} tile; rewind the A pointers for
      // the next block of output columns.
      $for M in reversed(range(MR)):
        _mm256_storeu_ps(c${M}, vacc${M}x${ABC[0:8]});
        $for N in range(8, NR, 8):
          _mm256_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      $for M in reversed(range(MR)):
        a${M} = (const float*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Partial-width store: write the remaining nc < ${NR} columns in
      // power-of-two chunks, shifting the surviving lanes down after each one.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 3:
              $for M in reversed(range(MR)):
                _mm256_storeu_ps(c${M}, vacc${M}x${ABC[0:8]});
                $for N in range(8, 1 << LOG2N, 8):
                  _mm256_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 8):
                  vacc${M}x${ABC[N:N+8]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+8]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 2:
              $for M in reversed(range(MR)):
                _mm_storeu_ps(c${M}, vacc${M}x${ABC[0:4]});

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = _mm256_extractf128_ps(vacc${M}x${ABC[0:8]}, 1);

              $for M in reversed(range(MR)):
                c${M} += 4;
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                _mm_storel_pi((__m64*) c${M}, vacc${M}x${ABC[0:4]});

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = _mm_movehl_ps(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]});

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                _mm_store_ss(c${M}, vacc${M}x${ABC[0:4]});
          }
        $if LOG2N == 3:
          $for M in reversed(range(MR)):
            __m128 vacc${M}x${ABC[0:4]} = _mm256_castps256_ps128(vacc${M}x${ABC[0:8]});

      nc = 0;
    }
  } while (nc != 0);
}
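
// A minimal instantiation sketch (an assumption, not part of the upstream
// template): with XNNPACK's xngen generator, a command along these lines would
// expand this template into a concrete 4x8 FMA3 kernel named
// xnn_f32_gemm_minmax_ukernel_4x8s4__fma3_broadcast (template and output
// paths assumed for illustration):
//
//   tools/xngen src/f32-gemm/avx-shuffle4.c.in -D MR=4 -D NR=8 -D FMA=3 -D INC=0 \
//       -o src/f32-gemm/gen/4x8s4-minmax-fma3-broadcast.c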