Definitions of the local variable va4 (hit line number, matched line; enclosing function in parentheses):

/aosp_15_r20/external/XNNPACK/src/f32-gemm/gen/

6x2-minmax-neon-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64)
   86  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  111  const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;

5x8-minmax-neon-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_5x8__neon_lane_ld64)
   84  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  118  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;

5x8-minmax-neonfma-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_5x8__neonfma_lane_ld64)
   84  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  118  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;

5x8s4-wasmrelaxedsimd-fma.c  (xnn_f32_gemm_ukernel_5x8s4__wasmrelaxedsimd_fma)
   88  v128_t va4 = wasm_v128_load(a4);
  179  v128_t va4 = wasm_v128_load(a4);

5x8s4-wasmsimd.c  (xnn_f32_gemm_ukernel_5x8s4__wasmsimd)
   88  v128_t va4 = wasm_v128_load(a4);
  179  v128_t va4 = wasm_v128_load(a4);

6x2-minmax-neonfma-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_6x2__neonfma_lane_ld64)
   86  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  141  const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;

5x8s4-minmax-sse.c  (xnn_f32_gemm_minmax_ukernel_5x8s4__sse)
   88  __m128 va4 = _mm_loadu_ps(a4);
  179  __m128 va4 = _mm_loadu_ps(a4);

5x8s4-minmax-wasmsimd-arm.c  (xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_arm)
   90  v128_t va4 = wasm_v128_load(a4);
  181  v128_t va4 = wasm_v128_load(a4);

6x8-minmax-neon-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_6x8__neon_lane_ld64)
   92  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  131  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;

5x8s4-relu-wasmsimd.c  (xnn_f32_gemm_relu_ukernel_5x8s4__wasmsimd)
   88  v128_t va4 = wasm_v128_load(a4);
  179  v128_t va4 = wasm_v128_load(a4);

6x8-minmax-neonfma-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_6x8__neonfma_lane_ld64)
   92  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  131  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
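
The neon-lane-ld64 hits above all follow one load pattern: the main loop reads two A-row elements at a time with a 64-bit vld1_f32 and consumes them by lane, while the k-remainder path broadcasts the final element (vld1q_dup_f32 in the 8-wide kernels, vld1_dup_f32 in the 2-wide ones). A minimal sketch, assuming a single accumulator and a flat weight pointer rather than the kernels' real multi-row structure:

    #include <arm_neon.h>

    /* Sketch of the ld64 pattern: two-element main-loop loads, then a
     * broadcast for the odd trailing element. The vacc/w framing is an
     * assumption, not the actual kernel structure. */
    static void ld64_loads_sketch(const float* a4, size_t k,
                                  const float* w, float32x4_t* vacc) {
      for (; k >= 2; k -= 2) {
        const float32x2_t va4 = vld1_f32(a4); a4 += 2;  /* a4[0], a4[1] */
        const float32x4_t vb0 = vld1q_f32(w); w += 4;
        const float32x4_t vb1 = vld1q_f32(w); w += 4;
        *vacc = vmlaq_lane_f32(*vacc, vb0, va4, 0);     /* use lane 0 */
        *vacc = vmlaq_lane_f32(*vacc, vb1, va4, 1);     /* use lane 1 */
      }
      if (k != 0) {
        /* Remainder: broadcast the last element across all four lanes. */
        const float32x4_t va4 = vld1q_dup_f32(a4);
        *vacc = vmlaq_f32(*vacc, va4, vld1q_f32(w));
      }
    }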

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/sgemm/

5x8-neon.c  (pytorch_sgemm_ukernel_5x8__neon)
   64  const float32x2_t va4 = vld1_f32(a4);
  134  const float32x4_t va4 = vld1q_dup_f32(a4);

6x8-neon.c  (pytorch_sgemm_ukernel_6x8__neon)
   70  const float32x2_t va4 = vld1_f32(a4);
  150  const float32x4_t va4 = vld1q_dup_f32(a4);

/aosp_15_r20/external/XNNPACK/src/f32-igemm/gen/

6x2-minmax-neon-lane-ld64.c  (xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64)
  114  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  139  const float32x2_t va4 = vld1_dup_f32(a4);

5x8s4-relu-wasmrelaxedsimd-fma.c  (xnn_f32_igemm_relu_ukernel_5x8s4__wasmrelaxedsimd_fma)
  113  v128_t va4 = wasm_v128_load(a4);
  204  v128_t va4 = wasm_v128_load(a4);

6x2-minmax-neonfma-lane-ld64.c  (xnn_f32_igemm_minmax_ukernel_6x2__neonfma_lane_ld64)
  114  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  169  const float32x2_t va4 = vld1_dup_f32(a4);

5x8s4-minmax-sse.c  (xnn_f32_igemm_minmax_ukernel_5x8s4__sse)
  113  __m128 va4 = _mm_loadu_ps(a4);
  204  __m128 va4 = _mm_loadu_ps(a4);
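
The s4 kernels in these listings (sse, wasmsimd, wasmrelaxedsimd) load four A elements per 128-bit load and reuse them by rotating the vector between multiply-accumulates, which is what the "s4" (shuffle-by-4) suffix refers to. A sketch of one such step, assuming SSE; the helper name, weight layout, and plain mul+add (rather than FMA) are illustrative:

    #include <xmmintrin.h>

    /* One "s4" step: load four A elements unaligned, then rotate the
     * vector one lane between multiply-accumulates so each element meets
     * its B panel. */
    static void s4_step_sketch(const float* a4, const float* w, __m128* vacc) {
      __m128 va4 = _mm_loadu_ps(a4);                    /* a4[0..3] */
      for (int s = 0; s < 4; s++) {
        const __m128 vb = _mm_loadu_ps(w + 4 * s);      /* next B panel */
        *vacc = _mm_add_ps(*vacc, _mm_mul_ps(va4, vb));
        /* Rotate left one lane: (a1, a2, a3, a0). */
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
      }
    }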

/aosp_15_r20/external/XNNPACK/src/f32-gemm/gen-inc/

5x8inc-minmax-neon-lane-ld64.c  (xnn_f32_gemminc_minmax_ukernel_5x8__neon_lane_ld64)
   86  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  120  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;

5x8inc-minmax-neonfma-lane-ld64.c  (xnn_f32_gemminc_minmax_ukernel_5x8__neonfma_lane_ld64)
   86  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  120  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;

5x8s4inc-minmax-wasmrelaxedsimd.c  (xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd)
   92  v128_t va4 = wasm_v128_load(a4);
  183  v128_t va4 = wasm_v128_load(a4);

5x8s4inc-minmax-sse.c  (xnn_f32_gemminc_minmax_ukernel_5x8s4__sse)
   90  __m128 va4 = _mm_loadu_ps(a4);
  181  __m128 va4 = _mm_loadu_ps(a4);

5x8s4inc-minmax-wasmsimd-x86.c  (xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_x86)
   92  v128_t va4 = wasm_v128_load(a4);
  183  v128_t va4 = wasm_v128_load(a4);

5x8s4inc-minmax-wasmrelaxedsimd-fma.c  (xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma)
   92  v128_t va4 = wasm_v128_load(a4);
  183  v128_t va4 = wasm_v128_load(a4);

6x8inc-minmax-neonfma-lane-ld64.c  (xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_lane_ld64)
   94  const float32x2_t va4 = vld1_f32(a4); a4 += 2;
  133  const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
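
For the wasmsimd and wasmrelaxedsimd hits throughout this listing, wasm_v128_load is simply an unaligned 128-bit load, so four f32 elements of A arrive per call. A sketch of a single load-and-accumulate step, assuming plain (non-fused) arithmetic; the -fma variants would use a relaxed fused multiply-add instead:

    #include <wasm_simd128.h>

    /* wasm_v128_load performs an unaligned 128-bit load: four f32 A
     * elements per call. The mul+add step is an assumption. */
    static v128_t wasm_load_step_sketch(const float* a4, v128_t vb,
                                        v128_t vacc) {
      const v128_t va4 = wasm_v128_load(a4);            /* a4[0..3] */
      return wasm_f32x4_add(vacc, wasm_f32x4_mul(va4, vb));
    }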

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/

8x8-neonfp16arith.c  (pytorch_hgemm_ukernel_8x8__neonfp16arith)
   73  const float16x4_t va4 = vld1_f16(a4);
  149  const float16x4_t va4 = vreinterpret_f16_u64(vshl_u64(
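
The second hgemm hit is cut off mid-expression, but the visible vreinterpret_f16_u64(vshl_u64( fragment matches QNNPACK's usual remainder trick: back the pointer up so a full 64-bit load stays in bounds, then shift the loaded bits right to discard the out-of-range elements. A hedged reconstruction, assuming that trick; the helper framing is not the file's actual code:

    #include <arm_neon.h>

    /* Hedged reconstruction of the truncated remainder load (requires
     * fp16 support): pre-decrement the pointer so vld1_f16 stays in
     * bounds, then right-shift (vshl_u64 with a negative count) to drop
     * the elements loaded from before a4[0], leaving a4[0..k-1] in the
     * low lanes with zeros above. */
    static float16x4_t f16_remainder_load_sketch(const float16_t* a4,
                                                 size_t k) {
      const size_t predecrement = 4 - k;                /* k is 1..3 */
      const int64x1_t vshift = vmov_n_s64(-16 * (int64_t) predecrement);
      return vreinterpret_f16_u64(vshl_u64(
          vreinterpret_u64_f16(vld1_f16(a4 - predecrement)), vshift));
    }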