/aosp_15_r20/external/XNNPACK/src/f16-dwconv2d-chw/gen/

3x3s2p1-minmax-neonfp16arith-1x4.c
   83  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4() (local)
  129  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4() (local)

3x3s2p1-minmax-neonfp16arith-1x4-acc2.c
   83  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc2() (local)
  130  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc2() (local)

3x3s2p1-minmax-neonfp16arith-1x4-acc3.c
   83  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc3() (local)
  131  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc3() (local)

3x3s2p1-minmax-neonfp16arith-1x4-acc4.c
   83  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc4() (local)
  132  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x4_acc4() (local)

3x3s2p1-minmax-neonfp16arith-2x4.c
  102  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_2x4() (local)
  171  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);  in xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_2x4() (local)
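All of these f16 hits are the same construction: the odd-indexed input columns of row 2 sit in two half-registers (vi2x1357, plus either the .val[1] half of a de-interleaving load or a separately prepared vi2x9BDF), and vext_f16 with an offset of 3 splices them into the column stream shifted by one output step (columns 7, 9, B, D). Below is a minimal sketch of that lane selection only, with made-up values, assuming an ARMv8.2-A FP16-capable target (e.g. compiled with -march=armv8.2-a+fp16); it illustrates the intrinsic, not the generated kernel.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  // Pretend these are the odd-indexed columns 1,3,5,7 and 9,11,13,15 (0x9..0xF)
  // of input row 2, already split into two float16x4_t halves.
  const float16_t lo[4] = {1.0f, 3.0f, 5.0f, 7.0f};
  const float16_t hi[4] = {9.0f, 11.0f, 13.0f, 15.0f};
  const float16x4_t vi2x1357 = vld1_f16(lo);
  const float16x4_t vi2x9BDF = vld1_f16(hi);

  // vext_f16(a, b, 3) keeps the last lane of a followed by the first three
  // lanes of b: {a[3], b[0], b[1], b[2]} -> columns 7, 9, 11 (0xB), 13 (0xD).
  const float16x4_t vi2x7BDF = vext_f16(vi2x1357, vi2x9BDF, 3);

  // Widen to f32 just for printing.
  const float32x4_t v = vcvt_f32_f16(vi2x7BDF);
  printf("%g %g %g %g\n", vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1),
         vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3));  // prints: 7 9 11 13
  return 0;
}

The f32 variants below perform the identical splice with vextq_f32.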
/aosp_15_r20/external/XNNPACK/src/f32-dwconv2d-chw/gen/

3x3s2p1-minmax-neon-1x4.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4() (local)
  129  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4() (local)

3x3s2p1-minmax-neonfma-1x4.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4() (local)
  129  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4() (local)

3x3s2p1-minmax-neonfma-1x4-acc2.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc2() (local)
  130  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc2() (local)

3x3s2p1-minmax-neon-1x4-acc2.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc2() (local)
  130  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc2() (local)

3x3s2p1-minmax-neon-1x4-acc3.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc3() (local)
  131  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc3() (local)

3x3s2p1-minmax-neonfma-1x4-acc3.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc3() (local)
  131  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc3() (local)

3x3s2p1-minmax-neonfma-1x4-acc4.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc4() (local)
  132  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc4() (local)

3x3s2p1-minmax-neon-1x4-acc4.c
   83  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc4() (local)
  132  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc4() (local)
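In the f32 NEON/NEONFMA variants the first hit in each file sits in the main loop, where the odd half comes straight out of a de-interleaving vld2q_f32 (vi2x8ACE9BDF.val[1]); the second hit appears to be in the tail, which uses a separately prepared (presumably masked) vi2x9BDF. Below is a rough, self-contained sketch of that de-interleave-plus-splice pattern under those assumptions; the buffer contents and the standalone main() are invented for illustration.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  // 16 consecutive input columns of row 2 (values chosen to equal their index).
  float i2[16];
  for (int c = 0; c < 16; c++) i2[c] = (float) c;

  // Odd columns 1,3,5,7 left over from the previous step (loaded directly here).
  const float prev_odd[4] = {1.0f, 3.0f, 5.0f, 7.0f};
  const float32x4_t vi2x1357 = vld1q_f32(prev_odd);

  // De-interleave columns 8..15 into evens {8,A,C,E} and odds {9,B,D,F}.
  const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(&i2[8]);

  // Splice the previous odd half with the new one to get columns 7,9,B,D:
  // vextq_f32(a, b, 3) = {a[3], b[0], b[1], b[2]}.
  const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);

  printf("8ACE: %g %g %g %g\n",
         vgetq_lane_f32(vi2x8ACE9BDF.val[0], 0), vgetq_lane_f32(vi2x8ACE9BDF.val[0], 1),
         vgetq_lane_f32(vi2x8ACE9BDF.val[0], 2), vgetq_lane_f32(vi2x8ACE9BDF.val[0], 3));  // 8 10 12 14
  printf("7BDF: %g %g %g %g\n",
         vgetq_lane_f32(vi2x7BDF, 0), vgetq_lane_f32(vi2x7BDF, 1),
         vgetq_lane_f32(vi2x7BDF, 2), vgetq_lane_f32(vi2x7BDF, 3));                        // 7 9 11 13
  return 0;
}

The WebAssembly SIMD entries that follow are from the same f32-dwconv2d-chw/gen/ directory.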
3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4() (local)
  145  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4() (local)

3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2() (local)
  146  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2() (local)

3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4() (local)
  145  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4() (local)

3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2() (local)
  146  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2() (local)

3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4() (local)
  148  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4() (local)

3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4() (local)
  148  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4() (local)

3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3() (local)
  147  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3() (local)

3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
   97  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3() (local)
  147  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3() (local)

3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c
  107  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4() (local)
  155  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4() (local)

3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c
  107  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4() (local)
  155  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4() (local)

3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
  107  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2() (local)
  156  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2() (local)

3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
  107  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2() (local)
  156  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2() (local)
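The WebAssembly SIMD variants express the same selection with wasm_v32x4_shuffle, whose lane indices 0-3 pick from the first operand and 4-7 from the second, so (3, 4, 5, 6) again yields {a[3], b[0], b[1], b[2]}. Below is a small sketch, assuming a toolchain that provides wasm_simd128.h (e.g. clang/Emscripten with -msimd128); the values are illustrative only.

#include <wasm_simd128.h>
#include <stdio.h>

int main(void) {
  // Odd-indexed columns 1,3,5,7 and 9,11,13,15 (0x9..0xF) of input row 2.
  const v128_t vi2x1357 = wasm_f32x4_make(1.0f, 3.0f, 5.0f, 7.0f);
  const v128_t vi2x9BDF = wasm_f32x4_make(9.0f, 11.0f, 13.0f, 15.0f);

  // Lanes 3,4,5,6 of the concatenation {a0..a3, b0..b3} -> columns 7, 9, B, D.
  const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);

  printf("%g %g %g %g\n",
         wasm_f32x4_extract_lane(vi2x7BDF, 0),
         wasm_f32x4_extract_lane(vi2x7BDF, 1),
         wasm_f32x4_extract_lane(vi2x7BDF, 2),
         wasm_f32x4_extract_lane(vi2x7BDF, 3));  // prints: 7 9 11 13
  return 0;
}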