/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/
  8x8-neonfp16arith.c
    79: const float16x4_t va7 = vld1_f16(a7);  [in pytorch_hgemm_ukernel_8x8__neonfp16arith(), local]
    155: const float16x4_t va7 = vreinterpret_f16_u64(vshl_u64(  [in pytorch_hgemm_ukernel_8x8__neonfp16arith(), local]
/aosp_15_r20/external/XNNPACK/src/f16-igemm/gen/
  8x8-minmax-neonfp16arith-ld64.c
    137: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
    269: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
  8x16-minmax-neonfp16arith-ld64.c
    145: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_igemm_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
    345: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_igemm_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
/aosp_15_r20/external/XNNPACK/src/f16-gemm/gen-inc/
  8x8inc-minmax-neonfp16arith-ld64.c
    107: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
    241: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
  8x16inc-minmax-neonfp16arith-ld64.c
    115: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
    317: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
/aosp_15_r20/external/XNNPACK/src/f16-gemm/gen/
  8x8-minmax-neonfp16arith-ld64.c
    105: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
    239: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64(), local]
  8x16-minmax-neonfp16arith-ld64.c
    113: const float16x4_t va7 = vld1_f16(a7); a7 += 4;  [in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
    315: const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;  [in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64(), local]
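In all of the f16 kernels above, the first matched line sits in the main K loop (the "ld64" suffix means 64 bits, i.e. four fp16 values, of each A row are loaded per iteration) and the second sits in the K remainder, which broadcasts one A element at a time with vld1q_dup_f16. A minimal single-row sketch of that load pattern, assuming ARMv8.2-A FP16 vector arithmetic; the function name and the 1x8 shape are invented for illustration, and the real 8x8/8x16 kernels repeat the same steps for rows a0 through a7:

    #include <arm_neon.h>   /* needs __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    #include <stddef.h>

    /* Hypothetical 1x8 analogue of the 8x8/8x16 ld64 kernels listed above. */
    void f16_gemm_1x8_ld64_sketch(size_t k, const float16_t* a0,
                                  const float16_t* w, float16_t* c0) {
      float16x8_t vacc0 = vld1q_f16(w); w += 8;        /* packed bias row */
      for (; k >= 4; k -= 4) {
        /* Main loop: load 64 bits (4 halves) of A, then FMA one B row per lane. */
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;
        const float16x8_t vb0 = vld1q_f16(w); w += 8;
        vacc0 = vfmaq_lane_f16(vacc0, vb0, va0, 0);
        const float16x8_t vb1 = vld1q_f16(w); w += 8;
        vacc0 = vfmaq_lane_f16(vacc0, vb1, va0, 1);
        const float16x8_t vb2 = vld1q_f16(w); w += 8;
        vacc0 = vfmaq_lane_f16(vacc0, vb2, va0, 2);
        const float16x8_t vb3 = vld1q_f16(w); w += 8;
        vacc0 = vfmaq_lane_f16(vacc0, vb3, va0, 3);
      }
      for (; k != 0; k -= 1) {
        /* Remainder: broadcast a single A element, as in the second match. */
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t vb = vld1q_f16(w); w += 8;
        vacc0 = vfmaq_f16(vacc0, vb, va0);
      }
      vst1q_f16(c0, vacc0);
    }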
/aosp_15_r20/external/XNNPACK/src/f32-gemm/gen/
  8x8s4-minmax-neon.c
    111: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_gemm_minmax_ukernel_8x8s4__neon(), local]
    232: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_gemm_minmax_ukernel_8x8s4__neon(), local]
  8x8s4-minmax-neonfma.c
    111: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_gemm_minmax_ukernel_8x8s4__neonfma(), local]
    232: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_gemm_minmax_ukernel_8x8s4__neonfma(), local]
/aosp_15_r20/external/XNNPACK/src/f32-gemm/gen-inc/
  8x8s4inc-minmax-neon.c
    113: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_gemminc_minmax_ukernel_8x8s4__neon(), local]
    234: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_gemminc_minmax_ukernel_8x8s4__neon(), local]
  8x8s4inc-minmax-neonfma.c
    113: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_gemminc_minmax_ukernel_8x8s4__neonfma(), local]
    234: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_gemminc_minmax_ukernel_8x8s4__neonfma(), local]
/aosp_15_r20/external/XNNPACK/src/f32-igemm/gen/
  8x8s4-minmax-neon.c
    144: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_igemm_minmax_ukernel_8x8s4__neon(), local]
    266: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_igemm_minmax_ukernel_8x8s4__neon(), local]
  8x8s4-minmax-neonfma.c
    144: float32x4_t va7 = vld1q_f32(a7); a7 += 4;  [in xnn_f32_igemm_minmax_ukernel_8x8s4__neonfma(), local]
    266: float32x4_t va7 = vld1q_f32(a7); a7 = (const float*) ((uintptr_t) a7 + k);  [in xnn_f32_igemm_minmax_ukernel_8x8s4__neonfma(), local]
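In the 8x8s4 kernels above, the first matched line is the main-loop load of four A values per row; the second comes from the sub-vector remainder, which loads a full float32x4_t and then advances a7 by the leftover byte count k instead of by four elements. The "s4" suffix refers to the shuffled inner loop, where the A vector is rotated by one lane between four sub-steps so every A element is multiplied against its packed B panel. A minimal single-row sketch of the main-loop step, assuming ARMv8 NEON with FMA; the names and the 1x8 shape are invented, and the remainder path is omitted:

    #include <arm_neon.h>
    #include <stddef.h>

    /* Hypothetical 1x8 analogue of the 8x8s4 main loop; kc is an element
     * count and should be a multiple of four here, since the remainder path
     * from the second match is not reproduced. */
    void f32_gemm_1x8s4_sketch(size_t kc, const float* a0, const float* w, float* c0) {
      float32x4_t vacc0123 = vld1q_f32(w); w += 4;       /* packed bias */
      float32x4_t vacc4567 = vld1q_f32(w); w += 4;
      for (size_t k = kc; k >= 4; k -= 4) {
        float32x4_t va0 = vld1q_f32(a0); a0 += 4;        /* matched main-loop load */
        for (int s = 0; s < 4; s++) {                    /* four shuffled sub-steps */
          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
          const float32x4_t vb4567 = vld1q_f32(w); w += 4;
          vacc0123 = vfmaq_f32(vacc0123, va0, vb0123);
          vacc4567 = vfmaq_f32(vacc4567, va0, vb4567);
          va0 = vextq_f32(va0, va0, 1);                  /* rotate A by one lane */
        }
      }
      vst1q_f32(c0, vacc0123);
      vst1q_f32(c0 + 4, vacc4567);
    }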
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/
  8x8-neon.c
    109: const uint8x8_t va7 = vld1_u8(a7);  [in pytorch_q8gemm_ukernel_8x8__neon(), local]
    449: const uint8x8_t va7 = vreinterpret_u8_u64(  [in pytorch_q8gemm_ukernel_8x8__neon(), local]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/
  8x8-neon.c
    77: const uint8x8_t va7 = vld1_u8(a7);  [in pytorch_q8conv_ukernel_8x8__neon(), local]
    433: const uint8x8_t va7 = vreinterpret_u8_u64(vshl_u64(  [in pytorch_q8conv_ukernel_8x8__neon(), local]
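The second match in both QNNPACK kernels (and in the fp16 hgemm kernel listed first) is the tail-load idiom for a K remainder shorter than one 8-byte vector: load the 8 bytes that end at the last valid element, then shift the unwanted leading bytes away with vshl_u64 and a negative shift count. A standalone sketch of that idiom, under the assumption that the bytes just before a are readable, which the surrounding kernels arrange; the helper name is invented:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical helper: load the last k (1..7) bytes of a row into the
     * low lanes of a uint8x8_t without reading past the end of the row. */
    static uint8x8_t load_tail_u8(const uint8_t* a, size_t k) {
      const size_t a_predecrement = 8 - k;                   /* bytes to drop */
      const int64x1_t va_shift = vmov_n_s64(-8 * (int64_t) a_predecrement);
      /* vshl_u64 shifts left by the per-lane amount; a negative amount shifts
       * right, so the k valid bytes land in lanes 0..k-1 and the rest are zero. */
      return vreinterpret_u8_u64(
          vshl_u64(vreinterpret_u64_u8(vld1_u8(a - a_predecrement)), va_shift));
    }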
/aosp_15_r20/external/sdv/vsomeip/third_party/boost/fusion/test/compile_time/
  vector_intrinsic.cpp
    49: typedef typename boost::fusion::result_of::value_at_c<v_type, 7>::type va7;  [in test(), typedef]
/aosp_15_r20/external/XNNPACK/src/f32-vbinary/gen/
  vmaxc-wasm-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vmaxc_ukernel__wasm_x8(), local]
  vdivc-relu-wasm-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vdivc_relu_ukernel__wasm_x8(), local]
  vmulc-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vmulc_ukernel__scalar_x8(), local]
  vdivc-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vdivc_ukernel__scalar_x8(), local]
  vaddc-relu-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vaddc_relu_ukernel__scalar_x8(), local]
  vmulc-relu-wasm-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vmulc_relu_ukernel__wasm_x8(), local]
  vmaxc-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vmaxc_ukernel__scalar_x8(), local]
  vaddc-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vaddc_ukernel__scalar_x8(), local]
  vrdivc-relu-scalar-x8.c
    40: const float va7 = a[7];  [in xnn_f32_vrdivc_relu_ukernel__scalar_x8(), local]
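All of the f32-vbinary hits are the same spot in different operators: the x8 main loop of a vector-with-scalar kernel, which copies a[0] through a[7] into locals before applying the operation. A minimal sketch of the add variant, using an element count (the generated kernels may count bytes instead) and eliding the loop that handles the final n % 8 elements:

    #include <stddef.h>

    /* Hypothetical vaddc-style "scalar x8" main loop: add the broadcast
     * scalar *b to eight elements of a per iteration. */
    void f32_vaddc_scalar_x8_sketch(size_t n, const float* a, const float* b, float* y) {
      const float vb = *b;
      for (; n >= 8; n -= 8) {
        const float va0 = a[0];
        const float va1 = a[1];
        const float va2 = a[2];
        const float va3 = a[3];
        const float va4 = a[4];
        const float va5 = a[5];
        const float va6 = a[6];
        const float va7 = a[7];   /* the matched line */
        a += 8;
        y[0] = va0 + vb;
        y[1] = va1 + vb;
        y[2] = va2 + vb;
        y[3] = va3 + vb;
        y[4] = va4 + vb;
        y[5] = va5 + vb;
        y[6] = va6 + vb;
        y[7] = va7 + vb;
        y += 8;
      }
      /* The n % 8 leftovers are handled one element at a time in the real kernels. */
    }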