/aosp_15_r20/art/compiler/utils/riscv64/
  assembler_riscv64.cc
    3839  void Riscv64Assembler::VAdd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VAdd_vv()
    3860  void Riscv64Assembler::VSub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VSub_vv()
    3890  void Riscv64Assembler::VMinu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VMinu_vv()
    3904  void Riscv64Assembler::VMin_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VMin_vv()
    3918  void Riscv64Assembler::VMaxu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VMaxu_vv()
    3932  void Riscv64Assembler::VMax_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VMax_vv()
    3946  void Riscv64Assembler::VAnd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VAnd_vv()
    3967  void Riscv64Assembler::VOr_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VOr_vv()
    3987  void Riscv64Assembler::VXor_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VXor_vv()
    4010  void Riscv64Assembler::VRgather_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {   in VRgather_vv()
    [all …]
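The Riscv64Assembler entry points above emit RVV vector-vector ALU instructions, with vs1 as the second vector source. As a rough illustration of what such an emitter packs together, here is a minimal standalone sketch based on the RISC-V Vector 1.0 encoding (funct6 | vm | vs2 | vs1 | funct3 | vd | opcode); EncodeVAddVV and its parameters are hypothetical names, not ART's actual helpers.

#include <cstdint>
#include <cstdio>

// Hypothetical standalone encoder for vadd.vv vd, vs2, vs1[, v0.t].
uint32_t EncodeVAddVV(uint32_t vd, uint32_t vs2, uint32_t vs1, bool masked) {
  const uint32_t kOpcodeOpV = 0x57u;     // OP-V major opcode
  const uint32_t kFunct3OPIVV = 0x0u;    // integer vector-vector form
  const uint32_t kFunct6VAdd = 0x00u;    // funct6 selecting vadd
  const uint32_t vm = masked ? 0u : 1u;  // vm = 1 means unmasked
  return (kFunct6VAdd << 26) | (vm << 25) | (vs2 << 20) |
         (vs1 << 15) | (kFunct3OPIVV << 12) | (vd << 7) | kOpcodeOpV;
}

int main() {
  std::printf("vadd.vv v1, v2, v3 -> 0x%08x\n", EncodeVAddVV(1, 2, 3, /*masked=*/false));
}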
/aosp_15_r20/art/test/123-compiler-regressions-mt/src/
  Main.java
    69  int[] vs1 = values;   in thread2() [local]
    99  int[] vs1;   in thread2() [local]
/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib-ng/arch/x86/
  adler32_avx512_vnni.c
    52   __m512i vs1, vs2;   in adler32_avx512_vnni() [local]
    149  __m256i vs1, vs2;   in adler32_fold_copy_avx512_vnni() [local]
  adler32_avx512_tpl.h
    58   __m512i vs1 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler0));   [local]
  adler32_sse42.c
    28   __m128i vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,   in adler32_fold_copy_sse42() [local]
  adler32_ssse3.c
    41   __m128i vbuf, vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,   in adler32_ssse3() [local]
  adler32_avx2_tpl.h
    50   __m256i vs1, vs2;   [local]
/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib-ng/arch/power/
  adler32_power8.c
    77   vector unsigned int vs1 = { 0 };   in adler32_power8() [local]
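The zlib-ng files in the two directories above are SIMD ports of Adler-32; their vs1/vs2 vectors hold lane-wise pieces of the checksum's two running sums, folded back together at the end. For orientation, a plain scalar reference for the same arithmetic (adler32_ref is a made-up name; real implementations also defer the expensive modulo across blocks):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Scalar Adler-32: s1 is the byte sum, s2 the sum of the running s1 values,
// both reduced modulo the largest prime below 2^16. The SIMD variants keep
// these two sums in vector accumulators (vs1, vs2).
uint32_t adler32_ref(uint32_t adler, const uint8_t* buf, size_t len) {
  const uint32_t kBase = 65521u;
  uint32_t s1 = adler & 0xFFFFu;
  uint32_t s2 = (adler >> 16) & 0xFFFFu;
  for (size_t i = 0; i < len; ++i) {
    s1 = (s1 + buf[i]) % kBase;
    s2 = (s2 + s1) % kBase;
  }
  return (s2 << 16) | s1;
}

int main() {
  const char msg[] = "Wikipedia";
  // Prints 0x11E60398, the well-known Adler-32 of "Wikipedia".
  std::printf("0x%08X\n", adler32_ref(1u, reinterpret_cast<const uint8_t*>(msg), sizeof msg - 1));
}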
/aosp_15_r20/external/sdv/vsomeip/third_party/boost/numeric/ublas/test/
  test71.cpp
    122  ublas::vector_slice<V> vs1 (v1, ublas::slice (0, 1, N)),   in operator ()() [local]
  test31.cpp
    138  ublas::vector_slice<V> vs1 (v1, ublas::slice (0, 1, N)),   in operator ()() [local]
  test11.cpp
    158  ublas::vector_slice<V> vs1 (v1, ublas::slice (0, 1, N)),   in operator ()() [local]
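In these Boost.uBLAS tests, ublas::slice(start, stride, size) selects size elements starting at start with the given stride, and vector_slice is a writable proxy over the underlying vector. A small usage sketch (the vector contents are invented for illustration):

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <iostream>

int main() {
  namespace ublas = boost::numeric::ublas;
  const std::size_t N = 3;

  ublas::vector<double> v1(3 * N);
  for (std::size_t i = 0; i < v1.size(); ++i) v1(i) = static_cast<double>(i);

  // slice(0, 1, N): the first N elements of v1, viewed through a proxy.
  ublas::vector_slice<ublas::vector<double> > vs1(v1, ublas::slice(0, 1, N));
  vs1(0) = 42.0;  // writes through the proxy into v1(0)

  std::cout << vs1 << " " << v1 << std::endl;
}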
/aosp_15_r20/external/fbjni/test/jni/
  iterator_tests.cpp
    35   std::vector<std::string> vs1;   in nativeTestListIterator() [local]
/aosp_15_r20/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
  scalar-rr2-p5-x2.c
    61   const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);   in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2() [local]
  scalar-rr2-p5-x2-acc2.c
    62   const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);   in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2_acc2() [local]
  scalar-rr2-lut64-p2-x2.c
    78   const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1);   in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2() [local]
  scalar-rr2-lut64-p2-x2-acc2.c
    79   const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1);   in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2_acc2() [local]
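In these XNNPACK exp kernels, vs1 is the scale factor 2^n: a magic-bias addition has left the rounded exponent n in the low mantissa bits of vn1, and shifting the raw bits left by 23 drops n into the float's exponent field (the LUT variants instead add an exponent offset to a table entry). A standalone sketch of the shift-based trick; the bias and log2(e) constants are representative values, not copied from the generated files:

#include <cstdint>
#include <cstring>
#include <cmath>
#include <cstdio>

static uint32_t float_as_uint32(float f) { uint32_t u; std::memcpy(&u, &f, sizeof u); return u; }
static float uint32_as_float(uint32_t u) { float f; std::memcpy(&f, &u, sizeof f); return f; }

int main() {
  // Adding a magic bias of about 1.5*2^23 makes the float's ULP equal to 1,
  // so round(x*log2(e)) lands in vn's low mantissa bits (offset by 127 here).
  const float vmagic_bias = 0x1.8000FEp23f;
  const float vlog2e = 0x1.715476p+0f;

  const float x = -3.25f;
  const float vn = x * vlog2e + vmagic_bias;

  // Shifting the raw bits left by 23 moves (n + 127) into the exponent field
  // with a zero mantissa, i.e. it constructs the bits of 2^n directly.
  const float vs = uint32_as_float(float_as_uint32(vn) << 23);

  const float n = vn - vmagic_bias;  // the recovered integer n
  std::printf("n=%g  vs=%g  exp2(n)=%g\n", n, vs, std::exp2(n));
}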
/aosp_15_r20/external/XNNPACK/src/f32-velu/gen/
  velu-scalar-rr2-p6-x2.c
    54   float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);   in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() [local]
  velu-wasm-rr2-p6-x2.c
    54   float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);   in xnn_f32_velu_ukernel__wasm_rr2_p6_x2() [local]
  velu-wasm-rr2-lut16-p3-x2.c
    62   float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);   in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2() [local]
  velu-scalar-rr2-lut16-p3-x2.c
    62   float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);   in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2() [local]
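For context, the activation these velu kernels vectorize is ELU. A plain scalar reference (simplified: elu_ref is a made-up name, and the generated kernels build the exponential with the reconstruction shown above rather than calling expm1):

#include <cmath>
#include <cstdio>

// Reference ELU: identity for positive inputs, alpha*(e^x - 1) otherwise.
float elu_ref(float x, float alpha) {
  return x > 0.0f ? x : alpha * std::expm1(x);
}

int main() {
  std::printf("%g\n", elu_ref(-1.0f, 1.0f));  // about -0.632121
}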
/aosp_15_r20/external/XNNPACK/src/f32-vsigmoid/gen/
  vsigmoid-scalar-rr2-p5-div-x2.c
    50   const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);   in xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x2() [local]
  vsigmoid-scalar-rr2-lut64-p2-div-x2.c
    55   const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx1] + ve1);   in xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x2() [local]
  vsigmoid-scalar-rr2-lut2048-p1-div-x2.c
    55   const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx1] + ve1);   in xnn_f32_vsigmoid_ukernel__scalar_rr2_lut2048_p1_div_x2() [local]
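The sigmoid "div" kernels follow the usual numerically stable shape: evaluate e = exp(-|x|), divide, and reflect for positive inputs. A scalar sketch of that shape (sigmoid_ref is a made-up name; the exact control flow of the generated kernels may differ):

#include <cmath>
#include <cstdio>

// sigmoid(x) = 1/(1 + e^-x), evaluated from e = e^-|x| so it stays bounded.
float sigmoid_ref(float x) {
  const float e = std::exp(-std::fabs(x));
  const float f = e / (1.0f + e);   // sigmoid(-|x|)
  return x > 0.0f ? 1.0f - f : f;
}

int main() {
  std::printf("%g\n", sigmoid_ref(0.0f));  // 0.5
}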
/aosp_15_r20/external/XNNPACK/src/f32-vscaleextexp/gen/
  avx2-p5-x16.c
    105  …const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vm…   in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16() [local]
/aosp_15_r20/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
  avx2-p5-x16.c
    62   const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));   in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() [local]
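The AVX2 kernels in the last two directories express the same exponent reconstruction with integer intrinsics: cast the float vector to __m256i, shift each lane left by 23, cast back. A standalone sketch that needs AVX2 and FMA; the constants are the same illustrative values as in the scalar sketch above:

#include <immintrin.h>
#include <cstdio>

int main() {
  // vn = x*log2(e) + magic_bias puts round(x*log2(e)) in the low mantissa
  // bits of each lane; a 23-bit left shift of the lane bits yields 2^n per lane.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);

  const __m256 vx = _mm256_set_ps(3.0f, 2.0f, 1.0f, 0.0f, -1.0f, -2.0f, -3.0f, -4.0f);
  const __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
  const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

  float out[8];
  _mm256_storeu_ps(out, vs);
  for (int i = 0; i < 8; ++i) std::printf("%.6g ", out[i]);  // per-lane 2^round(x*log2(e))
  std::printf("\n");
}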