/aosp_15_r20/art/compiler/optimizing/

  code_generator_vector_arm64_sve.cc
    1050  __ Udot(acc.VnS(), acc.VnS(), tmp0.VnB(), tmp1.VnB());  in VisitVecDotProd()

  code_generator_vector_arm64_neon.cc
    1351  __ Udot(acc.V4S(), left.V16B(), right.V16B());  in VisitVecDotProd()
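Both ART call sites above go through VIXL's MacroAssembler. Below is a minimal sketch of the two forms, mirroring those calls; the register choices and the helper function are illustrative assumptions, not ART code, and the DotProd (plus SVE, for the second call) CPU features must be enabled on the assembler.

  #include "aarch64/macro-assembler-aarch64.h"

  using namespace vixl::aarch64;

  void EmitUdotSketch(MacroAssembler* masm) {
    // NEON form: each S lane of v0 accumulates the dot product of four
    // unsigned bytes from v1 and v2 (v0.4S += v1.16B . v2.16B).
    masm->Udot(v0.V4S(), v1.V16B(), v2.V16B());

    // SVE form: destination and addend are separate operands, so accumulating
    // in place names the accumulator twice (z0.S += z1.B . z2.B).
    masm->Udot(z0.VnS(), z0.VnS(), z1.VnB(), z2.VnB());
  }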

/aosp_15_r20/external/vixl/test/aarch64/

  test-assembler-sve-aarch64.cc
    11639  __ Udot(zd, za, zn, zm);  in SdotUdotHelper()
    11641  __ Udot(zd, za, zn, zm, index_fn);  in SdotUdotHelper()
    11976  __ Udot(z2.VnS(), z2.VnS(), z1.VnB(), z0.VnB(), 0);  in TEST_SVE()
    11978  __ Udot(z3.VnS(), z3.VnS(), z1.VnB(), z0.VnB(), 1);  in TEST_SVE()
    11981  __ Udot(z4.VnS(), z4.VnS(), z1.VnB(), z0.VnB(), 2);  in TEST_SVE()
    11984  __ Udot(z5.VnS(), z5.VnS(), z1.VnB(), z0.VnB(), 3);  in TEST_SVE()
    12056  __ Udot(z3.VnD(), z3.VnD(), z1.VnH(), z0.VnH(), 0);  in TEST_SVE()
    12058  __ Udot(z4.VnD(), z4.VnD(), z1.VnH(), z0.VnH(), 1);  in TEST_SVE()
    19699  __ Udot(z6.VnS(), z0.VnS(), z1.VnB(), z2.VnB());  in TEST_SVE()
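In the indexed SVE calls above, the trailing integer selects which group of elements of the last register feeds every dot product within each 128-bit segment; that register is restricted to a low register range in this encoding. A hedged sketch, with register choices assumed:

  // Each S lane of z2 accumulates the dot product of four bytes from z1 with
  // the group of four bytes of z0 picked by the index (group 1 is bytes 4..7
  // of each 128-bit segment of z0).
  masm->Udot(z2.VnS(), z2.VnS(), z1.VnB(), z0.VnB(), 1);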

  test-assembler-neon-aarch64.cc
    4190   __ Udot(v18.V4S(), v0.V16B(), v1.V16B());  in TEST()
    4191   __ Udot(v19.V2S(), v1.V8B(), v2.V8B());  in TEST()
    4221   __ Udot(v18.V4S(), v0.V16B(), v1.S4B(), 1);  in TEST()
    4222   __ Udot(v19.V2S(), v1.V8B(), v2.S4B(), 1);  in TEST()
    10867  __ Udot(v4.V2S(), v0.V8B(), v1.V8B());  in TEST()
    10870  __ Udot(v6.V4S(), v0.V16B(), v1.V16B());  in TEST()
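The .S4B() operand in the NEON by-element calls above names a four-byte group view of the second source, with the trailing integer choosing the group. A minimal sketch, with registers chosen arbitrarily:

  #include "aarch64/macro-assembler-aarch64.h"

  using namespace vixl::aarch64;

  void EmitUdotByElement(MacroAssembler* masm) {
    // Every S lane of v18 accumulates the dot product of four bytes from v0
    // with the single group of four bytes of v1 picked by the index
    // (index 1 selects bytes 4..7 of v1).
    masm->Udot(v18.V4S(), v0.V16B(), v1.S4B(), 1);
  }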

/aosp_15_r20/external/vixl/src/aarch64/

  macro-assembler-sve-aarch64.cc
    1898  void MacroAssembler::Udot(const ZRegister& zd,  in Udot()  function in vixl::aarch64::MacroAssembler
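The four-register signature defined here (the remaining parameters are cut off in the listing) lets callers name a destination distinct from the addend, as the test at line 19699 above does; the underlying SVE UDOT encoding is destructive, so the wrapper is presumably where any required move is inserted. A hedged usage sketch, with register numbers assumed:

  #include "aarch64/macro-assembler-aarch64.h"

  using namespace vixl::aarch64;

  void AccumulateIntoSeparateDestination(MacroAssembler* masm) {
    // z4.S = z5.S + (z6.B . z7.B); the macro layer, not the instruction,
    // allows zd (z4) to differ from the addend za (z5).
    masm->Udot(z4.VnS(), z5.VnS(), z6.VnB(), z7.VnB());
  }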