/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
concatenation_test.cc
  103  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/1,  [in TEST(), local]
  111  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2, 1, 3}}, /*axis=*/2,  [in TEST(), local]
  121  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2, 1, 3}}, /*axis=*/0,  [in TEST(), local]
  135  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2, 1, 3}}, /*axis=*/-2,  [in TEST(), local]
  148  QuantizedConcatenationOpModel m0(  [in TEST(), local]
  173  ConcatenationOpModel m0(  [in TEST(), local]
  195  ConcatenationOpModel m0({TensorType_FLOAT32, {1}}, /*axis=*/0,  [in TEST(), local]
  203  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 3}}, /*axis=*/0,  [in TEST(), local]
  215  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 3}}, /*axis=*/0,  [in TEST(), local]
  249  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/2,  [in TEST(), local]
  [all …]
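These hits are TFLite kernel tests that concatenate tensors along a given axis, including a negative axis (/*axis=*/-2) that is resolved against the tensor rank. As a plain-C++ reference for the operation under test; the helper below is an illustrative sketch, not the ConcatenationOpModel harness itself:

    #include <cstdint>
    #include <vector>

    // Reference concatenation of two tensors along `axis` (illustrative).
    // Shapes must match on every dimension except `axis`.
    std::vector<float> ConcatAxis(const std::vector<float>& a,
                                  const std::vector<float>& b,
                                  const std::vector<int>& shape_a,
                                  const std::vector<int>& shape_b, int axis) {
      const int rank = static_cast<int>(shape_a.size());
      if (axis < 0) axis += rank;  // e.g. /*axis=*/-2 on rank 5 means axis 3
      int64_t outer = 1, inner = 1;
      for (int d = 0; d < axis; ++d) outer *= shape_a[d];
      for (int d = axis + 1; d < rank; ++d) inner *= shape_a[d];
      const int64_t step_a = shape_a[axis] * inner;
      const int64_t step_b = shape_b[axis] * inner;
      std::vector<float> out;
      out.reserve(a.size() + b.size());
      for (int64_t o = 0; o < outer; ++o) {
        out.insert(out.end(), a.begin() + o * step_a, a.begin() + (o + 1) * step_a);
        out.insert(out.end(), b.begin() + o * step_b, b.begin() + (o + 1) * step_b);
      }
      return out;
    }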
/aosp_15_r20/external/libaom/aom_dsp/arm/
blend_a64_mask_neon.c
  72   uint16x8_t m0 = vmovl_u8(vld1_u8(mask + i));  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  89   uint16x8_t m0 = vmovl_u8(load_unaligned_u8_4x2(mask, mask_stride));  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  109  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride + 2 * i);  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  133  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride);  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  157  uint8x8_t m0 = vld1_u8(mask + 2 * i);  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  177  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride);  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  199  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride + i);  [in aom_lowbd_blend_a64_d16_mask_neon(), local]
  259  uint8x16_t m0 = vld1q_u8(mask + i);  [in aom_blend_a64_mask_neon(), local]
  276  uint8x8_t m0 = vld1_u8(mask);  [in aom_blend_a64_mask_neon(), local]
  291  uint8x8_t m0 = load_unaligned_u8_4x2(mask, mask_stride);  [in aom_blend_a64_mask_neon(), local]
  [all …]
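The pattern common to these hits: load eight mask bytes (vld1_u8), widen to 16 bits where the arithmetic needs headroom (vmovl_u8), and blend two sources with a 6-bit alpha, i.e. (m*src0 + (64 - m)*src1 + 32) >> 6 in libaom's A64 convention. A minimal self-contained sketch of that blend for one row of eight pixels (names are illustrative):

    #include <arm_neon.h>
    #include <cstdint>

    // Blend 8 pixels: (m*src0 + (64 - m)*src1 + 32) >> 6, with m in [0, 64].
    static inline uint8x8_t Blend8(const uint8_t *src0, const uint8_t *src1,
                                   const uint8_t *mask) {
      const uint8x8_t m0 = vld1_u8(mask);                  // 8 mask values
      const uint8x8_t m0_inv = vsub_u8(vdup_n_u8(64), m0); // complement
      const uint8x8_t s0 = vld1_u8(src0);
      const uint8x8_t s1 = vld1_u8(src1);
      uint16x8_t blend = vmull_u8(s0, m0);                 // widen: u8*u8 -> u16
      blend = vmlal_u8(blend, s1, m0_inv);                 // accumulate
      return vrshrn_n_u16(blend, 6);                       // round-shift, narrow
    }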
highbd_blend_a64_mask_neon.c
  305  uint16x8_t m0 = vmovl_u8(vld1_u8(mask + i));  [in aom_highbd_blend_a64_mask_neon(), local]
  322  uint16x8_t m0 = vmovl_u8(load_unaligned_u8_4x2(mask, mask_stride));  [in aom_highbd_blend_a64_mask_neon(), local]
  342  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride + 2 * i);  [in aom_highbd_blend_a64_mask_neon(), local]
  366  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride);  [in aom_highbd_blend_a64_mask_neon(), local]
  391  uint8x8_t m0 = vld1_u8(mask + 2 * i);  [in aom_highbd_blend_a64_mask_neon(), local]
  411  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride);  [in aom_highbd_blend_a64_mask_neon(), local]
  433  uint8x8_t m0 = vld1_u8(mask + 0 * mask_stride + i);  [in aom_highbd_blend_a64_mask_neon(), local]
highbd_blend_a64_hmask_neon.c
  48   uint16x8_t m0 = vmovl_u8(vld1_u8(mask + i));  [in aom_highbd_blend_a64_hmask_neon(), local]
  63   const uint16x8_t m0 = vmovl_u8(load_unaligned_dup_u8_4x2(mask));  [in aom_highbd_blend_a64_hmask_neon(), local]
  78   const uint16x4_t m0 =  [in aom_highbd_blend_a64_hmask_neon(), local]
avg_pred_neon.c
  169  const uint8x16_t m0 = vld1q_u8(mask_ptr);  [in aom_comp_mask_pred_neon(), local]
  191  const uint8x8_t m0 = vld1_u8(mask);  [in aom_comp_mask_pred_neon(), local]
  209  const uint8x8_t m0 = load_unaligned_u8(mask, mask_stride);  [in aom_comp_mask_pred_neon(), local]
masked_sad_neon.c
  28   uint8x16_t m0 = vld1q_u8(m);  [in masked_sad_16x1_neon(), local]
  155  uint8x8_t m0 = vld1_u8(m);  [in masked_sad_8xh_neon(), local]
  187  uint8x8_t m0 = load_unaligned_u8(m, m_stride);  [in masked_sad_4xh_neon(), local]
/aosp_15_r20/frameworks/native/libs/math/tests/
mat_test.cpp
  37   mat4 m0;  [in TEST_F(), local]
  42   mat4 m0;  [in TEST_F(), local]
  52   mat4 m0;  [in TEST_F(), local]
  83   mat4 m0;  [in TEST_F(), local]
  105  mat4 m0;  [in TEST_F(), local]
  119  mat4 m0;  [in TEST_F(), local]
  175  mat3 m0;  [in TEST_F(), local]
  180  mat3 m0;  [in TEST_F(), local]
  190  mat3 m0;  [in TEST_F(), local]
  211  mat3 m0;  [in TEST_F(), local]
  [all …]
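These fixtures repeatedly default-construct mat4/mat3 before exercising operators. A reduced sketch of that pattern, assuming (as these tests appear to) that this math library's default constructor yields the identity matrix and that operator[] indexes columns:

    #include <gtest/gtest.h>
    #include <math/mat4.h>

    // Hypothetical condensation of the tests above: a default-constructed
    // mat4 should be the identity (an assumption about this library's ctor).
    TEST(MatTestSketch, DefaultConstructsIdentity) {
      android::mat4 m0;
      for (int c = 0; c < 4; ++c) {
        for (int r = 0; r < 4; ++r) {
          EXPECT_FLOAT_EQ(c == r ? 1.0f : 0.0f, m0[c][r]);
        }
      }
    }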
/aosp_15_r20/external/lzma/C/
Sha256Opt.c
  90   #define NNN(m0, m1, m2, m3)  [argument]
  92   #define SM1(m1, m2, m3, m0) \  [argument]
  95   #define SM2(m2, m3, m0, m1) \  [argument]
  104  #define R4(k, m0, m1, m2, m3, OP0, OP1) \  [argument]
  148  __m128i m0, m1, m2, m3;  [in Sha256_UpdateBlocks_HW(), local]
  338  #define SM1(m0, m1, m2, m3) SHA256_SU0(m3, m0)  [argument]
  339  #define SM2(m0, m1, m2, m3) SHA256_SU1(m2, m0, m1)  [argument]
  340  #define NNN(m0, m1, m2, m3)  [argument]
  342  #define R4(k, m0, m1, m2, m3, OP0, OP1) \  [argument]
  375  v128 m0, m1, m2, m3;  [in Sha256_UpdateBlocks_HW(), local]
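In both variants the 16 message words of the current block live in four lanes-of-four vectors m0..m3 (x86 __m128i at line 148, Arm v128 at line 375), with SM1/SM2 advancing the message schedule while R4 interleaves four compression rounds. A sketch of one x86 schedule step using the SHA extensions (compile with -msha; this mirrors the usual SHA-NI idiom, not this file's exact macro expansion):

    #include <immintrin.h>

    // Advance the SHA-256 message schedule by four words: from w[t-16..t-1]
    // held in m0..m3, compute w[t..t+3].
    static inline __m128i Sha256ScheduleStep(__m128i m0, __m128i m1,
                                             __m128i m2, __m128i m3) {
      __m128i t = _mm_sha256msg1_epu32(m0, m1);          // sigma0 contribution
      t = _mm_add_epi32(t, _mm_alignr_epi8(m3, m2, 4));  // + w[t-7..t-4]
      return _mm_sha256msg2_epu32(t, m3);                // + sigma1 contribution
    }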
Sha512Opt.c
  90   #define NNN(m0, m1, m2, m3)  [argument]
  92   #define SM1(m1, m2, m3, m0) \  [argument]
  95   #define SM2(m2, m3, m0, m1) \  [argument]
  104  #define R4(k, m0, m1, m2, m3, OP0, OP1) \  [argument]
  148  __m256i m0, m1, m2, m3;  [in Sha512_UpdateBlocks_HW(), local]
  264  #define NN(m0, m1, m4, m5, m7)  [argument]
  265  #define SM(m0, m1, m4, m5, m7) \  [argument]
  268  #define R2(k, m0,m1,m2,m3,m4,m5,m6,m7, a0,a1,a2,a3, OP) \  [argument]
  276  #define R8(k, m0,m1,m2,m3,m4,m5,m6,m7, OP) \  [argument]
  304  v128_64 m0, m1, m2, m3, m4, m5, m6, m7;  [in Sha512_UpdateBlocks_HW(), local]
Sha1Opt.c
  90   #define NNN(m0, m1, m2, m3)  [argument]
  92   #define SM1(m0, m1, m2, m3) \  [argument]
  95   #define SM2(m0, m1, m2, m3) \  [argument]
  99   #define SM3(m0, m1, m2, m3) \  [argument]
  104  #define R4(k, m0, m1, m2, m3, e0, e1, OP) \  [argument]
  148  __m128i m0, m1, m2, m3;  [in Sha1_UpdateBlocks_HW(), local]
  345  v128 m0, m1, m2, m3;  [in Sha1_UpdateBlocks_HW(), local]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
moments_utils.h
  26   int64_t& m0,  [in AddMoments()]
  42   int64_t& m0,  [in AddMomentsVec()]
  58   int64_t m0,  [in UpdateMomentsVec()]
  81   int64_t m0,  [in UpdateMomentsVec()]
  127  const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);  [variable]
  162  int64_t m0 = 0;  [variable]
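Here m0 is the running sample count that AddMoments carries alongside the mean and M2 accumulators, with work split into kChunkSize pieces for numerical stability. Merging two partial (count, mean, M2) triples follows the standard parallel (Chan-style) update; a scalar sketch with illustrative names, where the real code also handles vectorized lanes:

    #include <cstdint>

    // Merge partial moments (m0_add, m1_add, m2_add) = (count, mean, M2)
    // into the running (m0, m1, m2). M2 is the sum of squared deviations,
    // so variance = m2 / m0.
    void CombineMoments(int64_t m0_add, double m1_add, double m2_add,
                        int64_t& m0, double& m1, double& m2) {
      const int64_t n = m0 + m0_add;
      if (n == 0) return;
      const double c = static_cast<double>(m0_add) / static_cast<double>(n);
      const double delta = m1_add - m1;
      m1 += c * delta;                                              // merged mean
      m2 += m2_add + delta * delta * c * static_cast<double>(m0);   // merged M2
      m0 = n;                                                       // merged count
    }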
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
moments_utils.h
  35   int64_t& m0,  [in AddMoments()]
  52   int64_t& m0,  [in AddMomentsVec()]
  68   int64_t m0,  [in UpdateMomentsVec()]
  114  const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);  [variable]
  149  int64_t m0 = 0;  [variable]
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/tests/
concat_test.cc
  87   QuantizedConcatenationOpModel m0({{tensor_dtype, {2, 1, 1, 2}, -12.7, 12.8},  [in FourInputsQuantizedSameRangeImpl(), local]
  119  QuantizedConcatenationOpModel m0({{tensor_dtype,  [in TwoInputsNegativeAxisImpl(), local]
  151  QuantizedConcatenationOpModel m0(  [in TEST(), local]
  173  QuantizedConcatenationOpModel m0({{TensorType_UINT8, {2, 1, 2}, -10.7, 10.8},  [in TEST(), local]
  206  QuantizedConcatenationOpModel m0(  [in TEST(), local]
/aosp_15_r20/external/libaom/av1/common/arm/
blend_a64_hmask_neon.c
  39   uint8x16_t m0 = vld1q_u8(mask + i);  [in aom_blend_a64_hmask_neon(), local]
  55   const uint8x8_t m0 = vld1_u8(mask);  [in aom_blend_a64_hmask_neon(), local]
  69   const uint8x8_t m0 = load_unaligned_dup_u8_4x2(mask);  [in aom_blend_a64_hmask_neon(), local]
  84   const uint8x8_t m0 = vreinterpret_u8_u16(vld1_dup_u16((uint16_t *)mask));  [in aom_blend_a64_hmask_neon(), local]
blend_a64_vmask_neon.c
  38   uint8x16_t m0 = vdupq_n_u8(mask[0]);  [in aom_blend_a64_vmask_neon(), local]
  58   uint8x8_t m0 = vdup_n_u8(mask[0]);  [in aom_blend_a64_vmask_neon(), local]
  73   const uint16x4_t m0 = vdup_n_u16((uint16_t)mask[0]);  [in aom_blend_a64_vmask_neon(), local]
  91   uint16x4_t m0 = vdup_n_u16(0);  [in aom_blend_a64_vmask_neon(), local]
warp_plane_neon.c
  25   int16x8_t m0 = vmulq_s16(f[0], in16_lo);  [in horizontal_filter_4x1_f4(), local]
  53   int16x8_t m0 = vmulq_s16(f[0], in16_lo);  [in horizontal_filter_8x1_f8(), local]
  85   int16x8_t m0 = vmulq_s16(f_s16, in16_lo);  [in horizontal_filter_4x1_f1_beta0(), local]
  116  int16x8_t m0 = vmulq_s16(f_s16, in16_lo);  [in horizontal_filter_8x1_f1_beta0(), local]
  186  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));  [in vertical_filter_4x1_f4(), local]
  255  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));  [in vertical_filter_8x1_f8(), local]
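The vertical-filter hits share one widening multiply-accumulate idiom: 16-bit samples times 16-bit filter taps accumulated in 32-bit lanes, split into low and high halves. A minimal sketch of that step (illustrative, one tap vector only; the real filters sum several such products and then narrow with rounding):

    #include <arm_neon.h>

    // Multiply eight s16 samples by eight s16 taps, accumulating the
    // elementwise products of the low and high halves in s32 lanes.
    static inline int32x4_t MulAccTap(int16x8_t s0, int16x8_t f0) {
      int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f0));
      return vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f0));
    }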
highbd_warp_plane_neon.c
  32   int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0));  [in highbd_horizontal_filter_4x1_f4(), local]
  58   int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0));  [in highbd_horizontal_filter_8x1_f8(), local]
  95   int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0));  [in highbd_horizontal_filter_4x1_f1(), local]
  120  int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0));  [in highbd_horizontal_filter_8x1_f1(), local]
  206  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));  [in vertical_filter_4x1_f4(), local]
  234  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));  [in vertical_filter_8x1_f8(), local]
highbd_warp_plane_sve.c
  33   int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f[0]);  [in highbd_horizontal_filter_4x1_f4(), local]
  56   int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f[0]);  [in highbd_horizontal_filter_8x1_f8(), local]
  87   int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f);  [in highbd_horizontal_filter_4x1_f1(), local]
  109  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f);  [in highbd_horizontal_filter_8x1_f1(), local]
  191  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);  [in vertical_filter_4x1_f4(), local]
  216  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);  [in vertical_filter_8x1_f8(), local]
/aosp_15_r20/external/ktfmt/website/
gulpfile.js
  41   return contents.replace(regex, function (_, m0) {  [argument]
  82   function (m0, fileContents) {  [argument]
  102  function (_, m0) {  [argument]
  117  function (m0, fileContents) {  [argument]
/aosp_15_r20/external/ComputeLibrary/src/core/NEON/kernels/detail/
NEDirectConvolutionDetail.h
  249  … const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,  [in single_convolve_3x3_dilation()]
  353  const float32x4x3_t &m0, const float32x4x3_t &m1, const float32x4x3_t &m2,  [in convolve_3x3()]
  467  … const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,  [in single_convolve_3x3_dilation()]
  554  …e_3x3_dilation(const T *in_top, const T *in_mid, const T *in_low, const int32x4x3_t &m0, const int…  [in convolve_3x3_dilation()]
  594  const int32x4x3_t &m0, const int32x4x3_t &m1, const int32x4x3_t &m2,  [in convolve_3x3()]
/aosp_15_r20/external/ComputeLibrary/src/dynamic_fusion/sketch/gpu/components/cl/
ClComponentDepthwiseConv2d.cpp
  95   Settings &Settings::m0(unsigned int m0)  [in m0(), argument]
  101  unsigned int Settings::m0() const  [in m0(), function, arm_compute::experimental::dynamic_fusion::Settings]
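These two hits are the halves of a fluent accessor pair on the component's Settings: a chainable setter Settings &m0(unsigned int) and a const getter unsigned int m0() const sharing one name. Stripped to its skeleton (member name and default are illustrative):

    // Fluent setter/getter pair in the style of the dynamic-fusion Settings.
    class Settings {
    public:
      Settings &m0(unsigned int m0) {
        _m0 = m0;
        return *this;  // returning *this allows chained configuration calls
      }
      unsigned int m0() const { return _m0; }

    private:
      unsigned int _m0{1};  // illustrative default
    };

Returning the object itself lets call sites chain several such setters in a single expression before handing the Settings to the component.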
/aosp_15_r20/external/mesa3d/src/intel/compiler/elk/
elk_test_vec4_register_coalesce.cpp
  147  dst_reg m0 = dst_reg(MRF, 0);  [in TEST_F(), local]
  166  dst_reg m0 = dst_reg(MRF, 0);  [in TEST_F(), local]
  192  dst_reg m0 = dst_reg(MRF, 0);  [in TEST_F(), local]
/aosp_15_r20/packages/apps/Gallery2/src/com/android/gallery3d/filtershow/crop/
BoundedRect.java
  114  Matrix m0 = getInverseRotMatrix();  [in moveInner(), local]
  178  Matrix m0 = getInverseRotMatrix();  [in resizeInner(), local]
  247  Matrix m0 = getInverseRotMatrix();  [in fixedAspectResizeInner(), local]
/aosp_15_r20/external/libaom/aom_dsp/x86/
blend_a64_mask_avx2.c
  30   const __m256i *m0, const __m256i *v_round_offset, const __m256i *v_maxval,  [in blend_a64_d16_mask_w16_avx2()]
  51   const __m256i *m0, const __m256i *m1, const __m256i *v_round_offset,  [in blend_a64_d16_mask_w32_avx2()]
  90   const __m256i m0 = _mm256_cvtepu8_epi16(m);  [in lowbd_blend_a64_d16_mask_subw0_subh0_w16_avx2(), local]
  110  const __m256i m0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(m));  [in lowbd_blend_a64_d16_mask_subw0_subh0_w32_avx2(), local]
  137  const __m256i m0 = _mm256_srli_epi16(_mm256_add_epi16(m0_acbd, two_w), 2);  [in lowbd_blend_a64_d16_mask_subw1_subh1_w16_avx2(), local]
  167  const __m256i m0 = _mm256_srli_epi16(_mm256_add_epi16(m0_acbd, two_w), 2);  [in lowbd_blend_a64_d16_mask_subw1_subh1_w32_avx2(), local]
  192  const __m256i m0 = _mm256_avg_epu16(m0_ac, zeros);  [in lowbd_blend_a64_d16_mask_subw1_subh0_w16_avx2(), local]
  218  const __m256i m0 = _mm256_avg_epu16(m0_ac, zeros);  [in lowbd_blend_a64_d16_mask_subw1_subh0_w32_avx2(), local]
  244  const __m256i m0 = _mm256_cvtepu8_epi16(m_ac);  [in lowbd_blend_a64_d16_mask_subw0_subh1_w16_avx2(), local]
  270  const __m256i m0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(m_ac));  [in lowbd_blend_a64_d16_mask_subw0_subh1_w32_avx2(), local]
  [all …]
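Across these hits the mask is first brought up to 16 bits per lane (_mm256_cvtepu8_epi16), optionally averaging sub-sampled rows/columns first, so that m*src0 + (64 - m)*src1 fits in 16-bit arithmetic. A minimal non-subsampled sketch of the widen, blend, narrow round trip (illustrative, not one of the file's functions):

    #include <immintrin.h>

    // Blend 16 pixels with a 6-bit mask: (m*s0 + (64 - m)*s1 + 32) >> 6.
    static inline __m128i Blend16Avx2(__m128i s0, __m128i s1, __m128i m) {
      const __m256i m0 = _mm256_cvtepu8_epi16(m);         // widen mask to u16
      const __m256i m0_inv = _mm256_sub_epi16(_mm256_set1_epi16(64), m0);
      const __m256i v0 = _mm256_cvtepu8_epi16(s0);
      const __m256i v1 = _mm256_cvtepu8_epi16(s1);
      __m256i sum = _mm256_add_epi16(_mm256_mullo_epi16(v0, m0),
                                     _mm256_mullo_epi16(v1, m0_inv));
      sum = _mm256_srli_epi16(_mm256_add_epi16(sum, _mm256_set1_epi16(32)), 6);
      // Narrow back to bytes: pack within 128-bit lanes, then fix lane order.
      __m256i packed = _mm256_packus_epi16(sum, sum);
      packed = _mm256_permute4x64_epi64(packed, 0xD8);
      return _mm256_castsi256_si128(packed);
    }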
/aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/
GEMMLowpFixture.h
  940   …gned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1091  …ed int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1248  …gned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1391  …gned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1599  …gned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1755  …ed int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1904  …gned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]
  1999  …ed int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int…  [in setup()]