/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/

precise-scalar.c
  123: const int32_t x_scaled = (int32_t)(x >= 0 ? x_abs_scaled : -x_abs_scaled);  in pytorch_qnnp_requantize_precise__scalar_unsigned32() local
  228: const int32_t x_scaled = (int32_t)(x >= 0 ? x_abs_scaled : -x_abs_scaled);  in pytorch_qnnp_requantize_precise__scalar_unsigned64() local
  324: const int32_t x_scaled = …  in pytorch_qnnp_requantize_precise__scalar_signed64() local

fp32-scalar.c
  39: const float x_scaled = (float)x * scale;  in pytorch_qnnp_requantize_fp32__scalar_lrintf() local
  96: const float x_scaled = (float)x * scale;  in pytorch_qnnp_requantize_fp32__scalar_magic() local

precise-neon.c
  127: const int32x4_t x_scaled = vuzp1q_s32(…  in pytorch_qnnp_requantize_precise__neon() local
  143: const int32x4_t x_scaled = …  in pytorch_qnnp_requantize_precise__neon() local

fp32-sse2.c
  50: const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);  in pytorch_qnnp_requantize_fp32__sse2() local

gemmlowp-sse4.c
  57: const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);  in pytorch_qnnp_requantize_gemmlowp__sse4() local

fp32-psimd.c
  54: const psimd_f32 x_scaled = psimd_cvt_s32_f32(x) * vscale;  in pytorch_qnnp_requantize_fp32__psimd() local

gemmlowp-ssse3.c
  57: const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);  in pytorch_qnnp_requantize_gemmlowp__ssse3() local

gemmlowp-sse2.c
  57: const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);  in pytorch_qnnp_requantize_gemmlowp__sse2() local

gemmlowp-scalar.c
  54: const int32_t x_scaled = gemmlowp_scalar_rdivbypo2_s32(x_product, shift);  in pytorch_qnnp_requantize_gemmlowp__scalar() local
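
The "precise" scalar matches above scale the magnitude of the accumulator with a fixed-point multiplier and then re-apply the sign. A minimal sketch of that sign-magnitude step, assuming a precomputed multiplier/shift/zero_point and a uint8 clamp; the parameter names and the helper are illustrative, not the QNNPACK API:

    #include <stdint.h>

    /*
     * Illustrative sign-magnitude requantization sketch: scale |x| with a
     * fixed-point multiplier, round half away from zero, re-apply the sign.
     * Assumes 1 <= shift < 64; names are assumptions for this sketch.
     */
    static inline uint8_t requantize_precise_sketch(
        int32_t x, uint32_t multiplier, uint32_t shift, uint8_t zero_point) {
      /* Magnitude of x, computed in unsigned arithmetic to avoid overflow. */
      const uint32_t x_abs = (x >= 0) ? (uint32_t)x : -(uint32_t)x;
      /* 32x32 -> 64-bit product of |x| and the fixed-point multiplier. */
      const uint64_t product = (uint64_t)x_abs * (uint64_t)multiplier;
      /* Round half away from zero while shifting back down. */
      const uint64_t rounding = UINT64_C(1) << (shift - 1);
      const uint32_t x_abs_scaled = (uint32_t)((product + rounding) >> shift);
      /* Re-apply the original sign, as in the listed matches. */
      const int32_t x_scaled =
          (x >= 0) ? (int32_t)x_abs_scaled : -(int32_t)x_abs_scaled;
      /* Move into the quantized domain and clamp to the uint8 range. */
      int32_t y = x_scaled + (int32_t)zero_point;
      if (y < 0) y = 0;
      if (y > 255) y = 255;
      return (uint8_t)y;
    }

The fp32 and gemmlowp files in the same directory replace the sign-magnitude arithmetic with a float multiply or a rounding divide by a power of two, but the surrounding clamp-and-offset structure is the same.
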
/aosp_15_r20/external/XNNPACK/src/qs8-requantization/

rndnu-neon-mull.c
  81: const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23…  in xnn_qs8_requantize_rndnu__neon_mull() local
  90: const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));  in xnn_qs8_requantize_rndnu__neon_mull() local

rndna-neon.c
  109: const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23…  in xnn_qs8_requantize_rndna__neon() local
  118: const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));  in xnn_qs8_requantize_rndna__neon() local

fp32-scalar-fmagic.c
  42: const float x_scaled = (float) x * scale;  in xnn_qs8_requantize_fp32__scalar_fmagic() local

fp32-sse4.c
  48: const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);  in xnn_qs8_requantize_fp32__sse4() local

fp32-sse2.c
  48: const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);  in xnn_qs8_requantize_fp32__sse2() local

fp32-scalar-lrintf.c
  40: const float x_scaled = (float) x * scale;  in xnn_qs8_requantize_fp32__scalar_lrintf() local

fp32-wasmsimd.c
  46: const v128_t x_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(x), vscale);  in xnn_qs8_requantize_fp32__wasmsimd() local

rndnu-scalar.c
  60: const int32_t x_scaled = (int32_t) math_asr_s64(x_product + rounding, shift);  in xnn_qs8_requantize_rndnu__scalar() local

rndna-scalar-signed64.c
  66: const int32_t x_scaled = (int32_t) math_asr_s64(x_adjusted_product + rounding, shift);  in xnn_qs8_requantize_rndna__scalar_signed64() local
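
The rndnu scalar match above scales with a 64-bit fixed-point multiply followed by a rounding arithmetic shift. A minimal sketch of that step for a signed 8-bit output, assuming a precomputed multiplier, shift, and zero point; the shift helper below stands in for XNNPACK's math_asr_s64 and is not its actual implementation:

    #include <stdint.h>

    /* Stand-in for a 64-bit arithmetic (floor) shift right; assumed helper. */
    static inline int64_t asr_s64_sketch(int64_t x, uint32_t n) {
      return x >= 0 ? x >> n : -((-x - 1) >> n) - 1;
    }

    /*
     * Illustrative "round to nearest, ties up" (rndnu) requantization for a
     * qs8 output. Assumes 1 <= shift < 63; names are sketch assumptions.
     */
    static inline int8_t requantize_rndnu_sketch(
        int32_t x, int32_t multiplier, uint32_t shift, int8_t zero_point) {
      const int64_t x_product = (int64_t)x * (int64_t)multiplier;
      /* Adding half of the divisor before a floor shift rounds ties upward. */
      const int64_t rounding = INT64_C(1) << (shift - 1);
      const int32_t x_scaled =
          (int32_t)asr_s64_sketch(x_product + rounding, shift);
      int32_t y = x_scaled + (int32_t)zero_point;
      if (y < -128) y = -128;
      if (y > 127) y = 127;
      return (int8_t)y;
    }

The rndna variants differ only in how ties are rounded (away from zero rather than upward), which is why their scalar form adjusts the product before the same rounding shift.
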
/aosp_15_r20/external/XNNPACK/src/qu8-requantization/

rndna-neon.c
  109: const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23…  in xnn_qu8_requantize_rndna__neon() local
  118: const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));  in xnn_qu8_requantize_rndna__neon() local

fp32-sse2.c
  48: const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);  in xnn_qu8_requantize_fp32__sse2() local

fp32-scalar-fmagic.c
  42: const float x_scaled = (float) x * scale;  in xnn_qu8_requantize_fp32__scalar_fmagic() local

fp32-scalar-lrintf.c
  40: const float x_scaled = (float) x * scale;  in xnn_qu8_requantize_fp32__scalar_lrintf() local

fp32-wasmsimd.c
  46: const v128_t x_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(x), vscale);  in xnn_qu8_requantize_fp32__wasmsimd() local

rndna-scalar-signed64.c
  66: const int32_t x_scaled = (int32_t) math_asr_s64(x_adjusted_product + rounding, shift);  in xnn_qu8_requantize_rndna__scalar_signed64() local
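
The fp32 matches in both the qs8 and qu8 directories scale in floating point and then round back to integers. A minimal sketch of the lrintf flavor for an unsigned 8-bit output; the clamp bounds and parameter names are assumptions for the sketch, and the fmagic variant replaces lrintf with a magic-constant bit trick rather than a library call:

    #include <math.h>
    #include <stdint.h>

    /*
     * Illustrative fp32/lrintf requantization for a quint8 output.
     * The clamp is done in the float domain so the rounded value plus
     * zero_point stays inside [0, 255]; names are sketch assumptions.
     */
    static inline uint8_t requantize_fp32_lrintf_sketch(
        int32_t x, float scale, uint8_t zero_point) {
      const float x_scaled = (float)x * scale;
      const float x_min = (float)(0 - (int32_t)zero_point);
      const float x_max = (float)(255 - (int32_t)zero_point);
      const float x_clamped = fminf(fmaxf(x_scaled, x_min), x_max);
      /* lrintf rounds to nearest even under the default rounding mode. */
      const int32_t x_rounded = (int32_t)lrintf(x_clamped);
      return (uint8_t)(x_rounded + (int32_t)zero_point);
    }
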
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/

fused_batch_norm_op.cc
  198: auto x_scaled = x_centered * scaling_factor;  in operator ()() local
  327: auto x_scaled = x_centered * scaling_factor;  in operator ()() local
  477: auto x_scaled = x_centered * coef0_rest_by_depth;  in operator ()() local
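
In the fused batch norm matches, x_centered is the input minus the per-channel mean and the scaling factor folds the learned scale (gamma) with the reciprocal standard deviation. A minimal per-channel sketch of that computation in plain C; the flat NHWC-style loop and parameter names are assumptions for the sketch, not the TensorFlow kernel's layout:

    #include <math.h>
    #include <stddef.h>

    /*
     * Illustrative fused batch norm (inference):
     *   y = (x - mean[c]) * (gamma[c] / sqrt(var[c] + eps)) + beta[c]
     * This mirrors the x_centered * scaling_factor step in the matches above.
     */
    static void fused_batch_norm_sketch(
        const float* x, float* y, size_t rows, size_t channels,
        const float* mean, const float* variance,
        const float* gamma, const float* beta, float epsilon) {
      for (size_t c = 0; c < channels; c++) {
        const float scaling_factor = gamma[c] / sqrtf(variance[c] + epsilon);
        for (size_t r = 0; r < rows; r++) {
          const float x_centered = x[r * channels + c] - mean[c];
          const float x_scaled = x_centered * scaling_factor;
          y[r * channels + c] = x_scaled + beta[c];
        }
      }
    }
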