/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
  fuse_attention.py
    33    def _sfdp_pattern_1(query, key, value, inv_scale):                             argument
    42    def _sfdp_replacement_1(query, key, value, inv_scale):                         argument
    277   def _sfdp_pattern_11(query, key, value, inv_scale):                            argument
    285   def _sfdp_replacement_11(query, key, value, inv_scale):                        argument
    338   def _sfdp_pattern_14(query, key, value, attn_mask, inv_scale):                 argument
    351   def _sfdp_replacement_14(query, key, value, attn_mask, inv_scale):             argument
    364   def _sfdp_pattern_15(query, key, value, attn_mask, inv_scale):                 argument
    380   def _sfdp_replacement_15(query, key, value, attn_mask, inv_scale):             argument
    401   def _sfdp_pattern_16(query, key, value, attn_mask, inv_scale, dropout_p):      argument
    418   def _sfdp_replacement_16(query, key, value, attn_mask, inv_scale, dropout_p):  argument
    [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/
  AffineQuantizerBase.cpp
    81    float inv_scale = 1.0f / scale;                       in quantize_val_arm()            local
    134   float inv_scale = 1.0f / static_cast<float>(scale);   in quantize_val()                local
    148   float inv_scale = 1.0f / scale;                       in quantize_val_arm()            local
    202   float inv_scale = scale == 0 ? 1.0f : 1.0f / scale;   in quantize_val_float_qparams()  local
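
The pattern in AffineQuantizerBase.cpp above is the one most of these hits share: the reciprocal of the scale is hoisted into a local so the per-element quantization is a multiply rather than a divide. A minimal stand-alone sketch of affine quantization in that style (the function name and the int8 range are illustrative, not the exact ATen signatures):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantize one float to int8 with a precomputed inverse scale:
    // q = clamp(zero_point + round(x * inv_scale), qmin, qmax)
    inline int8_t quantize_one(float x, float scale, int32_t zero_point) {
      const float inv_scale = 1.0f / scale;   // hoisted out of the hot loop in the real kernels
      const int32_t qmin = -128, qmax = 127;
      int32_t q = zero_point + static_cast<int32_t>(std::nearbyint(x * inv_scale));
      q = std::min(std::max(q, qmin), qmax);  // saturate to the int8 range
      return static_cast<int8_t>(q);
    }
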
/aosp_15_r20/external/executorch/backends/xnnpack/runtime/utils/
  utils.h
    69    float inv_scale = 1.0f / static_cast<float>(scale);   in quantize_val()              local
    90    const float inv_scale = 1.0f / scale;                 in quantize_tensor_arm64_q8()  local
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/uniform_quant_ops/
  uniform_quantize_op.cc
    28    const float inv_scale = 1.0f / scale;            in EvalPerTensorQuantize()   local
    48    const float inv_scale = 1.0f / scales_data[i];   in EvalPerChannelQuantize()  local
  math_utils.h
    68    void AffineQuantize(const ConstTensorTin& input_tensor, float inv_scale,   in AffineQuantize()
  math_utils.cc
    75    float inv_scale = 0;   in AsymmetricQuantize()  local
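
EvalPerChannelQuantize in uniform_quantize_op.cc above differs from the per-tensor case only in that every channel carries its own scale, so the reciprocal is recomputed once per channel instead of once per tensor. A rough sketch of that loop structure, assuming a flattened [channels x per_channel_size] layout (the names are hypothetical, not the TensorFlow kernel's API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Per-channel quantization: channel c uses scales[c] and zero_points[c].
    // `output` is assumed to be preallocated to input.size().
    void quantize_per_channel(const std::vector<float>& input,
                              const std::vector<float>& scales,
                              const std::vector<int32_t>& zero_points,
                              int64_t per_channel_size,
                              std::vector<int8_t>& output) {
      const int64_t num_channels = static_cast<int64_t>(scales.size());
      const int32_t qmin = -128, qmax = 127;
      for (int64_t c = 0; c < num_channels; ++c) {
        const float inv_scale = 1.0f / scales[c];  // one reciprocal per channel
        for (int64_t i = 0; i < per_channel_size; ++i) {
          const int64_t idx = c * per_channel_size + i;
          const int32_t q = zero_points[c] +
              static_cast<int32_t>(std::nearbyint(input[idx] * inv_scale));
          output[idx] = static_cast<int8_t>(std::min(std::max(q, qmin), qmax));
        }
      }
    }
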
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  AmpKernels.cu
    45    const Tensor& inv_scale)   in _amp_non_finite_check_and_unscale_cuda_()
    90    const Tensor& inv_scale)   in _amp_foreach_non_finite_check_and_unscale_cuda_()
  IndexKernel.cu
    258   const float inv_scale = 1.0f / static_cast<float>(scale);   in index_put_kernel_quantized_cuda()  local
  Indexing.cu
    231   float inv_scale, int zero_point, int64_t qmin, int64_t qmax) {   in indexing_backward_kernel_quantized()
    636   float inv_scale = 1.0f / static_cast<float>(scale);              in index_put_with_sort_quantized()  local
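
The AmpKernels.cu entries above use inv_scale for something different: it is the reciprocal of the loss scale, applied to bring scaled gradients back to their true magnitude while recording whether any value is non-finite. A simplified scalar version of that check-and-unscale step (the real kernels operate on CUDA tensors and tensor lists; this only illustrates the logic):

    #include <cmath>
    #include <vector>

    // Unscale gradients in place; set found_inf to 1.0f if any value is inf/nan.
    void non_finite_check_and_unscale(std::vector<float>& grads,
                                      float inv_scale,
                                      float& found_inf) {
      for (float& g : grads) {
        if (!std::isfinite(g)) {
          found_inf = 1.0f;  // the caller skips the optimizer step when this is set
        }
        g *= inv_scale;      // undo the loss-scale multiplication
      }
    }
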
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/
  FakeQuantizeCore.cu
    31    float inv_scale = 1.0f / scale;   in fake_quantize_tensor_cachemask_kernel_cuda()  local
    95    float inv_scale,                  in _fake_quantize_grad_learnable_tensor_kernel_cuda()
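
Fake quantization rounds through the integer grid but returns floats, so inv_scale is applied on the way in and scale on the way out; the cachemask variant additionally records whether each element landed inside [quant_min, quant_max] so the backward pass can mask gradients. A scalar sketch under those assumptions (not the CUDA kernel itself):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Fake-quantize one value: quantize, clamp, dequantize; report whether it was in range.
    float fake_quantize_one(float x, float scale, int32_t zero_point,
                            int64_t quant_min, int64_t quant_max, bool& in_range) {
      const float inv_scale = 1.0f / scale;
      const int64_t q = static_cast<int64_t>(std::nearbyint(x * inv_scale)) + zero_point;
      in_range = (q >= quant_min) && (q <= quant_max);  // mask consumed by the backward pass
      const int64_t q_clamped = std::min(std::max(q, quant_min), quant_max);
      return static_cast<float>(q_clamped - zero_point) * scale;  // back onto the float grid
    }
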
/aosp_15_r20/external/XNNPACK/test/
  convert-operator-tester.h
    249   const float inv_scale = 1.0f / scale();   in TestF32toQS8()  local
    314   const float inv_scale = 1.0f / scale();   in TestF32toQU8()  local
/aosp_15_r20/external/ComputeLibrary/src/cpu/kernels/elementwise_binary/generic/sve2/
  impl.h
    74    …nt8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale)   in store_quantized()
    88    …nt8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale)   in store_quantized()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/kernels/
  QuantizedOpKernels.cpp
    112   float inv_scale = 1.0 / scale;    in qcat_nhwc_kernel()    local
    1287  float inv_scale = 1.0f / scale;   in qadd_scalar_kernel()  local
    1334  float inv_scale = 1.0f / scale;   in qadd_kernel()         local
    1404  float inv_scale = 1.0f / scale;   in qmul_kernel()         local
    2535  float inv_scale = 1.0f / sc;      local
    2590  float inv_scale,
    2656  float inv_scale = 1.0f / scale;   local
    2663  float inv_scale = 1.0f / scale;   local
    2678  float inv_scale = 1.0f / scale;   local
    2685  float inv_scale = 1.0f / scale;   local
    [all …]
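
The qadd/qmul kernels above follow the usual requantization shape: dequantize the operands, do the arithmetic in float, then requantize with the output tensor's inverse scale. A naive scalar version (parameter names are illustrative; the real kernels are vectorized):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Add two int8 quantized values and requantize to the output's (scale, zero_point).
    int8_t qadd_one(int8_t qa, float a_scale, int32_t a_zp,
                    int8_t qb, float b_scale, int32_t b_zp,
                    float out_scale, int32_t out_zp) {
      const float a = (qa - a_zp) * a_scale;     // dequantize both operands
      const float b = (qb - b_zp) * b_scale;
      const float inv_scale = 1.0f / out_scale;  // requantize the float sum
      const int32_t q = out_zp + static_cast<int32_t>(std::nearbyint((a + b) * inv_scale));
      return static_cast<int8_t>(std::min<int32_t>(std::max<int32_t>(q, -128), 127));
    }
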
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
  quantized_test.cpp
    170   float inv_scale = 1.0f / static_cast<float>(scales[c].item<double>());   in TEST()  local
    204   float inv_scale = 1.0f / static_cast<float>(scales[c].item<double>());   in TEST()  local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  AmpKernels.cpp
    21    const at::Tensor& inv_scale) {   in _amp_foreach_non_finite_check_and_unscale_cpu_()
/aosp_15_r20/external/executorch/backends/cadence/reference/kernels/
  kernels.cpp
    34    float inv_scale,   in quantize()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
  AmpGradScalerKernels.cpp
    34    const at::Tensor& inv_scale) {   in _amp_foreach_non_finite_check_and_unscale_cpu_kernel()
/aosp_15_r20/external/executorch/kernels/quantized/cpu/
  op_add.cpp
    32    float inv_scale = 1.0f / static_cast<float>(scale);   in quantize_val()  local
  op_quantize.cpp
    98    float inv_scale = 1.0f / static_cast<float>(scale);   in quantize_val()  local
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
  fake_quant_ops_functor.h
    43    float* nudged_min, float* nudged_max, float* scale, float* inv_scale) {   in Nudge()
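
The Nudge helper derives scale and inv_scale from a float [min, max] range and shifts that range slightly so that 0.0f lands exactly on an integer grid point. Roughly, and simplified from the well-known TensorFlow fake-quant nudging scheme:

    #include <cmath>

    // Nudge [min, max] so that 0.0f is exactly representable on the quantized grid.
    void nudge(float min, float max, int quant_min, int quant_max,
               float* nudged_min, float* nudged_max, float* scale, float* inv_scale) {
      *scale = (max - min) / static_cast<float>(quant_max - quant_min);
      const float zero_point_from_min = quant_min - min / *scale;
      // Snap the zero point to the nearest valid integer in [quant_min, quant_max].
      float nudged_zero_point;
      if (zero_point_from_min <= quant_min) {
        nudged_zero_point = static_cast<float>(quant_min);
      } else if (zero_point_from_min >= quant_max) {
        nudged_zero_point = static_cast<float>(quant_max);
      } else {
        nudged_zero_point = std::round(zero_point_from_min);
      }
      *nudged_min = (static_cast<float>(quant_min) - nudged_zero_point) * (*scale);
      *nudged_max = (static_cast<float>(quant_max) - nudged_zero_point) * (*scale);
      *inv_scale = 1.0f / (*scale);
    }
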
/aosp_15_r20/external/rust/android-crates-io/crates/glam/src/f64/
  daffine3.rs
    298   let inv_scale = scale.recip();   in to_scale_rotation_translation()  localVariable
/aosp_15_r20/external/rust/android-crates-io/crates/glam/src/f32/
  affine3a.rs
    296   let inv_scale = scale.recip();   in to_scale_rotation_translation()  localVariable
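
In the two glam hits (daffine3.rs and affine3a.rs), inv_scale is the reciprocal of the per-axis scale pulled out of an affine transform: the scale is the length of each basis column, with the sign of the determinant folded into the first axis, and multiplying the columns by inv_scale leaves a pure rotation. A plain sketch of that idea for a 3x3 linear part (the types and layout here are invented for illustration):

    #include <cmath>

    struct Vec3 { float x, y, z; };
    struct Mat3 { Vec3 col[3]; };  // column-major 3x3 linear part of an affine transform

    static float length(const Vec3& v) { return std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z); }

    static float det(const Mat3& m) {
      return m.col[0].x * (m.col[1].y * m.col[2].z - m.col[2].y * m.col[1].z)
           - m.col[1].x * (m.col[0].y * m.col[2].z - m.col[2].y * m.col[0].z)
           + m.col[2].x * (m.col[0].y * m.col[1].z - m.col[1].y * m.col[0].z);
    }

    // Split the linear part into a per-axis scale and a rotation matrix.
    void to_scale_rotation(const Mat3& m, Vec3* scale, Mat3* rotation) {
      const float sign = det(m) < 0.0f ? -1.0f : 1.0f;  // negative determinant => reflection
      *scale = {length(m.col[0]) * sign, length(m.col[1]), length(m.col[2])};
      const Vec3 inv_scale = {1.0f / scale->x, 1.0f / scale->y, 1.0f / scale->z};
      rotation->col[0] = {m.col[0].x * inv_scale.x, m.col[0].y * inv_scale.x, m.col[0].z * inv_scale.x};
      rotation->col[1] = {m.col[1].x * inv_scale.y, m.col[1].y * inv_scale.y, m.col[1].z * inv_scale.y};
      rotation->col[2] = {m.col[2].x * inv_scale.z, m.col[2].y * inv_scale.z, m.col[2].z * inv_scale.z};
    }
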
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
  fake_quantize_ops.cc
    76    xla::XlaOp inv_scale = xla::Div(one, input_scale);   in Quantize()  local
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/image/
  scale_and_translate_op.cc
    57    const float inv_scale = 1.0 / scale;   in ComputeSpansCore()  local
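
In scale_and_translate_op.cc, inv_scale is a resampling ratio rather than a quantization factor: each output coordinate is mapped back into input space with it, and the span of input pixels covered by the filter kernel around that point is gathered. A rough sketch of that span computation, assuming a fixed kernel radius and ignoring the translation and antialiasing handling the real op does:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // For output pixel x_out, find the inclusive range of input pixels it samples from.
    void compute_span(int64_t x_out, float inv_scale, float kernel_radius,
                      int64_t input_size, int64_t* span_start, int64_t* span_end) {
      // Center of the output pixel mapped back into input coordinates.
      const float sample_center = (static_cast<float>(x_out) + 0.5f) * inv_scale;
      *span_start = std::max<int64_t>(
          0, static_cast<int64_t>(std::floor(sample_center - kernel_radius)));
      *span_end = std::min<int64_t>(
          input_size - 1, static_cast<int64_t>(std::ceil(sample_center + kernel_radius)));
    }
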
/aosp_15_r20/external/pytorch/test/inductor/
  test_pattern_matcher.py
    1370  def div_softmax(x, inv_scale):   argument