/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

Normalization.cu
    154  auto invstd = as_nd(invstd_);  in batch_norm_elementwise() local
    181  const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd,  in batch_norm_elementwise_backward_train()
    257  const Tensor& invstd, const Tensor& weight) {  in batch_norm_elementwise_backward_eval()
    615  Tensor invstd;  in batch_norm_backward_cuda() local
    678  const Tensor& invstd, double epsilon) {  in batch_norm_elemt_cuda()
    686  … const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {  in batch_norm_elemt_cuda_out()
    693  …ch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const std:…  in batch_norm_gather_stats_cuda()
    707  …const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional<Tensor>& running…  in batch_norm_gather_stats_with_counts_cuda()
    725  …nst Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const std:…  in batch_norm_backward_reduce_cuda()
    762  …cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const std:…  in batch_norm_backward_elemt_cuda()
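These hits are the CUDA entry points for the elementwise forward and backward batch-norm paths. For orientation, the forward transform they ultimately apply is y = (x - mean) * invstd * weight + bias per channel; the following is a minimal standalone C++ sketch with hypothetical names and plain loops, not the actual accessor-based CUDA kernel:

    // Sketch of the per-element batch-norm forward transform (NCHW layout).
    // Hypothetical standalone code, not the CUDA kernel from Normalization.cu.
    #include <cstddef>

    void batch_norm_forward_sketch(
        const float* x, float* y,
        const float* mean, const float* invstd,   // per-channel statistics
        const float* weight, const float* bias,   // affine parameters (gamma, beta)
        std::size_t n, std::size_t c, std::size_t hw) {
      for (std::size_t i = 0; i < n; ++i)
        for (std::size_t ch = 0; ch < c; ++ch)
          for (std::size_t s = 0; s < hw; ++s) {
            const std::size_t idx = (i * c + ch) * hw + s;
            y[idx] = (x[idx] - mean[ch]) * invstd[ch] * weight[ch] + bias[ch];
          }
    }
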
Normalization.cuh
    230  stat_accscalar_t invstd;  in batch_norm_transform_input_kernel() local
    253  T invstd = 0;  in operator ()() local
    373  stat_accscalar_t mean, invstd;  in batch_norm_backward_kernel() local
    431  GenericPackedTensorAccessor<accscalar_t, 1, RestrictPtrTraits, index_t> invstd,  in batch_norm_reduce_statistics_kernel()
    477  GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> invstd,  in batch_norm_backward_reduce_kernel()
    512  const GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> invstd,  in batch_norm_backward_elemt_kernel_impl()
    550  const GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> invstd,  in batch_norm_backward_elemt_kernel()
    572  const GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> invstd,  in batch_norm_backward_elemt_kernel()
    681  auto invstd = packed_accessor_or_dummy<  in batch_norm_stats_cuda_template() local
    712  auto invstd = packed_accessor_or_dummy<  in batch_norm_elemt_cuda_template() local
    [all …]
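batch_norm_reduce_statistics_kernel (hit at 431) is the piece that gathers per-replica statistics for synchronized batch norm. A hedged sketch of the kind of merge such a step performs, assuming each replica reports a (mean, invstd, count) triple with invstd = 1/sqrt(var + eps); the names and structure here are illustrative, not the kernel's:

    // Merge per-replica batch-norm statistics into global ones using the
    // parallel variance-combination formula (Chan et al.). Sketch only;
    // assumes every part has a nonzero count.
    #include <cmath>
    #include <vector>

    struct Stats { double mean; double invstd; double count; };

    Stats merge_stats(const std::vector<Stats>& parts, double eps) {
      double mean = 0.0, m2 = 0.0, count = 0.0;
      for (const Stats& p : parts) {
        const double var = 1.0 / (p.invstd * p.invstd) - eps;  // undo 1/sqrt(var + eps)
        const double delta = p.mean - mean;
        const double total = count + p.count;
        mean += delta * p.count / total;
        m2 += var * p.count + delta * delta * count * p.count / total;
        count = total;
      }
      return {mean, 1.0 / std::sqrt(m2 / count + eps), count};
    }
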
/aosp_15_r20/external/executorch/kernels/portable/cpu/

op_native_batch_norm.cpp
    118  CTYPE invstd = 1.0 / std::sqrt(var + eps);  in _native_batch_norm_legit_no_training_out() local
    289  CTYPE invstd = 1.0 / std::sqrt(var + eps);  in _native_batch_norm_legit_no_stats_out() local
    297  CTYPE invstd = invstd_data[c];  in _native_batch_norm_legit_no_stats_out() local
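The two sqrt hits show invstd being derived once per channel from the variance; caching the reciprocal means the per-element hot loop pays a multiply instead of a divide. A minimal sketch of the computation these lines feed into (a hypothetical helper, not the ExecuTorch operator itself):

    // Sketch: derive invstd from variance, then apply the affine transform.
    #include <cmath>

    inline float bn_normalize(float x, float mean, float var, float eps,
                              float weight, float bias) {
      const float invstd = 1.0f / std::sqrt(var + eps);  // as in the hits above
      return (x - mean) * invstd * weight + bias;
    }

In a real operator the per-channel invstd would be hoisted out of the element loop, which is why it is kept as a separate quantity rather than recomputed from var each time.
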
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/

batch_norm_kernel.cpp
    57    opmath_t mean, invstd;  in batch_norm_cpu_collect_linear_and_constant_terms() local
    440   scalar_t mean, invstd;  in batch_norm_cpu_backward_contiguous_impl() local
    557   Tensor invstd = at::empty({0}, input.options());  in batch_norm_cpu_backward_channels_last_impl() local
    644   Vec invstd = Vec::loadu(invstd_ptr + d);  in batch_norm_cpu_backward_channels_last_impl() local
    657   Vec invstd = Vec::loadu(invstd_ptr + d, n_channel - d);  in batch_norm_cpu_backward_channels_last_impl() local
    670   Vec invstd = Vec::loadu(invstd_ptr + d);  in batch_norm_cpu_backward_channels_last_impl() local
    677   Vec invstd = Vec::loadu(invstd_ptr + d, n_channel - d);  in batch_norm_cpu_backward_channels_last_impl() local
    690   [](Vec dotp, Vec invstd) { return dotp * invstd; },  in batch_norm_cpu_backward_channels_last_impl()
    1030  opmath_t mean, invstd;  in batch_norm_cpu_backward_contiguous_internal() local
    1148  Tensor invstd = at::empty({n_channel}, input.options().dtype(kFloat));  in batch_norm_cpu_backward_channels_last_internal() local
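The Vec::loadu hits and the dotp * invstd lambda at 690 belong to the vectorized channels-last backward pass. In scalar form the reduction it implements is grad_weight[c] = invstd[c] * sum over all positions of (x - mean[c]) * grad_out; a hedged scalar sketch with hypothetical names, assuming NHWC layout:

    // Scalar equivalent of the vectorized channels-last grad_weight reduction.
    // Hypothetical standalone code, not the Vec-based kernel above.
    #include <cstddef>

    void bn_grad_weight_sketch(
        const float* x, const float* grad_out,
        const float* mean, const float* invstd,
        float* grad_weight,
        std::size_t rows, std::size_t channels) {  // rows = N * H * W
      for (std::size_t c = 0; c < channels; ++c) {
        float dotp = 0.0f;
        for (std::size_t r = 0; r < rows; ++r)
          dotp += (x[r * channels + c] - mean[c]) * grad_out[r * channels + c];
        grad_weight[c] = dotp * invstd[c];  // cf. the "dotp * invstd" lambda above
      }
    }
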
/aosp_15_r20/external/pytorch/aten/src/ATen/native/

Normalization.cpp
    106  T invstd = 0;  in operator ()() local
    169  auto invstd = as_nd([&]{  in batch_norm_cpu_transform_input_template() local
    193  cpu_kernel(iter, [=](scalar_t input, param_t mean, param_t invstd, param_t weight, param_t bias) -…  in batch_norm_cpu_transform_input_template()
    409  param_t mean{}, invstd{};  in batch_norm_backward_cpu_template() local
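The lambda at 193 is cut off in the listing; given the as_nd reshaping at 169, its body plausibly applies the standard per-element affine transform. A guess at its shape, compiled standalone rather than quoted from the source:

    #include <cstdio>

    int main() {
      // Plausible form of the truncated cpu_kernel lambda (sketch only):
      auto transform = [](float input, float mean, float invstd,
                          float weight, float bias) -> float {
        return ((input - mean) * invstd) * weight + bias;
      };
      // (2 - 1) * 0.5 * 1 + 0 = 0.5
      std::printf("%f\n", transform(2.0f, 1.0f, 0.5f, 1.0f, 0.0f));
      return 0;
    }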