/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

layer_norm_kernel.cu
    59   T_ACC* rstd) {  in RowwiseMomentsCUDAKernel()
    97   const T_ACC* rstd,  in LayerNormForwardCUDAKernel()
   228   T_ACC* rstd,  in vectorized_layer_norm_kernel_impl()
   307   T_ACC* rstd,  in vectorized_layer_norm_kernel()
   318   const T_ACC* __restrict__ rstd,  in compute_gI()
   383   const T_ACC* __restrict__ rstd,  in layer_norm_grad_input_kernel()
   404   const T_ACC* __restrict__ rstd,  in layer_norm_grad_input_kernel_vectorized()
   519   const T_ACC* rstd,  in GammaBetaBackwardSimpleCUDAKernel()
   554   const T_ACC* rstd,  in GammaBetaBackwardCUDAKernel_32x32()
   654   const T_ACC* rstd,  in GammaBetaBackwardCUDAKernel()
   [all …]
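Throughout these kernels, rstd is the reciprocal standard deviation of one normalized row: RowwiseMomentsCUDAKernel produces mean and rstd = 1/sqrt(var + eps), the forward kernels consume them to normalize, and the grad-input and gamma/beta backward kernels reuse the saved values. A minimal scalar sketch of the per-row forward math (plain C++, illustrative only, not the kernel code):

    #include <cmath>
    #include <cstddef>

    // Scalar sketch of one row of the LayerNorm forward: compute mean and
    // rstd = 1/sqrt(var + eps), then normalize and apply gamma/beta. This only
    // mirrors the roles of mean/rstd in the kernels listed above.
    void layer_norm_row(const float* x, const float* gamma, const float* beta,
                        std::size_t N, float eps, float* y,
                        float* mean_out, float* rstd_out) {
      float mean = 0.f;
      for (std::size_t i = 0; i < N; ++i) mean += x[i];
      mean /= static_cast<float>(N);

      float var = 0.f;
      for (std::size_t i = 0; i < N; ++i) {
        const float d = x[i] - mean;
        var += d * d;
      }
      var /= static_cast<float>(N);

      const float rstd = 1.f / std::sqrt(var + eps);  // the value stored in rstd
      for (std::size_t i = 0; i < N; ++i) {
        y[i] = (x[i] - mean) * rstd * gamma[i] + beta[i];
      }
      *mean_out = mean;
      *rstd_out = rstd;
    }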
group_norm_kernel.cu
    37   T* rstd) {  in RowwiseMomentsCUDAKernel()
    80   const T* rstd,  in ComputeFusedParamsCUDAKernel()
   106   const T* rstd,  in Compute1dBackwardFusedParamsCUDAKernel()
   154   const T* rstd,  in GammaBeta1dBackwardCUDAKernel1()
   192   const T* rstd,  in GammaBeta1dBackwardCUDAKernel2()
   314   const T* rstd,  in ComputeBackwardFusedParamsCUDAKernel()
   362   const T* rstd,  in GammaBetaBackwardCUDAKernel1()
   398   const T* rstd,  in GammaBetaBackwardCUDAKernel2()
   485   const Tensor& rstd,  in GroupNorm1dForward()
   565   Tensor& rstd) {  in GroupNormKernelImplInternal()
   [all …]
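GroupNorm computes one mean/rstd pair per (sample, group), reducing over C/group channels times the spatial extent; that is why RowwiseMomentsCUDAKernel appears here too and why rstd is later allocated with shape {N, group} (see group_norm.cpp below). A scalar sketch of those per-group moments on a contiguous NCHW buffer (illustrative helper, not the kernel's actual signature):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Scalar sketch of the per-(sample, group) moments behind GroupNorm on a
    // contiguous NCHW buffer. The real kernels parallelize this per row; the
    // parameter names are illustrative.
    void group_norm_moments(const float* x, int64_t N, int64_t C, int64_t HxW,
                            int64_t group, float eps,
                            std::vector<float>& mean, std::vector<float>& rstd) {
      const int64_t D = C / group;    // channels per group
      const int64_t inner = D * HxW;  // elements reduced per (n, g)
      mean.assign(N * group, 0.f);
      rstd.assign(N * group, 0.f);
      for (int64_t n = 0; n < N; ++n) {
        for (int64_t g = 0; g < group; ++g) {
          const float* p = x + (n * C + g * D) * HxW;
          float m = 0.f;
          float v = 0.f;
          for (int64_t i = 0; i < inner; ++i) m += p[i];
          m /= static_cast<float>(inner);
          for (int64_t i = 0; i < inner; ++i) {
            const float d = p[i] - m;
            v += d * d;
          }
          v /= static_cast<float>(inner);
          mean[n * group + g] = m;
          rstd[n * group + g] = 1.f / std::sqrt(v + eps);
        }
      }
    }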
/aosp_15_r20/external/trusty/arm-trusted-firmware/plat/st/stm32mp1/

stm32mp1_scmi.c
   133   struct stm32_scmi_rstd *rstd;  member
   368   const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id);  in plat_scmi_rstd_get_name()  local
   391   const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id);  in plat_scmi_rstd_autonomous()  local
   422   const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id);  in plat_scmi_rstd_set_state()  local
   475   struct stm32_scmi_rstd *rstd = &res->rstd[j];  in stm32mp1_init_scmi_server()  local
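In this file rstd is unrelated to normalization: it is the SCMI reset-domain descriptor (struct stm32_scmi_rstd) of the STM32MP1 platform port, and find_rstd() resolves the descriptor for a given agent and SCMI domain id before the plat_scmi_rstd_* handlers use it. A rough sketch of that lookup pattern follows; the struct fields and table layout here are assumptions for illustration, not the actual TF-A definitions:

    #include <stddef.h>

    /* Rough sketch of a reset-domain descriptor lookup, to show what
     * find_rstd(agent_id, scmi_id) does conceptually. The struct fields and
     * table layout are assumptions, not the actual TF-A definitions. */
    struct scmi_rstd_sketch {
      unsigned int scmi_id;   /* reset domain id exposed over SCMI */
      unsigned long reset_id; /* platform reset line behind it */
      const char *name;
    };

    static const struct scmi_rstd_sketch *find_rstd_sketch(
        const struct scmi_rstd_sketch *table, size_t count, unsigned int scmi_id) {
      for (size_t i = 0; i < count; i++) {
        if (table[i].scmi_id == scmi_id) {
          return &table[i];
        }
      }
      return NULL; /* unknown domain: callers answer with an SCMI "not found" status */
    }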
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/

BatchRulesNorm.cpp
    69   Tensor rstd;  in batch_norm_batch_rule()  local
   133   const at::Tensor & rstd, std::optional<int64_t> rstd_bdim,  in batch_norm_backward_no_weight_bias_batch_rule()
   309   Tensor rstd;  in native_group_norm_plumbing()  local
   344   const at::Tensor & rstd, std::optional<int64_t> rstd_bdim,  in group_norm_backward_no_weight_bias_batch_rule()
   375   const Tensor & rstd, const std::optional<Tensor> & weight_opt,  in native_group_norm_backward_plumbing()
   498   const auto rstd = std::get<2>(result);  in native_layer_norm_batch_rule()  local
   514   const auto rstd = std::get<2>(result);  in native_layer_norm_batch_rule()  local
   538   const at::Tensor & rstd, std::optional<int64_t> rstd_bdim) {  in native_layer_norm_backward_no_weight_bias_batch_rule()
   575   const at::Tensor & rstd,  in native_layer_norm_backward_plumbing()
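These are the functorch vmap batch rules and plumbing: each tensor arrives with an optional batch dimension (rstd_bdim and friends), and the rule reduces the batched call to the regular op, then tracks where the batch dim ends up in the outputs. For native_layer_norm the reduction is simple, because the op only normalizes trailing dims; a sketch of the idea using the public ATen API (not the functorch code itself, and assuming weight and bias are present):

    #include <ATen/ATen.h>
    #include <tuple>

    // Sketch of the native_layer_norm vmap batch-rule idea: move the vmap batch
    // dim to the front and call the regular op; because layer norm only reduces
    // the trailing normalized dims, mean and rstd come back with the batch dim
    // still leading. Illustrative only; the real rule is in BatchRulesNorm.cpp.
    std::tuple<at::Tensor, at::Tensor, at::Tensor> layer_norm_batch_rule_sketch(
        const at::Tensor& input, int64_t bdim,  // bdim: the dim vmap added
        at::IntArrayRef normalized_shape,
        const at::Tensor& weight, const at::Tensor& bias, double eps) {
      const at::Tensor input_front = input.movedim(bdim, 0);
      auto result = at::native_layer_norm(input_front, normalized_shape, weight, bias, eps);
      const auto out  = std::get<0>(result);
      const auto mean = std::get<1>(result);
      const auto rstd = std::get<2>(result);  // keeps the leading batch dim
      return std::make_tuple(out, mean, rstd);
    }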
/aosp_15_r20/external/pytorch/aten/src/ATen/native/

layer_norm.cpp
    37   at::Tensor& rstd,  in layer_norm_with_mean_rstd_out()
   106   Tensor rstd = at::empty({M}, X->options().dtype(dtype));  in layer_norm_cpu()  local
   117   const Tensor& rstd,  in layer_norm_backward_cpu()
   253   at::Tensor rstd = std::get<2>(outputs);  in math_native_layer_norm()  local
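In layer_norm_cpu, M is the number of normalized rows, i.e. the product of the input dimensions that come before normalized_shape; one mean and one rstd value is stored per row, and the buffers are then viewed back over those leading dims with trailing size-1 dims. A small shape check against the public op (assuming a linked libtorch; expected sizes in the comments):

    #include <ATen/ATen.h>
    #include <iostream>

    // Shape check: with input [2, 3, 4, 5] and normalized_shape = {4, 5}, there
    // are M = 2 * 3 rows, and mean/rstd are returned viewed over the leading
    // dims with trailing 1s.
    int main() {
      const auto x = at::randn({2, 3, 4, 5});
      const auto w = at::ones({4, 5});
      const auto b = at::zeros({4, 5});
      auto result = at::native_layer_norm(x, {4, 5}, w, b, 1e-5);
      std::cout << std::get<0>(result).sizes() << "\n";  // [2, 3, 4, 5]
      std::cout << std::get<2>(result).sizes() << "\n";  // rstd: [2, 3, 1, 1]
      return 0;
    }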
group_norm.cpp
    99   Tensor rstd = at::empty({N, group}, X.options().dtype(dtype));  in native_group_norm()  local
   109   const Tensor& rstd,  in native_group_norm_backward()
   254   at::Tensor rstd = std::get<2>(outputs).to(c10::TensorOptions().dtype(input.scalar_type())).view({N…  in math_group_norm()  local
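A quick shape check against the public op confirms the {N, group} layout of the mean and rstd allocated above (assuming a linked libtorch; expected sizes in the comments):

    #include <ATen/ATen.h>
    #include <iostream>

    // Shape check: native_group_norm returns one mean/rstd value per
    // (sample, group) pair, matching the at::empty({N, group}) allocation above.
    int main() {
      const int64_t N = 2, C = 6, H = 4, W = 4, group = 3;
      const auto x = at::randn({N, C, H, W});
      const auto w = at::ones({C});
      const auto b = at::zeros({C});
      auto result = at::native_group_norm(x, w, b, N, C, H * W, group, 1e-5);
      std::cout << std::get<2>(result).sizes() << "\n";  // rstd: [2, 3]
      return 0;
    }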
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/

group_norm_kernel.cpp
    40   Tensor& rstd) {  in GroupNormKernelImplInternal()
   295   Tensor& rstd) {  in GroupNormKernelImplChannelsLastInternal()
   495   Tensor& rstd) {  in GroupNormKernelImpl()
   663   const PT* rstd,  in GroupNormInputBackward()
   717   const PT* rstd,  in GammaBackward()
   764   const PT* rstd,  in GammaBackward()
   886   const Tensor& rstd,  in GroupNormBackwardKernelImplInternal()
  1057   const PT* rstd,  in ApplyInputGradientsChannelsLastColMov()
  1105   const PT* rstd,  in ApplyInputGradientsChannelsLastColMov()
  1162   const PT* rstd,  in ApplyInputGradientsChannelsLastRowMov()
   [all …]
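Among these, GammaBackward accumulates the weight and bias gradients per channel from the saved mean and rstd. A scalar sketch of those reductions on contiguous NCHW data (illustrative only; the real kernels vectorize and also handle channels-last layouts):

    #include <cstdint>
    #include <vector>

    // Scalar sketch of the per-channel reductions behind GammaBackward for
    // GroupNorm on contiguous NCHW data (g is the group of channel c):
    //   dgamma[c] = sum over n, hw of dY * (X - mean[n][g]) * rstd[n][g]
    //   dbeta[c]  = sum over n, hw of dY
    void gamma_beta_backward_sketch(const float* dY, const float* X,
                                    const float* mean, const float* rstd,
                                    int64_t N, int64_t C, int64_t HxW, int64_t group,
                                    std::vector<float>& dgamma, std::vector<float>& dbeta) {
      const int64_t D = C / group;  // channels per group
      dgamma.assign(C, 0.f);
      dbeta.assign(C, 0.f);
      for (int64_t n = 0; n < N; ++n) {
        for (int64_t c = 0; c < C; ++c) {
          const int64_t g = c / D;
          const float m = mean[n * group + g];
          const float r = rstd[n * group + g];
          const float* dy = dY + (n * C + c) * HxW;
          const float* x = X + (n * C + c) * HxW;
          for (int64_t i = 0; i < HxW; ++i) {
            dgamma[c] += dy[i] * (x[i] - m) * r;
            dbeta[c] += dy[i];
          }
        }
      }
    }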
layer_norm_kernel.cpp
    37   Tensor* rstd) {  in LayerNormKernelImplInternal()
    96   Tensor* rstd) {  in layer_norm_kernel_mixed_type()
   155   Tensor* rstd) {  in LayerNormKernelImplInternal()
   173   Tensor* rstd) {  in LayerNormKernelImpl()
   495   const Tensor& rstd,  in LayerNormBackwardKernelImplInternal()
   585   const Tensor& rstd,  in LayerNormBackwardKernelImpl()
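LayerNormBackwardKernelImpl (and compute_gI / layer_norm_grad_input_kernel in the CUDA file above) turn the saved mean and rstd into the input gradient with two per-row reductions. A scalar sketch of that per-row math (illustrative only, not the kernel code):

    #include <cstddef>

    // Scalar sketch of the per-row grad-input math used by the layer-norm
    // backward kernels:
    //   x_hat_i = (x_i - mean) * rstd
    //   a = mean over j of (dy_j * gamma_j)
    //   b = mean over j of (dy_j * gamma_j * x_hat_j)
    //   dx_i = (dy_i * gamma_i - a - x_hat_i * b) * rstd
    void layer_norm_grad_input_row(const float* dy, const float* x,
                                   const float* gamma,  // may be null (gamma == 1)
                                   float mean, float rstd, std::size_t N, float* dx) {
      float a = 0.f;
      float b = 0.f;
      for (std::size_t i = 0; i < N; ++i) {
        const float x_hat = (x[i] - mean) * rstd;
        const float g = gamma ? gamma[i] : 1.f;
        a += dy[i] * g;
        b += dy[i] * g * x_hat;
      }
      a /= static_cast<float>(N);
      b /= static_cast<float>(N);
      for (std::size_t i = 0; i < N; ++i) {
        const float x_hat = (x[i] - mean) * rstd;
        const float g = gamma ? gamma[i] : 1.f;
        dx[i] = (dy[i] * g - a - x_hat * b) * rstd;
      }
    }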
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/

NativeLayerNorm.cpp
    37   vTensorPtr rstd = graph->get_tensor(args[0].refs[2]);  in resize_native_layer_norm_node()  local
/aosp_15_r20/external/executorch/kernels/portable/cpu/

op_native_layer_norm.cpp
    32   Tensor& rstd) {  in layer_norm()
op_native_group_norm.cpp
    35   Tensor& rstd) {  in group_norm()
/aosp_15_r20/external/executorch/kernels/optimized/cpu/

op_native_layer_norm.cpp
    35   Tensor& rstd) {  in layer_norm()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/

NestedTensorBackward.cpp
   201   const Tensor& rstd,  in layer_norm_backward_nested()
NestedTensorMath.cpp
   179   Tensor rstd = at::empty({M}, options);  in nested_layer_norm()  local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/

Normalization.cpp
   107   auto rstd = empty_mkldnn(  in mkldnn_layer_norm_last_index_weight_bias_f32()  local
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/

shape_inference.cpp
   633   const at::Tensor& rstd,  in compute_shape_native_layer_norm_backward()
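compute_shape_native_layer_norm_backward only consumes rstd as an input operand; the shapes it has to report follow directly from the input and weight/bias shapes. A sketch of that shape rule (plain C++, illustrative only):

    #include <cstdint>
    #include <vector>

    // Sketch of the shape rule encoded by compute_shape_native_layer_norm_backward:
    // grad_input has the input's shape, and grad_weight / grad_bias have the
    // weight / bias shape (normalized_shape). mean and rstd only feed the math;
    // they do not change any output shape.
    struct LayerNormBackwardShapes {
      std::vector<int64_t> grad_input;
      std::vector<int64_t> grad_weight;
      std::vector<int64_t> grad_bias;
    };

    LayerNormBackwardShapes layer_norm_backward_shapes_sketch(
        const std::vector<int64_t>& input_shape,
        const std::vector<int64_t>& normalized_shape) {
      return {input_shape, normalized_shape, normalized_shape};
    }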
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/kernels/

QuantizedOpKernels.cpp
  3196   Tensor rstd = at::empty(M, X.options().dtype(at::kFloat));  local
/aosp_15_r20/external/pytorch/torch/csrc/autograd/

FunctionsManual.cpp
  4939   const Tensor& rstd,  in infinitely_differentiable_native_group_norm_backward()