
Searched defs:rstd (Results 1 – 18 of 18) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
layer_norm_kernel.cu
59 T_ACC* rstd) { in RowwiseMomentsCUDAKernel()
97 const T_ACC* rstd, in LayerNormForwardCUDAKernel()
228 T_ACC* rstd, in vectorized_layer_norm_kernel_impl()
307 T_ACC* rstd, in vectorized_layer_norm_kernel()
318 const T_ACC* __restrict__ rstd, in compute_gI()
383 const T_ACC* __restrict__ rstd, in layer_norm_grad_input_kernel()
404 const T_ACC* __restrict__ rstd, in layer_norm_grad_input_kernel_vectorized()
519 const T_ACC* rstd, in GammaBetaBackwardSimpleCUDAKernel()
554 const T_ACC* rstd, in GammaBetaBackwardCUDAKernel_32x32()
654 const T_ACC* rstd, in GammaBetaBackwardCUDAKernel()
[all …]
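
Every hit in this file uses the same statistic: rstd is the reciprocal standard deviation, 1/sqrt(var + eps), computed once per row and saved alongside the mean for the backward pass. A minimal single-threaded C++ sketch of what RowwiseMomentsCUDAKernel produces (illustrative only; the real kernel uses block-wide Welford reductions, and eps here is a hypothetical parameter):

#include <cmath>
#include <cstdint>

// Per-row mean and rstd = 1 / sqrt(var + eps); naive sum/sum-of-squares
// version of the moments the CUDA kernels above compute per row.
template <typename T, typename T_ACC>
void rowwise_moments(const T* X, int64_t N, T_ACC eps,
                     T_ACC* mean, T_ACC* rstd) {
  T_ACC sum = 0;
  T_ACC sum_sq = 0;
  for (int64_t i = 0; i < N; ++i) {
    const T_ACC x = static_cast<T_ACC>(X[i]);
    sum += x;
    sum_sq += x * x;
  }
  const T_ACC m = sum / N;
  const T_ACC var = sum_sq / N - m * m;  // biased (population) variance
  *mean = m;
  *rstd = T_ACC(1) / std::sqrt(var + eps);
}

The forward kernel then applies Y = (X - mean) * rstd * gamma + beta, which is why both statistics reappear in every backward-kernel signature above.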
group_norm_kernel.cu
37 T* rstd) { in RowwiseMomentsCUDAKernel()
80 const T* rstd, in ComputeFusedParamsCUDAKernel()
106 const T* rstd, in Compute1dBackwardFusedParamsCUDAKernel()
154 const T* rstd, in GammaBeta1dBackwardCUDAKernel1()
192 const T* rstd, in GammaBeta1dBackwardCUDAKernel2()
314 const T* rstd, in ComputeBackwardFusedParamsCUDAKernel()
362 const T* rstd, in GammaBetaBackwardCUDAKernel1()
398 const T* rstd, in GammaBetaBackwardCUDAKernel2()
485 const Tensor& rstd, in GroupNorm1dForward()
565 Tensor& rstd) { in GroupNormKernelImplInternal()
[all …]
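
Group norm reuses the same rstd definition but computes it per (sample, group) rather than per row; the statistics are laid out as {N, G}, matching the at::empty({N, group}, ...) allocation in group_norm.cpp further down. A sketch of the forward normalization under the usual NCHW layout (illustrative assumptions, not the kernel's actual tiling):

#include <cstdint>

// Group-norm forward: channel c belongs to group g = c / (C / G), and
// every element of that (n, g) group shares one mean/rstd pair.
template <typename T>
void group_norm_forward(const T* X, const T* mean, const T* rstd,
                        const T* gamma, const T* beta,
                        int64_t N, int64_t C, int64_t HxW, int64_t G, T* Y) {
  const int64_t D = C / G;  // channels per group
  for (int64_t n = 0; n < N; ++n) {
    for (int64_t c = 0; c < C; ++c) {
      const T m = mean[n * G + c / D];
      const T r = rstd[n * G + c / D];
      for (int64_t i = 0; i < HxW; ++i) {
        const int64_t idx = (n * C + c) * HxW + i;
        Y[idx] = (X[idx] - m) * r * gamma[c] + beta[c];
      }
    }
  }
}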
/aosp_15_r20/external/trusty/arm-trusted-firmware/plat/st/stm32mp1/
stm32mp1_scmi.c
133 struct stm32_scmi_rstd *rstd; member
368 const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id); in plat_scmi_rstd_get_name() local
391 const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id); in plat_scmi_rstd_autonomous() local
422 const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id); in plat_scmi_rstd_set_state() local
475 struct stm32_scmi_rstd *rstd = &res->rstd[j]; in stm32mp1_init_scmi_server() local
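
Note the name collision: in this Trusted Firmware-A result, rstd has nothing to do with normalization statistics. struct stm32_scmi_rstd describes an SCMI reset domain, and the plat_scmi_rstd_* hooks resolve one by agent and SCMI id to report its name and drive its state. A hypothetical sketch of such a descriptor (field names are assumptions, not the actual TF-A definition):

/* Hypothetical reset-domain descriptor; fields are illustrative only. */
struct stm32_scmi_rstd {
    unsigned long reset_id; /* platform reset line exposed over SCMI */
    const char *name;       /* name reported to SCMI agents */
};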
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesNorm.cpp
69 Tensor rstd; in batch_norm_batch_rule() local
133 const at::Tensor & rstd, std::optional<int64_t> rstd_bdim, in batch_norm_backward_no_weight_bias_batch_rule()
309 Tensor rstd; in native_group_norm_plumbing() local
344 const at::Tensor & rstd, std::optional<int64_t> rstd_bdim, in group_norm_backward_no_weight_bias_batch_rule()
375 const Tensor & rstd, const std::optional<Tensor> & weight_opt, in native_group_norm_backward_plumbing()
498 const auto rstd = std::get<2>(result); in native_layer_norm_batch_rule() local
514 const auto rstd = std::get<2>(result); in native_layer_norm_batch_rule() local
538 const at::Tensor & rstd, std::optional<int64_t> rstd_bdim) { in native_layer_norm_backward_no_weight_bias_batch_rule()
575 const at::Tensor & rstd, in native_layer_norm_backward_plumbing()
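
These batch rules keep surfacing std::get<2>(result) because native_layer_norm returns its three outputs as a tuple: (output, mean, rstd). A minimal ATen call showing the unpacking (a sketch, assuming the usual at::native_layer_norm signature):

#include <ATen/ATen.h>
#include <tuple>

void example() {
  at::Tensor x = at::randn({8, 16});
  // Returns (output, mean, rstd); mean and rstd keep the leading dims,
  // so here they have shape {8, 1}: one statistic per row.
  auto result = at::native_layer_norm(
      x, /*normalized_shape=*/{16}, /*weight=*/{}, /*bias=*/{}, /*eps=*/1e-5);
  at::Tensor rstd = std::get<2>(result);
}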
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
layer_norm.cpp
37 at::Tensor& rstd, in layer_norm_with_mean_rstd_out()
106 Tensor rstd = at::empty({M}, X->options().dtype(dtype)); in layer_norm_cpu() local
117 const Tensor& rstd, in layer_norm_backward_cpu()
253 at::Tensor rstd = std::get<2>(outputs); in math_native_layer_norm() local
group_norm.cpp
99 Tensor rstd = at::empty({N, group}, X.options().dtype(dtype)); in native_group_norm() local
109 const Tensor& rstd, in native_group_norm_backward()
254 …at::Tensor rstd = std::get<2>(outputs).to(c10::TensorOptions().dtype(input.scalar_type())).view({N… in math_group_norm() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
group_norm_kernel.cpp
40 Tensor& rstd) { in GroupNormKernelImplInternal()
295 Tensor& rstd) { in GroupNormKernelImplChannelsLastInternal()
495 Tensor& rstd) { in GroupNormKernelImpl()
663 const PT* rstd, in GroupNormInputBackward()
717 const PT* rstd, in GammaBackward()
764 const PT* rstd, in GammaBackward()
886 const Tensor& rstd, in GroupNormBackwardKernelImplInternal()
1057 const PT* rstd, in ApplyInputGradientsChannelsLastColMov()
1105 const PT* rstd, in ApplyInputGradientsChannelsLastColMov()
1162 const PT* rstd, in ApplyInputGradientsChannelsLastRowMov()
[all …]
layer_norm_kernel.cpp
37 Tensor* rstd) { in LayerNormKernelImplInternal()
96 Tensor* rstd) { in layer_norm_kernel_mixed_type()
155 Tensor* rstd) { in LayerNormKernelImplInternal()
173 Tensor* rstd) { in LayerNormKernelImpl()
495 const Tensor& rstd, in LayerNormBackwardKernelImplInternal()
585 const Tensor& rstd, in LayerNormBackwardKernelImpl()
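
The backward kernels above need only the saved mean and rstd to rebuild the normalized values. For one row, with g = dY * gamma and xhat = (X - mean) * rstd, the input gradient is rstd * (g - mean(g) - xhat * mean(g * xhat)). A scalar C++ sketch of that formula (illustrative; the real kernels vectorize and fuse the reductions):

#include <cstdint>

template <typename T>
void layer_norm_grad_input_row(const T* dY, const T* X, const T* gamma,
                               T mean, T rstd, int64_t N, T* dX) {
  T sum_g = 0, sum_gx = 0;
  for (int64_t i = 0; i < N; ++i) {
    const T g = dY[i] * gamma[i];
    const T xhat = (X[i] - mean) * rstd;
    sum_g += g;
    sum_gx += g * xhat;
  }
  const T a = sum_g / N;   // mean(g)
  const T b = sum_gx / N;  // mean(g * xhat)
  for (int64_t i = 0; i < N; ++i) {
    const T g = dY[i] * gamma[i];
    const T xhat = (X[i] - mean) * rstd;
    dX[i] = (g - a - xhat * b) * rstd;
  }
}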
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
NativeLayerNorm.cpp
37 vTensorPtr rstd = graph->get_tensor(args[0].refs[2]); in resize_native_layer_norm_node() local
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_native_layer_norm.cpp
32 Tensor& rstd) { in layer_norm()
op_native_group_norm.cpp
35 Tensor& rstd) { in group_norm()
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_native_layer_norm.cpp
35 Tensor& rstd) { in layer_norm()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
NestedTensorBackward.cpp
201 const Tensor& rstd, in layer_norm_backward_nested()
NestedTensorMath.cpp
179 Tensor rstd = at::empty({M}, options); in nested_layer_norm() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Normalization.cpp
107 auto rstd = empty_mkldnn( in mkldnn_layer_norm_last_index_weight_bias_f32() local
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/
shape_inference.cpp
633 const at::Tensor& rstd, in compute_shape_native_layer_norm_backward()
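
Shape inference for the backward pass relies on the convention that mean and rstd keep the input's leading dims and collapse each normalized dim to size 1 (e.g. input {N, C, H, W} normalized over {H, W} yields mean/rstd of shape {N, C, 1, 1}). A sketch of that rule (an assumption drawn from the forward convention, not this file's exact code):

#include <cstdint>
#include <vector>

std::vector<int64_t> stat_shape(std::vector<int64_t> input_shape,
                                size_t normalized_ndim) {
  for (size_t i = input_shape.size() - normalized_ndim;
       i < input_shape.size(); ++i) {
    input_shape[i] = 1;  // normalized dims collapse to 1
  }
  return input_shape;
}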
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/kernels/
QuantizedOpKernels.cpp
3196 Tensor rstd = at::empty(M, X.options().dtype(at::kFloat)); local
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.cpp
4939 const Tensor& rstd, in infinitely_differentiable_native_group_norm_backward()