/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  layer_norm.cpp
    39   IntArrayRef normalized_shape, in layer_norm_with_mean_rstd_out()
    77   …IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, const std::o… in layer_norm_cpu()
    115  IntArrayRef normalized_shape, in layer_norm_backward_cpu()
    189  …c10::SymIntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, cons… in layer_norm_symint()
    207  …IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>… in math_native_layer_norm()
    268  IntArrayRef normalized_shape, in rms_norm()
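
  These are the CPU entry points behind the public at::layer_norm / native_layer_norm calls.
  A minimal usage sketch of that public API (tensor shapes, eps, and cudnn_enable are
  illustrative values, not taken from the file above):

    // normalized_shape names the trailing dimensions to normalize over;
    // weight and bias, if supplied, must have exactly that shape.
    #include <ATen/ATen.h>

    int main() {
      at::Tensor input = at::randn({2, 5, 10, 10});
      std::vector<int64_t> normalized_shape = {10, 10};  // last two dims
      at::Tensor weight = at::ones(normalized_shape);
      at::Tensor bias = at::zeros(normalized_shape);
      at::Tensor out = at::layer_norm(input, normalized_shape, weight, bias,
                                      /*eps=*/1e-5, /*cudnn_enable=*/true);
      return out.defined() ? 0 : 1;
    }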

  layer_norm.h
    13   IntArrayRef normalized_shape, in _check_layer_norm_inputs()
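
  _check_layer_norm_inputs validates normalized_shape against the input, weight, and bias
  before the kernels above run. A hedged, standalone sketch of that kind of check (names and
  error messages are illustrative, not copied from the header):

    #include <ATen/ATen.h>
    #include <c10/util/Exception.h>

    void check_layer_norm_inputs_sketch(const at::Tensor& input,
                                        at::IntArrayRef normalized_shape,
                                        const at::Tensor& weight,
                                        const at::Tensor& bias) {
      const auto ndim = static_cast<int64_t>(normalized_shape.size());
      TORCH_CHECK(ndim >= 1, "normalized_shape must have at least one element");
      TORCH_CHECK(!weight.defined() || weight.sizes().equals(normalized_shape),
                  "weight must have the same shape as normalized_shape");
      TORCH_CHECK(!bias.defined() || bias.sizes().equals(normalized_shape),
                  "bias must have the same shape as normalized_shape");
      const auto input_sizes = input.sizes();
      TORCH_CHECK(
          static_cast<int64_t>(input_sizes.size()) >= ndim &&
              input_sizes.slice(input_sizes.size() - ndim).equals(normalized_shape),
          "the trailing dimensions of input must match normalized_shape");
    }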

/aosp_15_r20/external/executorch/kernels/test/
  op_native_layer_norm_test.cpp
    39   IntArrayRef normalized_shape, in op_native_layer_norm_out()
    62   const std::vector<int32_t> normalized_shape; member
    91   auto normalized_shape = exec_aten::ArrayRef<int64_t>( in run_test_cases() local
    256  auto normalized_shape = exec_aten::ArrayRef<int64_t>( in run_death_test_cases() local
    372  int64_t normalized_shape[] = {3}; in test_dynamic_shape() local

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/
  layout_normalization.cc
    69   auto normalized_shape = Normalize(shape); in DefaultAction() local
    121  auto normalized_shape = in HandleConcatenate() local
    152  auto normalized_shape = Normalize(s); in HandleBroadcast() local
    300  auto normalized_shape = Normalize(s); in HandleTranspose() local

/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
  BatchRulesNorm.cpp
    439  c10::SymIntArrayRef normalized_shape) { in has_same_shape()
    462  c10::SymIntArrayRef normalized_shape, const std::string& name) { in check_same_shape()
    473  SymIntArrayRef normalized_shape, in _check_layer_norm_inputs()
    490  c10::SymIntArrayRef normalized_shape, in native_layer_norm_batch_rule()
    536  at::IntArrayRef normalized_shape, in native_layer_norm_backward_no_weight_bias_batch_rule()
    573  at::IntArrayRef normalized_shape, in native_layer_norm_backward_plumbing()

/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/options/
  normalization.cpp
    6    LayerNormOptions::LayerNormOptions(std::vector<int64_t> normalized_shape) in LayerNormOptions()
    17   std::vector<int64_t> normalized_shape) in LayerNormFuncOptions()
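
  These constructors take normalized_shape for the C++ frontend's LayerNorm options.
  An illustrative usage sketch (the shapes and option values are examples, not from the file):

    #include <torch/torch.h>

    int main() {
      // Normalize over the last two dimensions ({10, 10}).
      torch::nn::LayerNorm norm(
          torch::nn::LayerNormOptions({10, 10}).eps(1e-5).elementwise_affine(true));
      auto y = norm(torch::randn({2, 5, 10, 10}));
      return y.defined() ? 0 : 1;
    }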

/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Layernorm.cpp
    60   IntArrayRef normalized_shape, in run_layernorm_context()
    83   IntArrayRef normalized_shape, in layer_norm()

  NativeLayerNorm.cpp
    14   IntArrayRef normalized_shape, in _check_layer_norm_inputs()
    57   IntArrayRef normalized_shape, in native_layer_norm()

/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
  normalization_ops_util.cpp
    77   IntArrayRef normalized_shape, in check_layer_norm_args()
    119  IntArrayRef normalized_shape, in get_layer_norm_out_target_size()
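
  get_layer_norm_out_target_size derives the output and mean/rstd sizes from the input and
  normalized_shape. A hedged sketch of that shape arithmetic using plain std::vector rather
  than the ExecuTorch tensor API (the function name is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // The normalized output keeps the input shape; mean and rstd keep the
    // leading dims and collapse each normalized dim to 1.
    std::vector<int64_t> layer_norm_stat_sizes_sketch(
        const std::vector<int64_t>& input_sizes, size_t normalized_ndim) {
      std::vector<int64_t> stat_sizes = input_sizes;          // e.g. {N, C, H, W}
      for (size_t i = input_sizes.size() - normalized_ndim; i < input_sizes.size(); ++i) {
        stat_sizes[i] = 1;                                    // e.g. {N, C, 1, 1}
      }
      return stat_sizes;
    }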

/aosp_15_r20/external/executorch/kernels/portable/cpu/
  op_native_layer_norm.cpp
    26   IntArrayRef normalized_shape, in layer_norm()
    102  IntArrayRef normalized_shape, in native_layer_norm_out()

/aosp_15_r20/external/executorch/kernels/optimized/cpu/
  op_native_layer_norm.cpp
    29   IntArrayRef normalized_shape, in layer_norm()
    117  IntArrayRef normalized_shape, in opt_native_layer_norm_out()

/aosp_15_r20/external/executorch/backends/cadence/reference/operators/
  quantized_layer_norm.cpp
    120  __ET_UNUSED const executorch::aten::IntArrayRef normalized_shape, in quantized_layer_norm_out()
    162  __ET_UNUSED const executorch::aten::IntArrayRef normalized_shape, in quantized_layer_norm_per_tensor_out()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
  qnormalization.cpp
    25   IntArrayRef normalized_shape, in quantized_layer_norm_impl()
    142  int64_t output_zero_point) { in TORCH_LIBRARY_IMPL()

/aosp_15_r20/external/executorch/backends/cadence/hifi/operators/
  quantized_layer_norm.cpp
    127  __ET_UNUSED const IntArrayRef normalized_shape, in quantized_layer_norm_out()
    165  __ET_UNUSED const IntArrayRef normalized_shape, in quantized_layer_norm_per_tensor_out()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
  Normalization.cpp
    43   IntArrayRef normalized_shape, const Tensor& weight, const Tensor& bias, in mkldnn_layer_norm_last_index_weight_bias_f32()
    93   IntArrayRef normalized_shape, const Tensor& weight, const Tensor& bias, in mkldnn_layer_norm_last_index_weight_bias_f32()

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/
  normalization.h
    33   LayerNormImpl(std::vector<int64_t> normalized_shape) in LayerNormImpl()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
  NestedTensorMath.h
    30   IntArrayRef normalized_shape, in _check_nested_layer_norm_inputs()

/aosp_15_r20/external/pytorch/torch/onnx/
  symbolic_opset17.py
    77   normalized_shape, argument

/aosp_15_r20/external/pytorch/test/onnx/
  test_onnxscript_no_runtime.py
    72   g, input, normalized_shape, weight, bias, eps, cudnn_enable argument

  test_onnxscript_runtime.py
    111  g, input, normalized_shape, weight, bias, eps, cudnn_enable argument

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
  normalization.h
    56   const std::vector<int64_t>& normalized_shape, in layer_norm()
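
  The functional header forwards normalized_shape through LayerNormFuncOptions
  (constructed in the options entry above). An illustrative call, with example values:

    #include <torch/torch.h>

    int main() {
      namespace F = torch::nn::functional;
      auto x = torch::randn({2, 5, 10});
      auto y = F::layer_norm(
          x, F::LayerNormFuncOptions({10})
                 .weight(torch::ones({10}))
                 .bias(torch::zeros({10}))
                 .eps(1e-5));
      return y.defined() ? 0 : 1;
    }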

/aosp_15_r20/external/XNNPACK/src/
  normalization.c
    76   size_t* normalized_shape, in xnn_normalize_transpose_permutation()

/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  NativeLayerNorm.cpp
    59   const ValueRef normalized_shape, in add_native_layer_norm_node()

/aosp_15_r20/external/pytorch/torch/distributed/tensor/examples/
  convnext_example.py
    30   def __init__(self, normalized_shape, eps=1e-6, data_format=torch.contiguous_format): argument
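
  The ConvNeXt example defines a LayerNorm whose behaviour depends on data_format. A hedged
  C++/ATen sketch of the channels-first variant of that computation (normalizing an NCHW
  tensor over its channel dimension; the example itself is Python and the names here are
  illustrative):

    #include <ATen/ATen.h>

    at::Tensor channels_first_layer_norm_sketch(const at::Tensor& x,      // [N, C, H, W]
                                                const at::Tensor& weight, // [C]
                                                const at::Tensor& bias,   // [C]
                                                double eps = 1e-6) {
      auto mean = x.mean(/*dim=*/1, /*keepdim=*/true);
      auto var = (x - mean).pow(2).mean(/*dim=*/1, /*keepdim=*/true);
      auto y = (x - mean) / at::sqrt(var + eps);
      return weight.view({1, -1, 1, 1}) * y + bias.view({1, -1, 1, 1});
    }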

/aosp_15_r20/external/pytorch/aten/src/ATen/test/
  math_kernel_test.cpp
    62   std::vector<int64_t> normalized_shape(normalized_size, 10); in TEST() local
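
  The test builds normalized_shape as normalized_size dimensions of size 10 and checks the
  layer-norm kernels against a reference. A hedged sketch of that kind of comparison using
  the public API and a manual reference (not the test's actual harness):

    #include <ATen/ATen.h>

    bool layer_norm_matches_manual_sketch() {
      const int64_t normalized_size = 2;
      std::vector<int64_t> normalized_shape(normalized_size, 10);   // {10, 10}
      auto x = at::randn({3, 10, 10});
      auto out = at::layer_norm(x, normalized_shape, /*weight=*/{}, /*bias=*/{},
                                /*eps=*/1e-5, /*cudnn_enable=*/false);
      // Manual reference: normalize over the trailing normalized_size dims.
      auto mean = x.mean({-2, -1}, /*keepdim=*/true);
      auto var = x.var({-2, -1}, /*unbiased=*/false, /*keepdim=*/true);
      auto ref = (x - mean) / at::sqrt(var + 1e-5);
      return at::allclose(out, ref, /*rtol=*/1e-4, /*atol=*/1e-5);
    }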