
Searched full:layer_norm (Results 1 – 25 of 150) sorted by relevance

/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_layer_norm.py
52 self.layer_norm = torch.nn.LayerNorm(
59 self.layer_norm.weight = torch.nn.Parameter(
63 self.layer_norm.bias = torch.nn.Parameter(
68 return self.layer_norm(x)
82 .check(["torch.ops.aten.layer_norm.default"])
86 .check_not(["torch.ops.aten.layer_norm.default"])
103 .check_not(["torch.ops.aten.layer_norm.default"])
125 .check_not(["torch.ops.aten.layer_norm.default"])
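These hits are from ExecuTorch's Arm backend test, which wraps torch.nn.LayerNorm in a small module, overrides its affine parameters, and then checks whether torch.ops.aten.layer_norm.default survives or is removed during lowering. A minimal sketch of that module pattern (shapes and names here are illustrative, not copied from the test):

```python
import torch

class LayerNormModule(torch.nn.Module):
    """Tiny wrapper mirroring the pattern in test_layer_norm.py."""

    def __init__(self, normalized_shape=(16,)):
        super().__init__()
        self.layer_norm = torch.nn.LayerNorm(normalized_shape)
        # The test sets the affine parameters explicitly, as at lines 59/63:
        self.layer_norm.weight = torch.nn.Parameter(torch.ones(normalized_shape))
        self.layer_norm.bias = torch.nn.Parameter(torch.zeros(normalized_shape))

    def forward(self, x):
        return self.layer_norm(x)

m = LayerNormModule()
y = m(torch.randn(2, 16))
```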
/aosp_15_r20/external/pytorch/test/distributed/_tensor/
test_math_ops.py
279 layer_norm = torch.nn.LayerNorm(
284 layer_norm_local = copy.deepcopy(layer_norm).to(self.device_type)
294 layer_norm_dist = distribute_module(layer_norm, device_mesh, _replicate_fn)
344 layer_norm = torch.nn.LayerNorm(
349 layer_norm_local = copy.deepcopy(layer_norm).to(self.device_type)
359 layer_norm_dist = distribute_module(layer_norm, device_mesh, _replicate_fn)
472 self.layer_norm = torch.nn.LayerNorm(
481 h = self.layer_norm(h)
489 "layer_norm": SequenceParallel(),
502 "layer_norm": ln_req_grad,
/aosp_15_r20/external/pytorch/torch/onnx/
symbolic_opset17.py
32 __all__ = ["layer_norm", "stft", "quantized_layer_norm"]
37 @_onnx_symbolic("aten::layer_norm")
39 def layer_norm( function
50 # layer_norm normalizes on the last D dimensions,
73 @_onnx_symbolic("quantized::layer_norm")
86 output = layer_norm(g, x, normalized_shape, weight, bias, eps, False)
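These hits show the opset-17 symbolic: aten::layer_norm maps to ONNX's native LayerNormalization operator (new in opset 17), and quantized::layer_norm dequantizes, reuses the same layer_norm symbolic (line 86), and requantizes. A hedged export sketch exercising that path:

```python
import torch

model = torch.nn.LayerNorm(10)
x = torch.randn(2, 10)
# At opset_version >= 17 the exporter can emit ONNX LayerNormalization
# directly instead of decomposing it into primitive ops.
torch.onnx.export(model, (x,), "layer_norm.onnx", opset_version=17)
```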
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
normalization.h
54 inline Tensor layer_norm( in layer_norm() function
60 return torch::layer_norm(input, normalized_shape, weight, bias, eps); in layer_norm()
66 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.layer_norm
75 /// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
77 inline Tensor layer_norm( in layer_norm() function
80 return detail::layer_norm( in layer_norm()
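The C++ frontend's F::layer_norm is a thin options-based wrapper that forwards to torch::layer_norm. To keep all examples in this listing in one language, here is the equivalent Python functional call for the documented C++ usage F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)):

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 2, 2)
# Normalizes over the trailing (2, 2) dimensions with a custom eps.
y = F.layer_norm(x, normalized_shape=(2, 2), eps=2e-5)
```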
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
Layernorm.cpp
74 // We invoke native_layer_norm which returns a tuple of tensors: <layer_norm, in run_layernorm_context()
75 // mean, 1/sqrt(var+eps)>, but we only need the first tensor (layer_norm). in run_layernorm_context()
81 static Tensor layer_norm( in layer_norm() function
98 m.impl(TORCH_SELECTIVE_NAME("aten::layer_norm"), TORCH_FN(layer_norm)); in TORCH_LIBRARY_IMPL()
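The Vulkan backend's comment explains the key fact: native_layer_norm returns a tuple <output, mean, rstd>, where rstd is 1/sqrt(var + eps), and only the first element is the layer_norm result. A hedged sketch showing the tuple from the eager op:

```python
import torch

x = torch.randn(3, 5)
weight, bias = torch.ones(5), torch.zeros(5)
out, mean, rstd = torch.native_layer_norm(x, (5,), weight, bias, 1e-5)
# `out` alone matches the functional layer_norm; mean/rstd are saved for backward.
assert torch.allclose(
    out, torch.nn.functional.layer_norm(x, (5,), weight, bias, 1e-5)
)
```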
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/normalization/
layer_normalization_test.py
159 layer_norm = layer_normalization.LayerNormalization(axis=3)
160 layer_norm.build(input_shape=(2, 2, 2))
165 layer_norm = layer_normalization.LayerNormalization(axis=[-1, -1])
166 layer_norm.build(input_shape=(2, 2, 2))
170 layer_norm = layer_normalization.LayerNormalization(axis=[-2, -1])
171 layer_norm.build(input_shape=(2, 2, 2))
172 self.assertEqual(layer_norm._fused, True)
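The Keras tests exercise the axis argument; normalizing over the last two axes of a rank-3 input lets the layer take its fused fast path (the test asserts the private _fused flag). A hedged sketch using only the public API:

```python
import tensorflow as tf

layer_norm = tf.keras.layers.LayerNormalization(axis=[-2, -1])
layer_norm.build(input_shape=(2, 2, 2))
y = layer_norm(tf.random.normal((2, 2, 2)))
```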
/aosp_15_r20/external/pytorch/test/onnx/
test_onnxscript_no_runtime.py
51 # 2. Register layer_norm onnxscript function as custom Op
53 def layer_norm( function
77 layer_norm, input, weight, bias, axes_i=axes, eps_f=eps
81 symbolic_name="aten::layer_norm",
107 self.assertEqual(layer_norm_proto.functions[0].name, "layer_norm")
test_onnxscript_runtime.py
92 def layer_norm( function
116 layer_norm, input, weight, bias, axes_i=axes, eps_f=eps
120 symbolic_name="aten::layer_norm",
test_pytorch_onnx_onnxruntime_cuda.py
54 self.layer_norm = torch.nn.LayerNorm([10, 10])
58 return self.layer_norm(x)
/aosp_15_r20/external/executorch/exir/tests/
models.py
250 self.layer_norm = nn.LayerNorm(input_dim)
262 y = self.layer_norm(x)
498 self.layer_norm = nn.LayerNorm(input_dim)
532 CR_normed = self.layer_norm(CR)
533 # C_normed = self.layer_norm(C)
534 # R_normed = self.layer_norm(R)
563 Z_CR_normed = self.layer_norm(Z_CR)
567 output = self.layer_norm(self.elem_add(ffn_out, Z_CR))
/aosp_15_r20/external/pytorch/torch/nn/modules/
normalization.py
155 >>> layer_norm = nn.LayerNorm(embedding_dim)
157 >>> layer_norm(embedding)
164 >>> layer_norm = nn.LayerNorm([C, H, W])
165 >>> output = layer_norm(input)
167 .. image:: ../_static/img/nn/layer_norm.jpg
217 return F.layer_norm(
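The nn.LayerNorm docstring these hits come from shows the two canonical uses: normalize over the embedding dimension for sequence inputs, or over (C, H, W) for images. Reconstructed from the docstring excerpts above (the concrete sizes are illustrative):

```python
import torch
import torch.nn as nn

# Sequence input: normalize over the last (embedding) dimension.
batch, sentence_length, embedding_dim = 20, 5, 10
embedding = torch.randn(batch, sentence_length, embedding_dim)
layer_norm = nn.LayerNorm(embedding_dim)
out = layer_norm(embedding)

# Image input: normalize over channel and spatial dimensions together.
N, C, H, W = 20, 5, 10, 10
input = torch.randn(N, C, H, W)
layer_norm = nn.LayerNorm([C, H, W])
output = layer_norm(input)
```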
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
decompose_ops.cpp
37 …"aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, b… in isDecomposableNorm()
160 …"aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, b… in DecomposeOps()
175 toGraphFunction(decompose_funcs.get_function("layer_norm")).graph(); in DecomposeOps()
210 …def layer_norm(input : Tensor, normalized_shape : List[int], eps : float, cudnn_enable : bool) -> … in DecomposeOps()
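This JIT pass rewrites aten::layer_norm into its primitive ops; the schema in the (truncated) hits is aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, bool cudnn_enable). A hedged sketch of the arithmetic such a decomposition performs, checked against the fused op:

```python
import torch
import torch.nn.functional as F

def layer_norm_decomposed(x, normalized_shape, weight, bias, eps=1e-5):
    # Normalize over the trailing `normalized_shape` dimensions.
    dims = tuple(range(-len(normalized_shape), 0))
    mean = x.mean(dims, keepdim=True)
    var = x.var(dims, unbiased=False, keepdim=True)
    y = (x - mean) / torch.sqrt(var + eps)
    return y * weight + bias

x = torch.randn(4, 8)
w, b = torch.ones(8), torch.zeros(8)
assert torch.allclose(
    layer_norm_decomposed(x, (8,), w, b), F.layer_norm(x, (8,), w, b), atol=1e-6
)
```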
/aosp_15_r20/external/executorch/examples/cadence/models/
rnnt_encoder.py
90 self.layer_norm = torch.nn.LayerNorm(output_dim)
103 layer_norm_out = self.layer_norm(output_linear_out)
123 layer_norm_out = self.layer_norm(output_linear_out)
rnnt_predictor.py
36 self.layer_norm = torch.nn.LayerNorm(symbol_embedding_dim)
47 layer_norm_out = self.layer_norm(linear_out)
/aosp_15_r20/external/executorch/backends/arm/_passes/
decompose_layernorm_pass.py
29 if op == torch.ops.aten.layer_norm.default:
40 raise RuntimeError(f"Can't get layer_norm composition for op {op}")
62 torch.ops.aten.layer_norm.default,
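The Arm pass pattern-matches torch.ops.aten.layer_norm.default and raises a RuntimeError for ops it cannot decompose. The overload it targets can be invoked directly in eager mode; a hedged illustration of the full positional schema:

```python
import torch

x = torch.randn(2, 6)
w, b = torch.ones(6), torch.zeros(6)
# aten::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable)
y = torch.ops.aten.layer_norm.default(x, [6], w, b, 1e-5, True)
assert torch.allclose(y, torch.nn.functional.layer_norm(x, (6,), w, b))
```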
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_native_layer_norm.cpp
24 void layer_norm( in layer_norm() function
98 …da5b17b086554c6cd0c3ab08a35aeec2a8bad8c]/xplat/caffe2/aten/src/ATen/native/layer_norm.cpp?lines=188
172 layer_norm<CTYPE>( in native_layer_norm_out()
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/
mappings.py
215 # F.layer_norm
217 F.layer_norm,
497 F.layer_norm,
525 toq.layer_norm,
/aosp_15_r20/external/executorch/backends/cadence/hifi/operators/
quantized_layer_norm.cpp
27 // Compute quantized layer_norm. The current implementation assumes that the
52 // layer_norm for each vector. in quantized_layer_norm_per_tensor_()
92 // Compute quantized layer_norm. The current implementation assumes that the
/aosp_15_r20/external/executorch/backends/cadence/reference/operators/
quantized_layer_norm.cpp
24 // Compute quantized layer_norm. The current implementation assumes that the
49 // layer_norm for each vector. in quantized_layer_norm_per_tensor_()
85 // Compute quantized layer_norm. The current implementation assumes that the
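Both Cadence kernels compute layer_norm directly on quantized data, one vector at a time. Numerically this is equivalent to dequantize → float layer_norm → requantize, which a hedged eager-mode sketch can express with PyTorch's per-tensor quantization (scale and zero_point here are arbitrary example values):

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8)
scale, zero_point = 0.05, 128
xq = torch.quantize_per_tensor(x, scale, zero_point, torch.quint8)

# Reference semantics of a quantized layer_norm kernel:
y = F.layer_norm(xq.dequantize(), (8,))
yq = torch.quantize_per_tensor(y, scale, zero_point, torch.quint8)
```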
/aosp_15_r20/external/pytorch/test/
test_nestedtensor.py
937 layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
938 nt_result = layer_norm(nt)
940 t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
949 layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
950 nt_result = layer_norm(nt)
952 t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
969 layer_norm = torch.nn.LayerNorm(
972 nt_result = layer_norm(nt)
974 t_result = layer_norm(t.reshape(1, -1, size, size, 4).squeeze(0))
978 layer_norm = torch.nn.LayerNorm((size, 4), device=device, dtype=dtype)
[all …]
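The nested-tensor tests apply an ordinary nn.LayerNorm to a nested tensor and compare each ragged component against a reshaped dense run. A hedged sketch of that usage (supported when normalized_shape covers only trailing, non-ragged dimensions):

```python
import torch

size = 8
nt = torch.nested.nested_tensor(
    [torch.randn(3, size), torch.randn(5, size)]
)
layer_norm = torch.nn.LayerNorm(size)
nt_result = layer_norm(nt)  # normalizes each component over the last dim
```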
/aosp_15_r20/external/pytorch/torch/nn/utils/_expanded_weights/
layer_norm_expanded_weights.py
17 @implements_per_sample_grads(F.layer_norm)
48 F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output,
/aosp_15_r20/external/pytorch/benchmarks/tensorexpr/
pt_engine.py
39 def layer_norm(self, data, shape): member in TorchTensorEngine
40 return torch.nn.functional.layer_norm(data, shape)
/aosp_15_r20/external/executorch/backends/qualcomm/builders/
README.md
31 self.layer_norm = torch.nn.LayerNorm([768], eps=1e-6)
35 return self.linear(self.layer_norm(x))
127 …ion](https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/layer_norm.cpp) mentioned i…
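The Qualcomm builders README uses a LayerNorm-plus-Linear module as its running example. A reconstruction of that snippet from the two hit lines above (the Linear dimensions are a hypothetical fill-in; the README's full text is truncated here):

```python
import torch

class ExampleModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer_norm = torch.nn.LayerNorm([768], eps=1e-6)
        self.linear = torch.nn.Linear(768, 768)  # hypothetical dims; not in the hits

    def forward(self, x):
        return self.linear(self.layer_norm(x))
```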
/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/
transformer.cpp
18 #include <ATen/ops/layer_norm.h>
70 return at::layer_norm(input, {embed_dim}, weight, bias, eps, true); in norm()
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_native_layer_norm.cpp
27 void layer_norm( in layer_norm() function
160 layer_norm<CTYPE>( in opt_native_layer_norm_out()
