
Searched full:native_layer_norm (results 1–25 of 130), sorted by relevance


/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  NativeLayerNorm.cpp
    67   VK_THROW("native_layer_norm only supports normalized_shape with dim == 1"); in add_native_layer_norm_node()
    71   VK_THROW("native_layer_norm requires weight to be non-None"); in add_native_layer_norm_node()
    75   VK_THROW("native_layer_norm requires bias to be non-None"); in add_native_layer_norm_node()
    94   std::string kernel_name("native_layer_norm"); in add_native_layer_norm_node()
    124  void native_layer_norm(ComputeGraph& graph, const std::vector<ValueRef>& args) { in native_layer_norm() function
    130  VK_REGISTER_OP(aten.native_layer_norm.default, native_layer_norm);
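The checks above show the constraints the ExecuTorch Vulkan backend places on this op: normalized_shape must have exactly one dimension, and both weight and bias must be provided. A minimal sketch (shapes illustrative, not taken from the source tree) of a call that satisfies those constraints; torch.nn.functional.layer_norm typically lowers to aten.native_layer_norm.default in exported graphs:

import torch
import torch.nn.functional as F

# Illustrative shapes: a single normalized dimension plus explicit weight and
# bias, matching the VK_THROW checks quoted above.
x = torch.randn(2, 8, 64)
weight = torch.ones(64)
bias = torch.zeros(64)

# normalized_shape has dim == 1 and weight/bias are non-None here.
y = F.layer_norm(x, normalized_shape=(64,), weight=weight, bias=bias, eps=1e-5)
print(y.shape)  # torch.Size([2, 8, 64])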
/aosp_15_r20/external/executorch/exir/dialects/edge/test/
  test_edge_yaml.py
    157  # Two of three tensor inputs of native_layer_norm are in optional tensor type.
    158  ret = gen_op_yaml("native_layer_norm.default")
    160  self.assertEqual(ret.func_name, "aten::native_layer_norm")
    161  self.assertEqual(ret.inherits, "aten::native_layer_norm")
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Layernorm.cpp
    13   #include <ATen/ops/native_layer_norm.h>
    74   // We invoke native_layer_norm which returns a tuple of tensors: <layer_norm, in run_layernorm_context()
    77   at::native_layer_norm(input, normalized_shape, weight_opt, bias_opt, eps); in run_layernorm_context()
  NativeLayerNorm.cpp
    55   std::tuple<Tensor, Tensor, Tensor> native_layer_norm( in native_layer_norm() function
    107  TORCH_SELECTIVE_NAME("aten::native_layer_norm"), in TORCH_LIBRARY_IMPL()
    108  TORCH_FN(native_layer_norm)); in TORCH_LIBRARY_IMPL()
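Both files above treat native_layer_norm as returning a three-element tuple. A small sketch of the same call from Python: the tuple is (output, mean, rstd), where mean and rstd are the saved statistics, and the first element matches the composite functional op.

import torch

x = torch.randn(2, 3, 4)
weight = torch.ones(4)
bias = torch.zeros(4)

# native_layer_norm returns (output, mean, rstd); mean and rstd broadcast over
# the normalized dimensions.
out, mean, rstd = torch.native_layer_norm(x, (4,), weight, bias, 1e-5)
print(out.shape, mean.shape, rstd.shape)

# The first element agrees with the composite layer_norm op.
ref = torch.nn.functional.layer_norm(x, (4,), weight, bias, 1e-5)
print(torch.allclose(out, ref))  # True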
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/glsl/
  native_layer_norm.yaml
    7    native_layer_norm:
    16   - NAME: native_layer_norm
/aosp_15_r20/external/executorch/backends/qualcomm/builders/
  README.md
    53   KeyError: 'aten.native_layer_norm.default'
    62   if node.op == "call_function" and node.target.__name__ == 'aten.native_layer_norm.default':
    188  target = ["aten.native_layer_norm.default"]
  op_layer_norm.py
    23   target = ["aten.native_layer_norm.default"]
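The README snippet shows how to locate the graph node behind the KeyError when no builder exists for an op. A hypothetical helper along the same lines (not taken from the README): scan an FX GraphModule for matching nodes. Note the exact __name__ string depends on whether the graph is in ATen or edge dialect.

import torch

def find_layer_norm_nodes(graph_module: torch.fx.GraphModule):
    # Collect call_function nodes whose target is aten.native_layer_norm.default,
    # the same check performed in the README snippet above.
    matches = []
    for node in graph_module.graph.nodes:
        if node.op == "call_function" and node.target.__name__ == "aten.native_layer_norm.default":
            matches.append(node)
    return matches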
/aosp_15_r20/external/executorch/backends/arm/_passes/
  decompose_layernorm_pass.py
    18   if op == exir_ops.edge.aten.native_layer_norm.default:
    61   exir_ops.edge.aten.native_layer_norm.default,
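decompose_layernorm_pass.py matches exir_ops.edge.aten.native_layer_norm.default and rewrites it into simpler edge ops. An illustrative sketch of the arithmetic such a decomposition expands to, written with plain tensor ops rather than the pass's actual node rewriting:

import torch

def layer_norm_decomposed(x, normalized_shape, weight, bias, eps=1e-5):
    # Reduce over the trailing dims named by normalized_shape, using the
    # biased variance as layer norm does.
    dims = tuple(range(-len(normalized_shape), 0))
    mean = x.mean(dim=dims, keepdim=True)
    var = x.var(dim=dims, unbiased=False, keepdim=True)
    rstd = torch.rsqrt(var + eps)
    return (x - mean) * rstd * weight + bias

x = torch.randn(2, 8, 16)
w, b = torch.ones(16), torch.zeros(16)
ref = torch.nn.functional.layer_norm(x, (16,), w, b)
print(torch.allclose(layer_norm_decomposed(x, (16,), w, b), ref, atol=1e-6))  # True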
/aosp_15_r20/external/executorch/kernels/portable/cpu/
  op_native_layer_norm.cpp
    94   // native_layer_norm.out(Tensor input, int[] normalized_shape, Tensor? weight,
    171  input.scalar_type(), ctx, "native_layer_norm.out", CTYPE, [&]() { in native_layer_norm_out()
/aosp_15_r20/external/pytorch/torch/onnx/
  symbolic_opset18.py
    113  @_onnx_symbolic("aten::native_layer_norm")
    124  return opset9.native_layer_norm(g, input, normalized_shape, weight, bias, eps)
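symbolic_opset18.py registers the aten::native_layer_norm symbolic and delegates to the opset-9 implementation. A usage sketch (file name and shapes illustrative) that exercises this path by exporting a LayerNorm-containing model at opset 18:

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(16, 16),
    torch.nn.LayerNorm(16),
)
example = torch.randn(1, 16)

# Exporting at opset 18 routes aten::native_layer_norm through the symbolic
# registered in symbolic_opset18.py.
torch.onnx.export(model, (example,), "layer_norm_opset18.onnx", opset_version=18)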
/aosp_15_r20/external/pytorch/test/expect/
  HasDecompTest.test_aten_core_operators.expect
    369  aten::native_layer_norm
    370  aten::native_layer_norm.out
/aosp_15_r20/external/pytorch/test/
  test_decomp.py
    190  (torch.bfloat16, torch.ops.aten.native_layer_norm.default): 1e-5,
    191  (torch.float16, torch.ops.aten.native_layer_norm.default): 1e-5,
    255  (torch.float32, torch.ops.aten.native_layer_norm.default): (1e-3, 1e-3),
    260  (torch.float64, torch.ops.aten.native_layer_norm.default): (1e-6, 1e-6),
/aosp_15_r20/external/executorch/kernels/optimized/
  optimized-oss.yaml
    68   - op: native_layer_norm.out
  optimized.yaml
    80   - op: native_layer_norm.out
/aosp_15_r20/external/pytorch/torch/nn/utils/_expanded_weights/
  layer_norm_expanded_weights.py
    32   torch.native_layer_norm, expanded_args, expanded_kwargs
/aosp_15_r20/external/executorch/backends/apple/mps/operators/
  normalization_ops.py
    66   target = "aten.native_layer_norm.default"
/aosp_15_r20/external/pytorch/torch/jit/
  _shape_functions.py
    1122  def native_layer_norm( function
    1431  …"aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float…
    1432  native_layer_norm,
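_shape_functions.py keys the JIT shape function on the schema string shown above. A hypothetical helper (not the registered function) sketching the shape rule as observed from the eager op: the output keeps the input shape, while mean and rstd keep the leading dims and collapse each normalized dim to 1.

from typing import List, Tuple

def native_layer_norm_shapes(
    input: List[int], normalized_shape: List[int]
) -> Tuple[List[int], List[int], List[int]]:
    # Stats keep the leading (non-normalized) dims and set the rest to 1.
    reduced = list(input[: len(input) - len(normalized_shape)]) + [1] * len(normalized_shape)
    return list(input), reduced, reduced

print(native_layer_norm_shapes([2, 3, 4], [4]))  # ([2, 3, 4], [2, 3, 1], [2, 3, 1])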
/aosp_15_r20/external/pytorch/test/functorch/
  test_ops.py
    851   "native_layer_norm", ""
    964   xfail("native_layer_norm"), # vmap: inplace into a regular tensor
    1214  "native_layer_norm",
    1927  skip("native_layer_norm"),
    2390  skip("native_layer_norm", "", device_type="cpu"),
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  ts_native_functions.yaml
    91   - native_layer_norm
/aosp_15_r20/external/pytorch/test/inductor/
  test_standalone_compile.py
    107  torch.ops.aten.native_layer_norm.default, (x1, x2, x3, x4, x5), {}
/aosp_15_r20/external/executorch/backends/arm/operator_support/
  tosa_supported_operators.py
    91   exir_ops.edge.aten.native_layer_norm.default,
/aosp_15_r20/external/executorch/backends/vulkan/partitioner/
  supported_ops.py
    102  exir_ops.edge.aten.native_layer_norm.default,
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
  op_native_layer_norm.cpp
    159  input.scalar_type(), ctx, "native_layer_norm.out", CTYPE, [&]() { in opt_native_layer_norm_out()
/aosp_15_r20/external/pytorch/functorch/op_analysis/
  public_api
    439  native_layer_norm
/aosp_15_r20/external/executorch/kernels/aten/
  functions.yaml
    278  - op: native_layer_norm.out
