/aosp_15_r20/external/pytorch/test/export/test_experimental.py
    220: _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None
    221: alias = torch.ops.aten.alias.default(_softmax)
    227: _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None
    269: _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None
    270: alias = torch.ops.aten.alias.default(_softmax)
    276: _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None
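The matches above come from expected graph dumps in test_experimental.py, where softmax and log_softmax show up as the internal ops aten._softmax.default and aten._log_softmax.default. A minimal sketch of how such a graph can be reproduced with torch.export follows; the Toy module and shapes are made up here, and the exact node set depends on the PyTorch version and decomposition settings.

```python
import torch
import torch.nn.functional as F
from torch.export import export


class Toy(torch.nn.Module):  # hypothetical module, not taken from the test
    def forward(self, x):
        y = F.softmax(x.view(-1), dim=0)
        return F.log_softmax(y, dim=0)


# Export and lower to ATen ops; which decompositions run is version-dependent.
ep = export(Toy(), (torch.randn(2, 3),)).run_decompositions()
for node in ep.graph_module.graph.nodes:
    if node.op == "call_function":
        # aten._softmax.default / aten._log_softmax.default typically appear here
        print(node.target)
```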
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/ragged_math_ops_test.py
    33: def _softmax(self, x):    (member of RaggedSoftmaxTest)
    48: self._softmax(np.array(row_matrix)).tolist()
    67: self._softmax(np.array(row_matrix)).tolist()
    83: self._softmax(np.array(row_matrix)).tolist()
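RaggedSoftmaxTest keeps a plain-NumPy _softmax helper as the reference against which tf.ragged softmax results are compared row by row. A sketch of what such a row-wise reference typically computes is below; the actual helper in ragged_math_ops_test.py may differ in detail.

```python
# Illustrative reference softmax (not copied from the TensorFlow test).
import numpy as np


def reference_softmax(x):
    """Numerically stable softmax over the last axis of a 2-D array."""
    x = np.asarray(x, dtype=np.float64)
    shifted = x - x.max(axis=-1, keepdims=True)  # guard against overflow in exp
    exps = np.exp(shifted)
    return exps / exps.sum(axis=-1, keepdims=True)


row_matrix = [[1.0, 2.0, 3.0], [1.0, 1.0, 1.0]]
print(reference_softmax(np.array(row_matrix)).tolist())
```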
/aosp_15_r20/external/pytorch/aten/src/ATen/native/SoftMax.cpp
    22: #include <ATen/ops/_softmax.h>
    40: TORCH_META_FUNC(_softmax)    (in TORCH_META_FUNC())
    97: // the CPU implementation of _softmax. There is a TORCH_CHECK in the CUDA    (in TORCH_META_FUNC())
    129: // the CPU implementation of _softmax. There is a TORCH_CHECK in the CUDA    (in TORCH_META_FUNC())
    447: return at::_softmax(input_, dim_, true);    (in softmax())
    450: return at::_softmax(converted, dim_, false);    (in softmax())
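Lines 447 and 450 of SoftMax.cpp show the public softmax entry point forwarding to the internal at::_softmax, whose third argument (half_to_float) records whether a half-precision input is promoted to a float output. The internal op is also reachable from Python; a rough check that it matches torch.softmax for the plain float case is sketched below.

```python
# Rough sketch: aten::_softmax has the schema (Tensor self, int dim, bool half_to_float).
import torch

x = torch.randn(4, 5)
via_public = torch.softmax(x, dim=1)
via_internal = torch.ops.aten._softmax(x, 1, False)  # half_to_float=False for a float input
print(torch.allclose(via_public, via_internal))  # expected: True
```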
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ts_native_functions.yaml
    8: - _softmax
/aosp_15_r20/external/pytorch/test/distributed/_tensor/experimental/test_register_sharding.py
    26: aten._softmax.default, None
    29: @register_sharding(aten._softmax.default)
    54: aten._softmax.default
/aosp_15_r20/external/executorch/backends/apple/mps/operators/activation_ops.py
    79: target = ["aten._softmax.default", "aten._log_softmax.default"]
    91: if node.target == exir_ops.edge.aten._softmax.default
/aosp_15_r20/external/pytorch/test/expect/HasDecompTest.test_aten_core_operators.expect
    21: aten::_softmax
    22: aten::_softmax.out
/aosp_15_r20/external/executorch/examples/arm/run.sh
    28: portable_kernels="aten::_softmax.out"
    89: echo " e.g. \"aten::_softmax.out,aten::add.out\""
/aosp_15_r20/external/pytorch/torch/masked/maskedtensor/_ops_refs.py
    357: @register_dispatch_func([torch.ops.aten._softmax])
    358: def _softmax(func, *args, **kwargs):
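_ops_refs.py registers a dispatch handler for torch.ops.aten._softmax so that softmax over a MaskedTensor only normalizes the unmasked entries. A hedged sketch of what that registration enables is below; MaskedTensor is a prototype feature, so the exact API surface and output formatting depend on the PyTorch version.

```python
# Illustrative only: masked softmax routed through the aten._softmax handler above.
import torch
from torch.masked import masked_tensor

data = torch.tensor([[1.0, 2.0, 3.0]])
mask = torch.tensor([[True, True, False]])  # last element is masked out
mt = masked_tensor(data, mask)
print(torch.softmax(mt, -1))  # normalizes over the unmasked entries only
```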
/aosp_15_r20/external/executorch/backends/xnnpack/operators/op_softmax.py
    25: target = "aten._softmax.default"
/aosp_15_r20/external/executorch/backends/qualcomm/builders/op_softmax.py
    20: target = ["aten._softmax.default", "aten._safe_softmax.default"]
/aosp_15_r20/external/executorch/kernels/portable/cpu/op_softmax.cpp
    45: ET_SWITCH_FLOATH_TYPES(in.scalar_type(), ctx, "_softmax.out", CTYPE, [&]() {    (in softmax_out())
/aosp_15_r20/external/executorch/backends/arm/_passes/decompose_softmaxes_pass.py
    18: exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/Softmax.cpp
    120: VK_REGISTER_OP(aten._softmax.default, softmax);
/aosp_15_r20/external/executorch/backends/arm/operator_support/tosa_supported_operators.py
    100: exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/executorch/backends/vulkan/partitioner/supported_ops.py
    97: exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/pytorch/torch/distributed/tensor/experimental/_register_sharding.py
    48: >>> @register_sharding(aten._softmax.default)
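Line 48 is part of the register_sharding docstring, which uses aten._softmax.default as its running example. A fuller sketch of such a registration, in the spirit of that docstring, is below; register_sharding is an experimental API, so the import paths and the (output_specs, input_specs) strategy format may vary across PyTorch versions. Treat this as illustrative rather than the canonical implementation.

```python
import torch
from torch.distributed.tensor import Replicate, Shard
from torch.distributed.tensor.experimental import register_sharding

aten = torch.ops.aten


@register_sharding(aten._softmax.default)
def custom_softmax_sharding(x, dim, half_to_float):
    softmax_dim = dim if dim >= 0 else dim + x.ndim
    acceptable_shardings = []

    # Fully replicated inputs/outputs are always a valid fallback.
    acceptable_shardings.append(([Replicate()], [Replicate(), None, None]))

    # Sharding any dimension other than the softmax dimension keeps the
    # reduction local to each shard, so those placements are acceptable too.
    for sharding_dim in range(x.ndim):
        if sharding_dim != softmax_dim:
            acceptable_shardings.append(
                ([Shard(sharding_dim)], [Shard(sharding_dim), None, None])
            )

    return acceptable_shardings
```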
/aosp_15_r20/external/pytorch/test/nn/test_multihead_attention.py
    74: reference = _softmax(QKT)
    93: def _softmax(x):  # softmax over 4 dim matrix
/aosp_15_r20/external/executorch/backends/cadence/aot/functions_hifi.yaml
    20: - op: _softmax.out

/aosp_15_r20/external/executorch/backends/cadence/aot/functions.yaml
    20: - op: _softmax.out

/aosp_15_r20/external/executorch/backends/cadence/aot/compiler.py
    68: # Swap _safe_softmax with _softmax (see https://github.com/pytorch/pytorch/pull/133882
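The compiler.py comment at line 68 refers to swapping aten._safe_softmax back to aten._softmax (the linked PyTorch PR introduced _safe_softmax). An illustrative FX-style sketch of such a swap is below; it is not the actual Cadence pass, and the argument rewrite assumes the schemas _safe_softmax(Tensor self, int dim, ScalarType? dtype=None) and _softmax(Tensor self, int dim, bool half_to_float).

```python
# Illustrative graph rewrite, not the Cadence implementation.
import torch
from torch.fx import GraphModule


def swap_safe_softmax(gm: GraphModule) -> GraphModule:
    for node in gm.graph.nodes:
        if (
            node.op == "call_function"
            and node.target == torch.ops.aten._safe_softmax.default
        ):
            # Retarget the node and rewrite the trailing argument: _safe_softmax
            # takes an optional dtype, _softmax takes a half_to_float flag.
            node.target = torch.ops.aten._softmax.default
            node.args = (node.args[0], node.args[1], False)
    gm.graph.lint()
    gm.recompile()
    return gm
```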
/aosp_15_r20/external/executorch/kernels/aten/functions.yaml
    25: - op: _softmax.out
/aosp_15_r20/external/executorch/backends/xnnpack/partition/configs.py
    60: exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/Softmax.cpp
    210: m.impl("_softmax", TORCH_FN(softmax));    (in TORCH_LIBRARY_IMPL())
/aosp_15_r20/external/pytorch/test/edge/selected_operators.yaml
    26: aten::_softmax.out: