Searched full:_softmax (Results 1 – 25 of 138) sorted by relevance

/aosp_15_r20/external/pytorch/test/export/
test_experimental.py 220 _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None
221 alias = torch.ops.aten.alias.default(_softmax)
227 _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None
269 _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None
270 alias = torch.ops.aten.alias.default(_softmax)
276 _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None
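The test_experimental.py hits above come from a torch.export graph in which softmax appears as the low-level ATen op. A minimal sketch of reproducing that (assuming a recent PyTorch; the exact decomposition can vary by version):

    import torch

    class M(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.softmax(x, dim=0)

    # Export captures an ATen-level graph; after decomposition to core ATen,
    # softmax typically shows up as
    # torch.ops.aten._softmax.default(x, dim, half_to_float),
    # as in the snippets above.
    ep = torch.export.export(M(), (torch.randn(3, 4),)).run_decompositions()
    print(ep.graph_module.code)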
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/
ragged_math_ops_test.py 33 def _softmax(self, x): member in RaggedSoftmaxTest
48 self._softmax(np.array(row_matrix)).tolist()
67 self._softmax(np.array(row_matrix)).tolist()
83 self._softmax(np.array(row_matrix)).tolist()
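The RaggedSoftmaxTest hits above compare against a plain NumPy reference softmax. A numerically stable sketch of such a helper (illustrative only, not the exact TensorFlow test code):

    import numpy as np

    def reference_softmax(x, axis=-1):
        # Subtract the row-wise max before exponentiating to avoid overflow,
        # then normalize so each row sums to 1.
        shifted = x - np.max(x, axis=axis, keepdims=True)
        exp = np.exp(shifted)
        return exp / np.sum(exp, axis=axis, keepdims=True)

    row_matrix = [[1.0, 2.0, 3.0], [5.0, 1.0, 4.0]]
    print(reference_softmax(np.array(row_matrix)).tolist())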
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
SoftMax.cpp 22 #include <ATen/ops/_softmax.h>
40 TORCH_META_FUNC(_softmax) in TORCH_META_FUNC() argument
97 // the CPU implementation of _softmax. There is a TORCH_CHECK in the CUDA in TORCH_META_FUNC()
129 // the CPU implementation of _softmax. There is a TORCH_CHECK in the CUDA in TORCH_META_FUNC()
447 return at::_softmax(input_, dim_, true); in softmax()
450 return at::_softmax(converted, dim_, false); in softmax()
ts_native_functions.yaml 8 - _softmax
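The SoftMax.cpp hit above shows the public softmax op forwarding to at::_softmax(input, dim, half_to_float), where the flag requests a half-to-float output conversion (checked in the CUDA path, per the truncated comments above). A quick Python-level sanity check of the no-conversion path (half_to_float=False; names are illustrative):

    import torch

    x = torch.randn(2, 5)
    # With half_to_float=False, the underlying ATen op should match the
    # public softmax on the same float input.
    out_public = torch.softmax(x, dim=-1)
    out_aten = torch.ops.aten._softmax.default(x, -1, False)
    torch.testing.assert_close(out_public, out_aten)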
/aosp_15_r20/external/pytorch/test/distributed/_tensor/experimental/
test_register_sharding.py 26 aten._softmax.default, None
29 @register_sharding(aten._softmax.default)
54 aten._softmax.default
/aosp_15_r20/external/executorch/backends/apple/mps/operators/
activation_ops.py 79 target = ["aten._softmax.default", "aten._log_softmax.default"]
91 if node.target == exir_ops.edge.aten._softmax.default
/aosp_15_r20/external/pytorch/test/expect/
HasDecompTest.test_aten_core_operators.expect 21 aten::_softmax
22 aten::_softmax.out
/aosp_15_r20/external/executorch/examples/arm/
run.sh 28 portable_kernels="aten::_softmax.out"
89 echo " e.g. \"aten::_softmax.out,aten::add.out\""
/aosp_15_r20/external/pytorch/torch/masked/maskedtensor/
_ops_refs.py 357 @register_dispatch_func([torch.ops.aten._softmax])
358 def _softmax(func, *args, **kwargs): function
/aosp_15_r20/external/executorch/backends/xnnpack/operators/
op_softmax.py 25 target = "aten._softmax.default"
/aosp_15_r20/external/executorch/backends/qualcomm/builders/
op_softmax.py 20 target = ["aten._softmax.default", "aten._safe_softmax.default"]
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_softmax.cpp 45 ET_SWITCH_FLOATH_TYPES(in.scalar_type(), ctx, "_softmax.out", CTYPE, [&]() { in softmax_out()
/aosp_15_r20/external/executorch/backends/arm/_passes/
decompose_softmaxes_pass.py 18 exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Softmax.cpp 120 VK_REGISTER_OP(aten._softmax.default, softmax);
/aosp_15_r20/external/executorch/backends/arm/operator_support/
tosa_supported_operators.py 100 exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/executorch/backends/vulkan/partitioner/
supported_ops.py 97 exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/pytorch/torch/distributed/tensor/experimental/
_register_sharding.py 48 >>> @register_sharding(aten._softmax.default)
/aosp_15_r20/external/pytorch/test/nn/
test_multihead_attention.py 74 reference = _softmax(QKT)
93 def _softmax(x): # softmax over 4 dim matrix function
/aosp_15_r20/external/executorch/backends/cadence/aot/
functions_hifi.yaml 20 - op: _softmax.out
functions.yaml 20 - op: _softmax.out
compiler.py 68 # Swap _safe_softmax with _softmax (see https://github.com/pytorch/pytorch/pull/133882
/aosp_15_r20/external/executorch/kernels/aten/
functions.yaml 25 - op: _softmax.out
/aosp_15_r20/external/executorch/backends/xnnpack/partition/
configs.py 60 exir_ops.edge.aten._softmax.default,
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
Softmax.cpp 210 m.impl("_softmax", TORCH_FN(softmax)); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/test/edge/
selected_operators.yaml 26 aten::_softmax.out:
