Searched full:softmax (Results 1 – 25 of 1503) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
softmax_op_functor.h
29 // Computes Softmax or LogSoftmax activation.
32 // softmax: dims: batch_size, num_classes.
35 typename TTypes<T>::Matrix softmax, const bool log);
45 typename TTypes<T>::Matrix softmax, const bool log) { in Compute()
66 // Calculate the log of the softmax in Compute()
67 // softmax = logits - max(logits along classes); in Compute()
68 softmax.device(d) = shifted_logits; in Compute()
69 // softmax = softmax - log(sum(exp(softmax along classes))); in Compute()
70 softmax.device(d) = (softmax - softmax.exp() in Compute()
80 // softmax = exp(logits - max(logits along classes)); in Compute()
[all …]
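
The functor above derives both Softmax and LogSoftmax from max-shifted logits. A minimal NumPy sketch of that shifted-logits recipe (an illustration of the idea, not the TensorFlow kernel itself):

    import numpy as np

    def log_softmax_rows(logits):
        # shifted_logits = logits - max(logits along classes); improves numerical stability
        shifted = logits - logits.max(axis=-1, keepdims=True)
        # log_softmax = shifted - log(sum(exp(shifted along classes)))
        return shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))

    def softmax_rows(logits):
        # softmax = exp(shifted_logits) / sum(exp(shifted_logits along classes))
        e = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
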
/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/operators/
softmax.cpp
1 #include <torch/csrc/jit/tensorexpr/operators/softmax.h>
12 // Softmax is computed as follows: in computeSoftmax()
13 // softmax(vi) = exp(vi) / sum(exp(vi)) in computeSoftmax()
17 // softmax(vi) = exp(vi - max(vi)) / sum(exp(vi - max(vi))) in computeSoftmax()
20 // - First loop computes the max over the softmax dim. in computeSoftmax()
22 // the max of the softmax dim it belongs to. in computeSoftmax()
23 // - Third loop computes the sum over the softmax dim. in computeSoftmax()
24 // - Final loop computes softmax for every element in v. in computeSoftmax()
27 // log_softmax(vi) = log(softmax(vi)) in computeSoftmax()
34 // - First loop computes the max over the softmax dim. in computeSoftmax()
[all …]
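
The comment block describes lowering softmax into four loops: max, shifted exp, sum, divide. A plain-Python sketch of that decomposition for a 1-D input (the tensor-expression machinery is omitted):

    import math

    def softmax_loops(v):
        # Loop 1: max over the softmax dim.
        m = max(v)
        # Loop 2: exp of every element, shifted by the max of its softmax dim.
        e = [math.exp(x - m) for x in v]
        # Loop 3: sum over the softmax dim.
        s = sum(e)
        # Loop 4: softmax for every element.
        return [x / s for x in e]
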
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/sparse/
kernels_gpu.cu.cc
348 T* softmax) { in CalculateRowSoftmax() argument
350 // softmax[row] = exp(shifted_logits[row]) / sum(exp(shifted_logits[row])) in CalculateRowSoftmax()
361 softmax[r_i] = exp_i; in CalculateRowSoftmax()
365 softmax[r_i] = softmax[r_i] / sum_exp; in CalculateRowSoftmax()
372 const T* logits, T* softmax) { in CSRSparseMatrixSoftmaxKernel2D() argument
379 softmax); in CSRSparseMatrixSoftmaxKernel2D()
397 const int* row_ptr, const T* logits, T* softmax) { in CSRSparseMatrixSoftmaxKernel3D() argument
414 softmax); in CSRSparseMatrixSoftmaxKernel3D()
481 const T* softmax, const int grad_softmax_begin, const int grad_softmax_end, in CalculateRowSoftmaxGrad() argument
490 // looking for matching indices. In the softmax indices only, perform: in CalculateRowSoftmaxGrad()
[all …]
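
CalculateRowSoftmax normalizes each CSR row independently. A NumPy sketch of per-row softmax over a CSR layout; the row_ptr/values names are illustrative, not the kernel's actual parameters:

    import numpy as np

    def csr_row_softmax(row_ptr, values):
        out = np.empty_like(values)
        for r in range(len(row_ptr) - 1):
            lo, hi = row_ptr[r], row_ptr[r + 1]
            if lo == hi:          # empty row: nothing to normalize
                continue
            row = values[lo:hi]
            # softmax[row] = exp(shifted_logits[row]) / sum(exp(shifted_logits[row]))
            e = np.exp(row - row.max())
            out[lo:hi] = e / e.sum()
        return out
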
softmax_op.cc
16 // Implements the kernel for the CSRSoftmax op, which performs softmax
76 functor::CSRSparseMatrixSoftmax<Device, T> softmax; in Compute() local
78 ctx, softmax(ctx, *logits_matrix, output_matrix.values().vec<T>())); in Compute()
125 "dtype of softmax is not equal to 'type': ", in Compute()
140 "Ranks of softmax and grad_softmax matrices differ: ", in Compute()
146 "Ranks of softmax and grad_softmax matrices differ: ", in Compute()
159 "Shapes of softmax and grad_softmax matrices differ: ", in Compute()
164 // Allocate output shapes. Note that since the Softmax Gradient in Compute()
166 // softmax value, it will keep the sparsity structure of the softmax. in Compute()
211 OpKernelContext* ctx, const CSRSparseMatrix& softmax, \
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/
activations.py
27 # In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
28 # layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
35 'softmax_v2': 'softmax',
39 @keras_export('keras.activations.softmax')
41 def softmax(x, axis=-1): function
42 """Softmax converts a vector of values to a probability distribution.
49 Softmax is often used as the activation for the last
53 The softmax of each vector x is computed as
60 axis: Integer, axis along which the softmax normalization is applied.
63 Tensor, output of softmax transformation (all values are non-negative
[all …]
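
The docstring says softmax converts a vector of values into a probability distribution along `axis`. A small NumPy illustration of that property (not the Keras implementation):

    import numpy as np

    x = np.array([[1.0, 2.0, 3.0],
                  [2.0, 2.0, 2.0]])
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    p = e / e.sum(axis=-1, keepdims=True)   # axis=-1, as in keras.activations.softmax
    print(p)               # all values are non-negative
    print(p.sum(axis=-1))  # each row sums to 1
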
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
nn_grad.py
280 @ops.RegisterGradient("Softmax")
282 """The derivative of the softmax nonlinearity.
285 The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
289 grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
292 op: the Softmax op.
293 grad_softmax: the tensor representing the gradient w.r.t. the softmax
297 gradient w.r.t the input to the softmax
300 softmax = op.outputs[0]
301 sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
302 return (grad_softmax - sum_channels) * softmax
[all …]
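
Lines 300-302 implement grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax. A NumPy check of that vector-Jacobian product against the full Jacobian diag(softmax) - softmax softmax', purely for illustration:

    import numpy as np

    def softmax(x):
        e = np.exp(x - x.max())
        return e / e.sum()

    x = np.array([0.5, -1.0, 2.0])
    dy = np.array([0.1, 0.2, 0.3])      # upstream gradient w.r.t. the softmax output
    y = softmax(x)

    # Closed form used by the registered gradient: (dy - sum(dy * y)) * y
    dx = (dy - np.sum(dy * y)) * y

    # Full Jacobian diag(y) - outer(y, y), applied to dy
    J = np.diag(y) - np.outer(y, y)
    assert np.allclose(dx, J @ dy)
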
/aosp_15_r20/external/pytorch/test/inductor/
test_fused_attention.py
117 .softmax(dim=-1)
145 .softmax(dim=-1)
255 .softmax(dim=-1)
276 .softmax(dim=-1)
289 torch.matmul(query, key.transpose(-2, -1)).div(3.0).softmax(dim=-1),
309 torch.matmul(query, key.transpose(-2, -1)).mul(0.4).softmax(dim=-1),
328 attn_weight = torch.softmax(
339 attn_weight = torch.softmax(
359 attn_weight = torch.softmax(
379 attn_weight = torch.softmax(div, dim=-1)
[all …]
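
The patterns under test share one shape: scores = QK^T scaled, softmax over the last dim, then a matmul with V. A hedged PyTorch sketch of that unfused pattern; the tensor shapes and the 1/sqrt(d) scale are assumptions, not the test's exact parameters:

    import math
    import torch

    def attention_reference(query, key, value):
        d = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)).div(math.sqrt(d))
        attn_weight = torch.softmax(scores, dim=-1)
        return torch.matmul(attn_weight, value)

    q = torch.randn(2, 4, 8, 16)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)
    out = attention_reference(q, k, v)   # shape (2, 4, 8, 16)
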
/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_softmax.py
33 """Tests softmax."""
35 class Softmax(torch.nn.Module): class in TestSoftmax
38 self.softmax = torch.nn.Softmax(dim=dim)
41 return self.softmax(x)
53 .check(["torch.ops.aten.softmax.int"])
74 .check_not(["torch.ops.aten.softmax.int"])
98 .check_not(["torch.ops.aten.softmax.int"])
128 self._test_softmax_tosa_MI_pipeline(self.Softmax(dim=dim), (test_data,))
137 self._test_softmax_tosa_BI_pipeline(self.Softmax(dim=dim), (test_data,))
146 self._test_softmax_tosa_u55_BI_pipeline(self.Softmax(dim=dim), (test_data,))
[all …]
/aosp_15_r20/external/ComputeLibrary/arm_compute/runtime/CL/functions/
CLSoftmaxLayer.h
41 * Softmax is calculated by :
44 * Log Softmax is calculated by :
74 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
78 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
84 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
88 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
93 …ensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
97 … * axis=1, softmax will be applied to 4x6=24 vectors of size 5. Defaults to 0
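
The header describes axis as the dimension softmax normalizes over, so its example applies softmax to 4x6=24 independent vectors of size 5. A NumPy sketch of that view (layout details simplified; the 24x5 arrangement is just for illustration):

    import numpy as np

    # 24 independent vectors of size 5, as in the header's axis example
    x = np.random.randn(24, 5)
    e = np.exp(x - x.max(axis=1, keepdims=True))
    p = e / e.sum(axis=1, keepdims=True)
    assert np.allclose(p.sum(axis=1), 1.0)   # each of the 24 vectors is normalized
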
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/tests/
modify_io_nodes.mlir
12 …%5 = "tfl.softmax"(%4) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform<i8:f32, 0.023…
23 // CHECK-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<…
24 // CHECK-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[softmax]]) : (tensor<1x401408x!quant.uniform<i8:f32…
33 // INT8-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<1…
34 // INT8-NEXT: return %[[softmax]] : tensor<1x401408x!quant.uniform<i8:f32, 3.906250e-03>>
43 // UINT8-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<…
44 // UINT8-NEXT: %[[dq:.*]] = "tfl.quantize"(%[[softmax]]) {qtype = tensor<1x401408x!quant.uniform<u8…
55 …%5 = "tfl.softmax"(%4) {beta = 1.000000e+00 : f32} : (tensor<1x401408x!quant.uniform<i8:f32, 0.023…
66 // CHECK-NEXT: %[[softmax:.*]] = "tfl.softmax"(%[[reshape]]) {beta = 1.000000e+00 : f32} : (tensor<…
67 // CHECK-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[softmax]]) : (tensor<1x401408x!quant.uniform<i8:f32…
[all …]
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
fuse_attention.py
37 .softmax(dim=-1)
59 .softmax(dim=-1)
81 .softmax(dim=-1),
101 torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1),
120 attn_weight = torch.softmax(
140 attn_weight = torch.softmax(
168 attn_weight = torch.softmax(div, dim=-1)
201 attn_weight = torch.softmax(div, dim=-1)
228 attn_weight = torch.softmax(div, dim=-1)
257 attn_weight = torch.softmax(div, dim=-1)
[all …]
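
These fragments mirror the unfused attention graph the fusion pass matches; the documented replacement is PyTorch's fused scaled-dot-product-attention op. A quick equivalence check, assuming PyTorch 2.x where torch.nn.functional.scaled_dot_product_attention is available:

    import math
    import torch
    import torch.nn.functional as F

    q, k, v = (torch.randn(2, 4, 8, 16) for _ in range(3))

    # Unfused pattern, as in the fragments above
    scale_factor = 1 / math.sqrt(q.size(-1))
    attn_weight = torch.softmax(torch.matmul(q, k.transpose(-2, -1)) * scale_factor, dim=-1)
    unfused = torch.matmul(attn_weight, v)

    # Fused kernel (default scale is also 1/sqrt(head_dim))
    fused = F.scaled_dot_product_attention(q, k, v)

    print(torch.allclose(unfused, fused, atol=1e-5))
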
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/
softmax.py
14 class Softmax(torch.nn.Module): class in TestSoftmax
20 return torch.nn.Softmax(dim=self.dim)(x)
24 # as xnnpack only supports softmax on the last dimension.
29 Tester(self.Softmax(dim), inputs)
31 .check_count({"torch.ops.aten.softmax": 1})
52 # as xnnpack only supports softmax on the last dimension.
53 # This test validates the delegate does not attempt to delegate softmax
59 Tester(self.Softmax(dim), inputs)
61 .check_count({"torch.ops.aten.softmax": 1})
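
The test notes that the XNNPACK delegate only accepts softmax over the last dimension. A common workaround, independent of this test suite, is to move the target dim to the end, apply softmax, and move it back:

    import torch

    def softmax_any_dim_via_last(x, dim):
        # Move `dim` to the last position, softmax there, then restore the layout.
        x_moved = x.movedim(dim, -1)
        y = torch.softmax(x_moved, dim=-1)
        return y.movedim(-1, dim)

    x = torch.randn(2, 3, 4)
    assert torch.allclose(softmax_any_dim_via_last(x, 1), torch.softmax(x, dim=1))
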
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_softmax_op.cc
61 // Softmax forward execute
105 // Softmax primitive.
121 // Softmax forward primitive setup
123 // Create memory descriptors for softmax data with specified format. in Setup()
128 // Create softmax descriptor and primitive descriptor. in Setup()
140 // Create softmax primitive and add it to net in Setup()
159 // Get a softmax fwd primitive from the cached pool. in Get()
239 // In MKL, data format passed to mkl softmax op depends on dimension of in Compute()
247 // dimension to do softmax. in Compute()
284 // Get a softmax fwd primitive from primitive pool. in Compute()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/
softmax_quantized_test.cc
45 // float Softmax. in RunSoftmaxFloatReference()
53 optimized_ops::Softmax(sm_params, shape_common, reference_dequant_data.data(), in RunSoftmaxFloatReference()
55 // Work with quantized scaling for Softmax, under which 256 represents 1, but in RunSoftmaxFloatReference()
104 // Runs the Softmax and compares against the float reference implementation and
138 optimized_ops::Softmax(params, shape_common, input_data, shape_common, in RunOneSoftmaxTest()
140 reference_ops::Softmax(params, shape_common, input_data, shape_common, in RunOneSoftmaxTest()
167 // This function picks some random Softmax params, which are checked for
169 // it runs the Softmax test and returns true. This allows the caller
176 // Softmax, the width and height really just create test repetitions. in TryOneUniformSoftmax()
202 // Softmax may adapt as they traverse the depth, and so we test handling of
[all …]
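
The test dequantizes the input, runs a float reference softmax, and compares in the quantized domain where 256 represents 1.0 (clipped at 255). A rough NumPy sketch of that reference path; the exact scales and clamping are assumptions for illustration:

    import numpy as np

    def quantized_softmax_reference(input_q, input_scale, input_zero_point):
        # Dequantize, run float softmax, then requantize with scale 1/256
        # (so 256 would represent 1.0, saturating at 255 for uint8 output).
        x = input_scale * (input_q.astype(np.float32) - input_zero_point)
        e = np.exp(x - x.max(axis=-1, keepdims=True))
        p = e / e.sum(axis=-1, keepdims=True)
        return np.clip(np.round(p * 256.0), 0, 255).astype(np.uint8)
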
/aosp_15_r20/external/pytorch/test/expect/
TestTensorBoard.test_caffe2_simple_model.expect
344 name: "classifier/Softmax"
345 op: "Softmax"
364 input: "classifier/softmax"
396 input: "classifier/softmax"
404 input: "classifier/softmax"
766 name: "classifier/softmax"
768 input: "classifier/Softmax:0"
772 name: "classifier/softmax"
774 input: "classifier/Softmax:0"
830 name: "classifier/softmax"
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
softmax_op_test.py
46 softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
48 res = np.log(softmax)
50 res = softmax
73 tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
142 logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
157 logging.info("Testing softmax half dtype in shape [%d, %d]", row, col)
173 logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
236 op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
245 self.assertAllEqual(y, self.evaluate(nn_ops.softmax(x, axis=0)))
253 nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
[all …]
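
Lines 46-50 show the test's NumPy reference: exponentiate, normalize along `dim`, optionally take the log. Filled out as a standalone sketch (the helper name is invented; the real test builds `e` from max-shifted features):

    import numpy as np

    def np_softmax_reference(features, dim=-1, log=False):
        # Shift by the max along `dim` for stability, as the kernel under test does.
        e = np.exp(features - np.max(features, axis=dim, keepdims=True))
        softmax = e / np.sum(e, axis=dim, keepdims=True)
        return np.log(softmax) if log else softmax
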
/aosp_15_r20/external/libtextclassifier/native/lang_id/common/
embedding-network-params.h
143 // Returns true if a softmax layer exists.
148 // Returns weight matrix for the softmax layer. Note: should be called only
153 SAFTM_CHECK(HasSoftmax()) << "No softmax layer."; in GetSoftmaxMatrix()
164 // Returns bias for the softmax layer. Technically a Matrix, but we expect it
168 SAFTM_CHECK(HasSoftmax()) << "No softmax layer."; in GetSoftmaxBias()
255 // ** Access methods for optional MatrixParams softmax.
257 // Returns 1 if proto has optional field softmax, 0 otherwise.
260 // Returns number of rows of transpose(proto.softmax()).
263 // Returns number of columns of transpose(proto.softmax()).
266 // Returns quantization mode for the softmax weights.
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tfrt/benchmarks/
softmax_op_benchmark.cc
27 %result = "tf.Softmax"(%input)
34 std::string Softmax(llvm::ArrayRef<bool> dynamic_dims, in Softmax() function
47 OutT softmax) { in ComputeSoftmax() argument
66 softmax.device(d) = shifted_logits.exp(); in ComputeSoftmax()
67 softmax.device(d) = (softmax * softmax.sum(along_class) in ComputeSoftmax()
95 BM(JitrtV(NAME, Softmax({DYNAMIC_ROW, DYNAMIC_COL}, {ROWS, COLS}), "main", \
98 BM(Tfrt(NAME, Softmax({DYNAMIC_ROW, DYNAMIC_COL}, {ROWS, COLS}), "main", \
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tosa/transforms/
passes.td
63 def TosaDequantizeTFLSoftmaxPass : Pass<"tosa-dequantize-tfl-softmax", "mlir::func::FuncOp"> {
64 let summary = "Dequantize TFLite Softmax ops.";
66 This pass rewrites quantized TFLite Softmax ops as: Dequantize, (float) Softmax, Quantize.
67 It is a work around for current performance issues with quantized Softmax codegen.
68 For instance it is a 20% end-to-end speedup on certain Softmax-heavy BERTs.
70 Softmax lowering. But as Softmax isn't currently a TOSA op, this isn't a TOSA
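
The pass turns each quantized TFLite Softmax into Dequantize, float Softmax, Quantize. A NumPy sketch of that rewritten dataflow; the scales, zero points, and int8 clamping are placeholders, not the pass's actual parameters:

    import numpy as np

    def dequant_softmax_quant(x_q, in_scale, in_zp, out_scale, out_zp):
        x = in_scale * (x_q.astype(np.float32) - in_zp)       # Dequantize
        e = np.exp(x - x.max(axis=-1, keepdims=True))          # float Softmax
        y = e / e.sum(axis=-1, keepdims=True)
        q = np.round(y / out_scale) + out_zp                    # Quantize
        return np.clip(q, -128, 127).astype(np.int8)
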
/aosp_15_r20/external/armnn/src/backends/backendsCommon/test/
JsonPrinterTestImpl.cpp
142 IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax"); in GetSoftmaxProfilerJson() local
145 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0)); in GetSoftmaxProfilerJson()
146 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0)); in GetSoftmaxProfilerJson()
157 softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); in GetSoftmaxProfilerJson()
176 // one of inputs is sufficiently larger than the others to saturate softmax in GetSoftmaxProfilerJson()
269 …bool softmaxCheck = ((result.find("softmax") != std::string::npos) || // Validate softm… in RunSoftmaxProfilerJsonPrinterTest()
270 (result.find("Softmax") != std::string::npos) || in RunSoftmaxProfilerJsonPrinterTest()
271 (result.find("SoftMax") != std::string::npos)); in RunSoftmaxProfilerJsonPrinterTest()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
SoftMax.cu
28 #include <ATen/ops/softmax.h>
101 See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax for the CPU in cuda_sparse_coo_softmax_kernel()
102 implementation of the sparse softmax algorithm that this implementation is in cuda_sparse_coo_softmax_kernel()
163 See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax_backward for in cuda_sparse_coo_softmax_backward_kernel()
164 the CPU implementation of the sparse softmax backward algorithm that this in cuda_sparse_coo_softmax_backward_kernel()
236 See ATen/native/sparse/SoftMax.cpp:get_offsets for the CPU in get_offsets()
294 See ATen/native/sparse/SoftMax.cpp:get_offsets and in compute_pool_max()
295 ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax for the CPU in compute_pool_max()
389 See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax for the CPU in cuda_sparse_coo_softmax()
390 implementation of the sparse softmax algorithm that this implementation is in cuda_sparse_coo_softmax()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/
softmax_spmd_expander.cc
109 return errors::Unimplemented("softmax not supported for rank 0 tensors."); in ComputeExpAndSum()
113 // Softmax is exp(input)/sum(exp(input)) and LogSoftmax is in ComputeExpAndSum()
140 // Computes softmax from its components. Assumes that builder's insertion point
145 // For Softmax, we compute exp(shifted_logits)/sum(exp(shifted_logits)) in ComputeSoftmax()
146 auto softmax = builder.create<mlir::TF::DivOp>( in ComputeSoftmax() local
148 return softmax.getResult(); in ComputeSoftmax()
151 // Computes softmax from its components. Assumes that builder's insertion point
164 // Computes the softmax of the input along the last axis, assuming that the
321 // Expander for Softmax and LogSoftmax ops.
333 // (Log)Softmax's logits are a rank >= 1 tensor. We reduce over the last in ExpandOp()
[all …]
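
The expander exponentiates shifted logits locally and combines per-shard sums before dividing, since the reduction axis may be sharded. A toy NumPy simulation of that idea; it illustrates the math only and uses none of the DTensor APIs:

    import numpy as np

    def sharded_softmax(shards):
        # `shards` are pieces of one logical row, split along the softmax (last) axis.
        global_max = max(s.max() for s in shards)          # all-reduce(max)
        exps = [np.exp(s - global_max) for s in shards]    # local exp of shifted logits
        global_sum = sum(e.sum() for e in exps)            # all-reduce(sum)
        return [e / global_sum for e in exps]              # local divide

    row = np.array([0.1, 2.0, -1.0, 0.5])
    parts = sharded_softmax([row[:2], row[2:]])
    ref = np.exp(row - row.max()) / np.exp(row - row.max()).sum()
    assert np.allclose(np.concatenate(parts), ref)
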
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/
advanced_activations.py
294 @keras_export('keras.layers.Softmax')
295 class Softmax(Layer): class
296 """Softmax activation function.
301 >>> layer = tf.keras.layers.Softmax()
317 axis: Integer, or list of Integers, axis along which the softmax
320 inputs: The inputs, or logits to the softmax layer.
329 super(Softmax, self).__init__(**kwargs)
341 # Since we are adding it to the raw scores before the softmax, this is
349 return backend.softmax(inputs, axis=self.axis[0])
350 return backend.softmax(inputs, axis=self.axis)
[all …]
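
Line 341 refers to adding the mask to the raw scores before the softmax, i.e. pushing masked logits toward a large negative value so they get near-zero probability. A small NumPy sketch of that additive-mask trick (the -1e9 constant is a common choice, assumed here):

    import numpy as np

    scores = np.array([1.0, 2.0, 3.0, 4.0])
    mask = np.array([1.0, 1.0, 0.0, 1.0])         # 0 = position to ignore

    masked_scores = scores + (1.0 - mask) * -1e9  # push masked logits toward -inf
    e = np.exp(masked_scores - masked_scores.max())
    p = e / e.sum()
    print(p)   # the masked position gets ~0 probability; the rest renormalize
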
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/generated/spec_V1_2/
softmax_v1_2.example.cpp
44 .type = TestOperationType::SOFTMAX, in get_test_model()
135 .type = TestOperationType::SOFTMAX, in get_test_model_all_inputs_as_internal()
192 .type = TestOperationType::SOFTMAX, in get_test_model_dim1_axis0()
283 .type = TestOperationType::SOFTMAX, in get_test_model_dim1_axis0_all_inputs_as_internal()
340 .type = TestOperationType::SOFTMAX, in get_test_model_dim3_axis2()
431 .type = TestOperationType::SOFTMAX, in get_test_model_dim3_axis2_all_inputs_as_internal()
488 .type = TestOperationType::SOFTMAX, in get_test_model_relaxed()
579 .type = TestOperationType::SOFTMAX, in get_test_model_relaxed_all_inputs_as_internal()
636 .type = TestOperationType::SOFTMAX, in get_test_model_relaxed_dim1_axis0()
727 .type = TestOperationType::SOFTMAX, in get_test_model_relaxed_dim1_axis0_all_inputs_as_internal()
[all …]
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
softmax_quant8_signed.mod.py
23 model = model.Operation("SOFTMAX", i1, beta).To(output)
42 model = model.Operation("SOFTMAX", i1, beta).To(output)
84 Model().Operation("SOFTMAX", i, 1.0).To(o)
87 Model().Operation("SOFTMAX", i, 0.000001).To(o)
93 Model("axis").Operation("SOFTMAX", i, 1.0, axis).To(o)
96 Model("axis").Operation("SOFTMAX", i, 0.000001, axis).To(o)
117 # SOFTMAX op with numBatches = 0.
119 model = model.Operation("SOFTMAX", zero_sized, 1.0).To(o3)
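
SOFTMAX in these specs takes a beta operand that scales the logits before normalization; beta = 1.0 gives the ordinary softmax and a tiny beta flattens the output toward uniform. A NumPy sketch of that definition, written from memory, so treat the exact formula as an assumption:

    import numpy as np

    def nnapi_like_softmax(x, beta=1.0, axis=-1):
        # output[i] = exp(beta * (x[i] - max(x))) / sum_j exp(beta * (x[j] - max(x)))
        z = beta * (x - x.max(axis=axis, keepdims=True))
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)

    x = np.array([[-1.0, 0.0, 1.0]])
    print(nnapi_like_softmax(x, beta=1.0))
    print(nnapi_like_softmax(x, beta=0.000001))   # tiny beta, as in the spec: nearly uniform
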
