/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
softplus_op_test.py
   15  """Tests for Softplus and SoftplusGrad."""
   38  softplus = nn_ops.softplus(np_features)
   39  tf_softplus = self.evaluate(softplus)
   42  self.assertShapeEqual(np_softplus, softplus)
   77  y = nn_ops.softplus(x, name="softplus")
   84  print("softplus (float) gradient err = ", err)
   94  y = nn_ops.softplus(x, name="softplus")
  102  print("softplus (float) gradient of gradient err = ", err)
  112  y = nn_ops.softplus(x, name="softplus")
  121  print("softplus (float) third-order gradient err = ", err)
  [all …]
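The test compares nn_ops.softplus against a NumPy reference and checks first-, second- and third-order gradients. A minimal sketch of the same kind of reference check (my own code, assuming TF 2.x eager mode; np_softplus_ref is a name I made up):

    import numpy as np
    import tensorflow as tf

    def np_softplus_ref(x):
        # Plain log1p(exp(x)); adequate for moderate inputs.
        return np.log1p(np.exp(x))

    np_features = np.linspace(-5.0, 5.0, 11).astype(np.float32)
    tf_softplus = tf.nn.softplus(np_features).numpy()
    np.testing.assert_allclose(tf_softplus, np_softplus_ref(np_features), rtol=1e-6)
    assert tf_softplus.shape == np_features.shape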
|
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
softplus_op.h
   32  struct Softplus {
   33  // Computes Softplus activation.
   41  // true softplus(x). Offset of 2 from machine epsilon checked
   43  // softplus implemented with numpy's log1p and numpy's logaddexp.
   46  // Value above which exp(x) may overflow, but softplus(x) == x
   49  // Value below which exp(x) may underflow, but softplus(x) == exp(x)
   54  features,  // softplus(x) ~= x for x large
   55  too_small.select(features_exp,  // softplus(x) ~= exp(x) for x small
   65  // gradients: gradients backpropagated to the Softplus op.
   66  // features: inputs that were passed to the Softplus op.
  [all …]
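The functor evaluates softplus piecewise: pass x through unchanged once log1p(exp(x)) would round to x, fall back to exp(x) when it underflows, and use log1p(exp(x)) in between. A NumPy sketch of that strategy (the cutoff below is illustrative; the kernel derives it from the element type's machine epsilon):

    import numpy as np

    def stable_softplus(x):
        x = np.asarray(x, dtype=np.float32)
        threshold = np.log(np.finfo(np.float32).eps) + 2.0   # about -13.8 for float32
        too_large = x > -threshold    # softplus(x) == x to machine precision
        too_small = x < threshold     # softplus(x) ~= exp(x)
        x_exp = np.exp(np.where(too_large, 0.0, x))          # mask out huge x before exp
        return np.where(too_large, x,
                        np.where(too_small, x_exp, np.log1p(x_exp)))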
|
softplus_op.cc
   40  functor::Softplus<Device, T> functor;
   81  Name("Softplus").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
   96  void Softplus<GPUDevice, T>::operator()( \
   99  extern template struct Softplus<GPUDevice, T>;
  119  Name("Softplus").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
|
/aosp_15_r20/external/tensorflow/tensorflow/python/tools/
print_selective_registration_header_test.py
  168  ["Softplus", "SoftplusOp<CPUDevice, float>"]]"""
  173  ('Softplus', 'SoftplusOp<CPUDevice, float>'),
  177  ops_list = '[["Softplus", "SoftplusOp<CPUDevice, float>"]]'
  181  ('Softplus', 'SoftplusOp<CPUDevice, float>'),
  194  ops_list = '[["Softplus", ""]]'
  198  ('Softplus', None),
  202  ops_list = '[["Softplus", "SoftplusOp<CPUDevice, float>"]]'
  208  ('Softplus', 'SoftplusOp<CPUDevice, float>'),
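The ops_list strings are JSON arrays of [op_name, kernel_class] pairs, with an empty kernel string standing in for "no kernel class". A sketch of how such a string maps onto the expected tuples (parse_ops_list is a hypothetical helper of mine, not part of the tool):

    import json

    def parse_ops_list(ops_list):
        return [(op, kernel or None) for op, kernel in json.loads(ops_list)]

    assert parse_ops_list('[["Softplus", "SoftplusOp<CPUDevice, float>"]]') == [
        ('Softplus', 'SoftplusOp<CPUDevice, float>')]
    assert parse_ops_list('[["Softplus", ""]]') == [('Softplus', None)]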
|
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
Softplus.pbtxt
    2  name: "Softplus"
   30  name: "Softplus"
   60  name: "Softplus"
   91  name: "Softplus"
  122  name: "Softplus"
|
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
Softplus.pbtxt
    2  name: "Softplus"
   30  name: "Softplus"
   60  name: "Softplus"
   91  name: "Softplus"
  122  name: "Softplus"
|
/aosp_15_r20/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_SoftplusGrad.pbtxt
    7  The backpropagated gradients to the corresponding softplus operation.
   13  The features passed as input to the corresponding softplus operation.
   22  summary: "Computes softplus gradients for a softplus operation."
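Since d/dx softplus(x) = sigmoid(x), the grad op is just an elementwise product of the incoming gradients with the sigmoid of the original features. A NumPy sketch of that relation (my own code, not the kernel):

    import numpy as np

    def softplus_grad(gradients, features):
        return gradients * (1.0 / (1.0 + np.exp(-features)))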
|
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
unary_ops.cc
   96  static xla::XlaOp Softplus(xla::XlaBuilder* b, xla::XlaOp features) {
  101  // Value above which exp(x) may overflow, but softplus(x) == x
  104  // Value below which exp(x) may underflow, but softplus(x) == exp(x)
  114  XLAJIT_MAKE_UNARY(Softplus, Softplus(b, x));
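A rough illustration of why those cutoffs are safe for float32 (my own numbers; the XLA kernel derives its thresholds from the element type's epsilon):

    import numpy as np

    print(np.log(np.finfo(np.float32).max))     # ~88.7: exp(x) overflows above this
    x = np.float32(20.0)
    print(np.log1p(np.exp(x)) - x)              # 0.0: softplus(x) already rounds to x
    x = np.float32(-20.0)
    print(np.log1p(np.exp(x)), np.exp(x))       # both ~2.06e-09: softplus(x) ~= exp(x)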
|
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/distributions/
bernoulli.py
  152  nn.softplus(-self.logits))  # pylint: disable=invalid-unary-operand-type
  180  delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
  181  delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
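The identities behind these lines: log sigmoid(l) = -softplus(-l) and log(1 - sigmoid(l)) = -softplus(l), so the KL between two Bernoullis with logits a and b can be written entirely with softplus. A small NumPy check (my own code, not the excerpted implementation):

    import numpy as np

    def softplus(x):
        return np.log1p(np.exp(x))

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    a, b = 0.3, -1.2                              # logits of the two distributions
    p, q = sigmoid(a), sigmoid(b)
    kl_direct = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))
    delta_probs0 = softplus(-b) - softplus(-a)    # log(p / q)
    delta_probs1 = softplus(b) - softplus(a)      # log((1 - p) / (1 - q))
    kl_softplus = p * delta_probs0 + (1 - p) * delta_probs1
    print(np.isclose(kl_direct, kl_softplus))     # True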
|
gamma.py
  288  """`Gamma` with softplus of `concentration` and `rate`."""
  292  "Use `tfd.Gamma(tf.nn.softplus(concentration), "
  293  "tf.nn.softplus(rate))` instead.",
  304  concentration=nn.softplus(concentration,
  306  rate=nn.softplus(rate, name="softplus_rate"),
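The deprecated wrapper only applies softplus to its parameters before delegating, so the suggested replacement is direct. A sketch of the recommended pattern, assuming TensorFlow Probability is available under the `tfd` alias used in the deprecation message:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    unconstrained_conc = tf.Variable([0.1, -2.0])
    unconstrained_rate = tf.Variable([1.5, 0.0])
    gamma = tfd.Gamma(concentration=tf.nn.softplus(unconstrained_conc),
                      rate=tf.nn.softplus(unconstrained_rate))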
|
beta.py
  351  """Beta with softplus transform of `concentration1` and `concentration0`."""
  355  "Use `tfd.Beta(tf.nn.softplus(concentration1), "
  356  "tf.nn.softplus(concentration2))` instead.",
  368  concentration1=nn.softplus(concentration1,
  370  concentration0=nn.softplus(concentration0,
|
exponential.py
  144  """Exponential with softplus transform on `rate`."""
  148  "Use `tfd.Exponential(tf.nn.softplus(rate)).",
  158  rate=nn.softplus(rate, name="softplus_rate"),
|
laplace.py
  217  """Laplace with softplus applied to `scale`."""
  221  "Use `tfd.Laplace(loc, tf.nn.softplus(scale)) "
  234  scale=nn.softplus(scale, name="softplus_scale"),
|
normal.py
  246  """Normal with softplus applied to `scale`."""
  250  "Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
  263  scale=nn.softplus(scale, name="softplus_scale"),
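The `Normal` wrapper (like the Beta, Exponential and Laplace ones above) follows the same pattern: softplus keeps an otherwise unconstrained variable strictly positive where a scale is required. A sketch of the typical replacement, again assuming TensorFlow Probability as `tfd`:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    loc = tf.Variable(0.0)
    raw_scale = tf.Variable(-3.0)                 # unconstrained
    q = tfd.Normal(loc=loc, scale=tf.nn.softplus(raw_scale))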
|
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/
activations.py
  200  @keras_export('keras.activations.softplus')
  202  def softplus(x):
  203  """Softplus activation function, `softplus(x) = log(exp(x) + 1)`.
  208  >>> b = tf.keras.activations.softplus(a)
  217  The softplus activation: `log(exp(x) + 1)`.
  219  return math_ops.softplus(x)
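A quick check of the documented definition against a NumPy reference (my own snippet, not part of the Keras file):

    import numpy as np
    import tensorflow as tf

    a = tf.constant([-20.0, -1.0, 0.0, 1.0, 20.0], dtype=tf.float32)
    b = tf.keras.activations.softplus(a)
    np.testing.assert_allclose(b.numpy(), np.log1p(np.exp(a.numpy())), rtol=1e-6)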
|
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/mlir_generated/
gpu_op_softplus.cc
   21  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Softplus, DT_HALF);
   22  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Softplus, DT_FLOAT);
   23  GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Softplus, DT_DOUBLE);
|
/aosp_15_r20/external/pytorch/torch/nn/modules/
activation.py
   35  "Softplus",
  445  \text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
  858  class Softplus(Module):
  859  r"""Applies the Softplus function element-wise.
  862  \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
  864  SoftPlus is a smooth approximation to the ReLU function and can be used
  871  beta: the :math:`\beta` value for the Softplus formulation. Default: 1
  878  .. image:: ../scripts/activation_images/Softplus.png
  882  >>> m = nn.Softplus()
  897  return F.softplus(input, self.beta, self.threshold)
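A sketch of how `beta` and `threshold` interact: once beta * x exceeds the threshold the module simply returns the input, avoiding overflow in exp (my own snippet, mirroring the documented formula):

    import torch
    import torch.nn as nn

    m = nn.Softplus(beta=2.0, threshold=10.0)
    x = torch.tensor([-3.0, 0.0, 3.0, 50.0])
    y = m(x)
    # Reference: log1p(exp(beta * x)) / beta, except in the linear regime.
    ref = torch.where(2.0 * x > 10.0, x, torch.log1p(torch.exp(2.0 * x)) / 2.0)
    print(torch.allclose(y, ref))   # True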
|
/aosp_15_r20/external/pytorch/torch/distributions/
transforms.py
   19  from torch.nn.functional import pad, softplus
  630  return -F.softplus(-x) - F.softplus(x)
  635  Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`.
  647  return softplus(x)
  653  return -softplus(-x)
  689  return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x))
  869  tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.0)).sum(dim=-1)
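The Jacobian identities used in those lines: d/dx softplus(x) = sigmoid(x), so the log-det term is log sigmoid(x) = -softplus(-x), and log(1 - tanh(x)^2) = 2 * (log 2 - x - softplus(-2x)). A small numerical verification (my own code):

    import math
    import torch
    import torch.nn.functional as F

    x = torch.linspace(-4.0, 4.0, 9, dtype=torch.float64)
    print(torch.allclose(torch.log(torch.sigmoid(x)), -F.softplus(-x)))
    print(torch.allclose(torch.log1p(-torch.tanh(x) ** 2),
                         2.0 * (math.log(2.0) - x - F.softplus(-2.0 * x))))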
|
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/distributions/
util_test.py
  924  softplus = nn_ops.softplus(np_features)
  925  softplus_inverse = du.softplus_inverse(softplus)
  927  softplus, softplus_inverse])
  939  self.assertShapeEqual(np_softplus, softplus)
  986  y = nn_ops.softplus(x, name="softplus")
  993  tf_logging.vlog(2, "softplus (float) gradient err = ", err)
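softplus_inverse undoes softplus, i.e. softplus_inverse(y) = log(expm1(y)), and the test round-trips it against a NumPy reference. A sketch of the inverse in its numerically friendlier rearranged form (my own code, not the util implementation):

    import numpy as np

    def softplus(x):
        return np.log1p(np.exp(x))

    def softplus_inverse(y):
        # log(expm1(y)) rewritten as y + log(-expm1(-y)) to stay stable for large y.
        return y + np.log(-np.expm1(-y))

    x = np.linspace(-10.0, 10.0, 21)
    print(np.allclose(softplus_inverse(softplus(x)), x))   # True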
|
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/
activation.h
  640  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Softplus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  642  /// Applies softplus over a given input.
  643  /// See https://pytorch.org/docs/main/nn.html#torch.nn.Softplus to learn
  651  /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
  661  /// Pretty prints the `Softplus` module into the given `stream`.
  670  /// provides, and examples of how to use `Softplus` with
  673  TORCH_MODULE(Softplus);
|
/aosp_15_r20/external/pytorch/torch/_refs/nn/functional/
__init__.py
   62  "softplus",
  397  return a * torch.tanh(torch.nn.functional.softplus(a))
  453  # softplus is implemented specially because it has beta and threshold arguments
  454  @register_decomposition(aten.softplus)
  461  def softplus(
  468  Reference implementation of torch.nn.functional.softplus
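The reference decomposition restates F.softplus with its beta and threshold arguments. A standalone sketch of that behaviour (my own function, not the _refs code):

    import torch

    def softplus_ref(a, beta=1.0, threshold=20.0):
        scaled = a * beta
        return torch.where(scaled > threshold, a, torch.log1p(torch.exp(scaled)) / beta)

    x = torch.tensor([-1.0, 0.0, 5.0, 30.0])
    print(torch.allclose(softplus_ref(x), torch.nn.functional.softplus(x)))   # True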
|
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
activation.h
  537  inline Tensor softplus(const Tensor& input, double beta, double threshold) {
  538  return torch::softplus(input, beta, threshold);
  544  /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.softplus
  553  /// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
  555  inline Tensor softplus(
  558  return detail::softplus(input, options.beta(), options.threshold());
|
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/
activation.h
  495  /// Options for the `Softplus` module.
  499  /// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
  502  /// the `beta` value for the Softplus formulation. Default: 1
  510  /// Options for `torch::nn::functional::softplus`.
  518  /// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
|
/aosp_15_r20/external/tensorflow/tensorflow/core/api_def/java_api/
api_def_Softplus.pbtxt
    2  graph_op_name: "Softplus"
    4  name: "math.Softplus"
|
/aosp_15_r20/external/tensorflow/tensorflow/lite/experimental/mlir/testing/op_tests/
softplus.py
   28  """Make a set of tests to do softplus."""
   42  out = tf.math.softplus(input_tensor)
|