Searched full:tanh (Results 1 – 25 of 3571) sorted by relevance

/aosp_15_r20/external/libopus/dnn/torch/rdovae/
export_rdovae_weights.py
190 ('core_encoder.module.dense_1' , 'enc_dense1', 'TANH', False,),
192 ('core_encoder.module.state_dense_1' , 'gdense1' , 'TANH', True,),
193 ('core_encoder.module.state_dense_2' , 'gdense2' , 'TANH', True)
202 ('core_encoder.module.gru1' , 'enc_gru1', 'TANH', True),
203 ('core_encoder.module.gru2' , 'enc_gru2', 'TANH', True),
204 ('core_encoder.module.gru3' , 'enc_gru3', 'TANH', True),
205 ('core_encoder.module.gru4' , 'enc_gru4', 'TANH', True),
206 ('core_encoder.module.gru5' , 'enc_gru5', 'TANH', True),
214 ('core_encoder.module.conv1.conv' , 'enc_conv1', 'TANH', True),
215 ('core_encoder.module.conv2.conv' , 'enc_conv2', 'TANH', True),
[all …]
/aosp_15_r20/libcore/luni/src/test/resources/
math_important_numbers.csv
568 tanh,0x1.fb8f76b1e2ab6p-1,0x1.5bf0a8b145769p1,2.718281828459045
569 tanh,-0x1.fb8f76b1e2ab6p-1,-0x1.5bf0a8b145769p1,-2.718281828459045
570 tanh,0x0.0p0,0x0.0p0,0.0
571 tanh,-0x0.0p0,-0x0.0p0,-0.0
572 tanh,0x1.85efab514f394p-1,0x1.0p0,1.0
573 tanh,-0x1.85efab514f394p-1,-0x1.0p0,-1.0
574 tanh,-0x1.d9353d7568af3p-2,-0x1.0p-1,-0.5
575 tanh,0x1.d9353d7568af3p-2,0x1.0p-1,0.5
576 tanh,-0x1.ffff15f81f9abp-1,-0x1.921fb54442d18p2,-6.283185307179586
577 tanh,-0x1.fffe74ef7ed71p-1,-0x1.815e630c155e1p2,-6.021385919380436
[all …]
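Read column-wise, each row above appears to be: function name, expected result as a C99 hex float, the argument as a hex float, and the same argument in decimal (row 568, for example, is tanh evaluated at e = 2.718281828459045). A quick Python sketch decoding that row; the column interpretation is inferred from the data, not stated in the snippet:

    import math

    # Row 568: tanh,0x1.fb8f76b1e2ab6p-1,0x1.5bf0a8b145769p1,2.718281828459045
    expected = float.fromhex('0x1.fb8f76b1e2ab6p-1')   # expected tanh(e), about 0.99133
    arg = float.fromhex('0x1.5bf0a8b145769p1')         # the argument in hex, i.e. e
    assert arg == 2.718281828459045                    # matches the decimal column
    assert math.isclose(math.tanh(arg), expected, rel_tol=1e-15)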
/aosp_15_r20/external/python/cpython3/Lib/test/
cmath_testcases.txt
1580 -- For exp, cosh, sinh, tanh we limit tests to arguments whose
1932 -- tanh: Hyperbolic Tangent --
1939 -- tanh0000 tanh 0.0 0.0 -> 0.0 0.0
1940 -- tanh0001 tanh 0.0 -0.0 -> 0.0 -0.0
1941 -- tanh0002 tanh -0.0 0.0 -> -0.0 0.0
1942 -- tanh0003 tanh -0.0 -0.0 -> -0.0 -0.0
1945 tanh0004 tanh -21.200500450664993 -1.6970729480342996 -> -1.0 1.9241352344849399e-19
1946 tanh0005 tanh -0.34158771504251928 -8.0848504951747131 -> -2.123711225855613 1.2827526782026006
1947 tanh0006 tanh -15.454144725193689 -0.23619582288265617 -> -0.99999999999993283 -3.4336684248260036e…
1948 tanh0007 tanh -7.6103163119661952 -0.7802748320307008 -> -0.99999999497219438 -4.9064845343755437e-…
[all …]
/aosp_15_r20/bionic/libm/upstream-freebsd/lib/msun/src/
s_tanh.c
12 /* Tanh(x)
18 * 0. tanh(x) is defined to be -----------
21 * 1. reduce x to non-negative by tanh(-x) = -tanh(x).
22 * 2. 0 <= x < 2**-28 : tanh(x) := x with inexact if x != 0
24 * 2**-28 <= x < 1 : tanh(x) := -----; t = expm1(-2x)
27 * 1 <= x < 22 : tanh(x) := 1 - -----; t = expm1(2x)
29 * 22 <= x <= INF : tanh(x) := 1.
32 * tanh(NaN) is NaN;
33 * only tanh(0)=0 is exact for finite argument.
45 tanh(double x) in tanh() function
[all …]
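The header quoted above spells out the classic fdlibm range reduction: use the odd symmetry tanh(-x) = -tanh(x), return x itself below 2**-28, evaluate via expm1 of +/-2x in the two middle ranges, and saturate to 1 from 22 upward. A minimal Python sketch of that piecewise scheme, assuming IEEE-754 doubles (it mirrors the quoted ranges, not the bit-level s_tanh.c code):

    import math

    def tanh_piecewise(x: float) -> float:
        # Piecewise evaluation in the spirit of the fdlibm scheme quoted above.
        if math.isnan(x):
            return x                      # tanh(NaN) is NaN
        sign = -1.0 if x < 0 else 1.0     # tanh(-x) = -tanh(x)
        ax = abs(x)
        if ax < 2.0 ** -28:
            return x                      # tiny: tanh(x) := x
        if ax < 1.0:
            t = math.expm1(-2.0 * ax)     # t = e^(-2|x|) - 1
            return sign * (-t / (t + 2.0))
        if ax < 22.0:
            t = math.expm1(2.0 * ax)      # t = e^(2|x|) - 1
            return sign * (1.0 - 2.0 / (t + 2.0))
        return sign                       # 22 <= |x| <= INF: tanh(x) := +/-1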
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
tanh.c
42 "failed to create TanH operator with %zu channels: number of channels must be non-zero", in pytorch_qnnp_create_tanh_nc_q8()
49 "failed to create TanH operator with %.7g input scale: scale must be finite and positive", in pytorch_qnnp_create_tanh_nc_q8()
56 "failed to create TanH operator with %.7g output scale: scale must be finite and positive", in pytorch_qnnp_create_tanh_nc_q8()
63 "failed to create TanH operator with [%" PRIu8 ", %" PRIu8 in pytorch_qnnp_create_tanh_nc_q8()
74 … "failed to create TanH operator with %.7g output scale: only output scale of 2/256 is supported", in pytorch_qnnp_create_tanh_nc_q8()
81 "failed to create TanH operator with %" PRIu8 in pytorch_qnnp_create_tanh_nc_q8()
100 "failed to allocate 256 bytes for TanH lookup table"); in pytorch_qnnp_create_tanh_nc_q8()
110 /* Scale tanh(x) by 1 / output scale = 128.0 in pytorch_qnnp_create_tanh_nc_q8()
137 pytorch_qnnp_operator_t tanh, in pytorch_qnnp_setup_tanh_nc_q8() argument
150 tanh->batch_size = 0; in pytorch_qnnp_setup_tanh_nc_q8()
[all …]
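The checks above enforce the operator's quantization contract: channel count non-zero, input and output scales finite and positive, and the output scale fixed at 2/256, after which a 256-byte lookup table is filled with the precomputed tanh of every possible uint8 input (scaling tanh(x) by 1/output_scale = 128). A hedged Python sketch of that table construction; the function and parameter names here are illustrative, not the pytorch_qnnp API:

    import math

    def build_q8_tanh_table(input_scale, input_zero_point, output_zero_point):
        output_scale = 2.0 / 256.0                      # only supported output scale
        table = []
        for q in range(256):                            # one entry per uint8 input value
            x = input_scale * (q - input_zero_point)    # dequantize the input value
            y = math.tanh(x) / output_scale             # scale tanh(x) by 1 / output scale = 128
            q_out = int(round(y)) + output_zero_point   # requantize around the output zero point
            table.append(max(0, min(255, q_out)))       # clamp to the uint8 range
        return table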
/aosp_15_r20/external/pytorch/test/cpp/tensorexpr/
test_graph_opt.cpp
85 %6 : Float(60, strides=[1], device=cpu) = aten::tanh(%5) in TEST_F()
93 // The `aten::log` and `aten::tanh` ops must be moved to the inputs of in TEST_F()
99 ->check("aten::tanh") in TEST_F()
100 ->check("aten::tanh") in TEST_F()
101 ->check("aten::tanh") in TEST_F()
104 ->check_not("aten::tanh") in TEST_F()
110 auto ref = at::tanh(at::log(at::cat({x, y, z}, 0))); in TEST_F()
132 %5 : Float(60, strides=[1], device=cpu) = aten::tanh(%cat) in TEST_F()
141 // The `aten::tanh` op must be moved to the inputs of `aten::cat`. in TEST_F()
145 .check("aten::tanh") in TEST_F()
[all …]
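The pass being tested exploits the fact that element-wise ops like aten::log and aten::tanh commute with aten::cat, so they can be sunk to the individual inputs of the concatenation without changing the result. A small numpy check of that equivalence (an illustration of the identity, not the TensorExpr pass):

    import numpy as np

    x, y, z = (np.random.rand(20).astype(np.float32) + 0.5 for _ in range(3))

    after_cat = np.tanh(np.log(np.concatenate([x, y, z])))         # tanh(log(cat(x, y, z)))
    per_input = np.concatenate([np.tanh(np.log(x)),
                                np.tanh(np.log(y)),
                                np.tanh(np.log(z))])               # cat of per-input tanh(log(.))

    assert np.allclose(after_cat, per_input)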
/aosp_15_r20/external/python/cpython2/Lib/test/
cmath_testcases.txt
1555 -- For exp, cosh, sinh, tanh we limit tests to arguments whose
1858 -- tanh: Hyperbolic Tangent --
1862 tanh0000 tanh 0.0 0.0 -> 0.0 0.0
1863 tanh0001 tanh 0.0 -0.0 -> 0.0 -0.0
1864 tanh0002 tanh -0.0 0.0 -> -0.0 0.0
1865 tanh0003 tanh -0.0 -0.0 -> -0.0 -0.0
1868 tanh0004 tanh -21.200500450664993 -1.6970729480342996 -> -1.0 1.9241352344849399e-19
1869 tanh0005 tanh -0.34158771504251928 -8.0848504951747131 -> -2.123711225855613 1.2827526782026006
1870 tanh0006 tanh -15.454144725193689 -0.23619582288265617 -> -0.99999999999993283 -3.4336684248260036e…
1871 tanh0007 tanh -7.6103163119661952 -0.7802748320307008 -> -0.99999999497219438 -4.9064845343755437e-…
[all …]
/aosp_15_r20/art/test/123-inline-execute2/
expected-stdout.txt
8 Math.tanh(0.0) = 0.000000000000
18 Math.tanh(0.7853981633974483) = 0.655794202633
27 Math.tanh(1.5707963267948966) = 0.917152335667
37 Math.tanh(2.356194490192345) = 0.982193380007
47 Math.tanh(3.141592653589793) = 0.996272076221
57 Math.tanh(3.9269908169872414) = 0.999223894879
66 Math.tanh(4.71238898038469) = 0.999838613989
76 Math.tanh(5.497787143782138) = 0.999966449000
86 Math.tanh(6.283185307179586) = 0.999993025340
157 StrictMath.tanh(0.0) = 0.0
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
cwise_ops_unary_test.py
206 self._compareBoth(x, np.tanh, math_ops.tanh)
235 self._compareBothSparse(x, np.tanh, math_ops.tanh)
242 self._compareBoth(x, np.tanh, math_ops.tanh)
244 self._compareBoth(x, np.tanh, math_ops.tanh)
265 self._compareBoth(x, np.tanh, math_ops.tanh)
290 self._compareBothSparse(x, np.tanh, math_ops.tanh)
316 self._compareBoth(x, np.tanh, math_ops.tanh)
344 self._compareBothSparse(x, np.tanh, math_ops.tanh)
369 self._compareBoth(x, np.tanh, math_ops.tanh)
392 self._compareBothSparse(x, np.tanh, math_ops.tanh)
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/jit/
introduce_floating_point_jitter_pass_test.cc
46 Output tanh_a = ops::Tanh(root.WithOpName("tanh_a"), sigmoid_a); in TEST()
47 Output tanh_b = ops::Tanh(root.WithOpName("tanh_b"), sigmoid_b); in TEST()
62 auto m_tanh_a = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_a_with_jitter))); in TEST()
67 auto m_tanh_b = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_b_with_jitter))); in TEST()
125 Output tanh = ops::Tanh(root.WithOpName("tanh"), sigmoid); in TEST() local
139 auto m_tanh = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_with_jitter))); in TEST()
141 Node* tanh_transformed = testing::FindNodeByName(graph.get(), "tanh"); in TEST()
155 Output tanh_s = ops::Tanh(root.WithOpName("tanh_s"), svd.s); in TEST()
156 Output tanh_u = ops::Tanh(root.WithOpName("tanh_u"), svd.u); in TEST()
157 Output tanh_v = ops::Tanh(root.WithOpName("tanh_v"), svd.v); in TEST()
[all …]
/aosp_15_r20/libcore/ojluni/src/test/java/lang/Math/
HyperbolicTests.java
27 * @summary Tests for {Math, StrictMath}.{sinh, cosh, tanh}
742 * Test accuracy of {Math, StrictMath}.tanh. The specified accuracy is 2.5 ulps.
744 * The definition of tanh(x) is
748 * The series expansion of tanh(x) =
754 * 1. For large values of x tanh(x) ~= signum(x)
756 * 2. For small values of x, tanh(x) ~= x.
758 * Additionally, tanh is an odd function; tanh(-x) = -tanh(x).
771 // x tanh(x) in testTanh()
970 // For values of x larger than 22, tanh(x) is 1.0 in double in testTanh()
988 Tests.testTolerance("Math.tanh(double", in testTanhCaseWithTolerance()
[all …]
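The quoted test comment lists the three facts the accuracy test leans on: tanh(x) is approximately x for small x, approximately signum(x) for large x (exactly 1.0 in double once x exceeds 22), and odd, so tanh(-x) = -tanh(x). A quick Python illustration of the same facts, assuming a typical IEEE-754 double libm:

    import math

    assert math.tanh(1e-9) == 1e-9                  # small x: tanh(x) ~= x to double precision
    assert math.tanh(22.5) == 1.0                   # large x: the result rounds to signum(x)
    assert math.tanh(-0.75) == -math.tanh(0.75)     # odd function: tanh(-x) = -tanh(x)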
/aosp_15_r20/external/arm-optimized-routines/pl/math/test/testcases/directed/
tanh.tst
1 ; tanh.tst
6 func=tanh op1=7ff80000.00000001 result=7ff80000.00000001 errno=0
7 func=tanh op1=fff80000.00000001 result=7ff80000.00000001 errno=0
8 func=tanh op1=7ff00000.00000001 result=7ff80000.00000001 errno=0 status=i
9 func=tanh op1=fff00000.00000001 result=7ff80000.00000001 errno=0 status=i
10 func=tanh op1=7ff00000.00000000 result=3ff00000.00000000 errno=0
11 func=tanh op1=fff00000.00000000 result=bff00000.00000000 errno=0
12 func=tanh op1=00000000.00000000 result=00000000.00000000 errno=0
13 func=tanh op1=80000000.00000000 result=80000000.00000000 errno=0
17 func=tanh op1=00000000.00000001 result=00000000.00000001 errno=0 maybestatus=ux
[all …]
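The directed cases encode the special-value contract in raw IEEE-754 hex (7ff80000.00000001 is a NaN, 7ff00000.00000000 is +Inf, 3ff00000.00000000 is 1.0, 80000000.00000000 is -0.0): tanh of a NaN is NaN, tanh(+-Inf) is +-1, and tanh(+-0) is +-0 with the sign preserved. The same contract checked in Python, assuming an IEEE-754 libm:

    import math

    assert math.isnan(math.tanh(math.nan))                               # tanh(NaN) = NaN
    assert math.tanh(math.inf) == 1.0 and math.tanh(-math.inf) == -1.0   # tanh(+/-Inf) = +/-1
    assert math.tanh(0.0) == 0.0                                         # tanh(+0) = +0
    assert math.copysign(1.0, math.tanh(-0.0)) == -1.0                   # tanh(-0) keeps the minus sign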
/aosp_15_r20/external/pytorch/benchmarks/fastrnns/
cells.py
19 cellgate = cellgate.tanh()
23 hy = outgate * cy.tanh()
43 cellgate = torch.tanh(cellgate)
47 hy = outgate * torch.tanh(cy)
67 cellgate = torch.tanh(cellgate)
71 hy = outgate * torch.tanh(cy)
90 cellgate = torch.tanh(cellgate)
94 hy = outgate * torch.tanh(cy)
109 cellgate = torch.tanh(cellgate)
113 hy = outgate * torch.tanh(cy)
[all …]
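These benchmark cells all follow the standard LSTM update: the cell-gate candidate passes through tanh, the new cell state mixes the forget and input gates, and the hidden state is the output gate times tanh of the new cell state. A compact numpy sketch of that cell math, with illustrative shapes and the usual [input, forget, cell, output] gate packing assumed (it is not the benchmark code itself):

    import numpy as np

    def sigmoid(v):
        return 1.0 / (1.0 + np.exp(-v))

    def lstm_cell(x, hx, cx, w_ih, w_hh, b):
        gates = x @ w_ih.T + hx @ w_hh.T + b
        i, f, g, o = np.split(gates, 4, axis=-1)    # assumed gate order: input, forget, cell, output
        i, f, o = sigmoid(i), sigmoid(f), sigmoid(o)
        g = np.tanh(g)                              # cellgate = tanh(cellgate)
        cy = f * cx + i * g                         # new cell state
        hy = o * np.tanh(cy)                        # hy = outgate * tanh(cy)
        return hy, cy

    H, I = 3, 2                                     # toy sizes just to make the sketch runnable
    x, hx, cx = np.random.rand(I), np.random.rand(H), np.random.rand(H)
    w_ih, w_hh, b = np.random.rand(4 * H, I), np.random.rand(4 * H, H), np.zeros(4 * H)
    hy, cy = lstm_cell(x, hx, cx, w_ih, w_hh, b)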
/aosp_15_r20/external/arm-optimized-routines/pl/math/
v_tanh_3u.c
2 * Double-precision vector tanh(x) function.
42 the scalar variant of tanh. */ in expm1_inline()
66 return v_call_f64 (tanh, x, y, special); in special_case()
69 /* Vector approximation for double-precision tanh(x), using a simplified
73 float64x2_t VPCS_ATTR V_NAME_D1 (tanh) (float64x2_t x) in V_NAME_D1() argument
92 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in V_NAME_D1()
101 PL_SIG (V, D, 1, tanh, -10.0, 10.0)
102 PL_TEST_ULP (V_NAME_D1 (tanh), 2.27)
103 PL_TEST_EXPECT_FENV (V_NAME_D1 (tanh), WANT_SIMD_EXCEPT)
104 PL_TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0, 0x1p-27, 5000)
[all …]
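The scalar, Neon, and SVE routines in this directory all evaluate the same identity, tanh(x) = (e^2x - 1) / (e^2x + 1), through an inlined expm1 so that the numerator does not cancel for small |x|; the vector versions simply apply the same math lane-wise. A scalar Python sketch of that core evaluation, without the tiny-input and boring-bound special cases the real routines handle:

    import math

    def tanh_via_expm1(x: float) -> float:
        q = math.expm1(2.0 * x)       # q = e^(2x) - 1, accurate near zero
        return q / (q + 2.0)          # (e^(2x) - 1) / (e^(2x) + 1)

    assert math.isclose(tanh_via_expm1(0.3), math.tanh(0.3), rel_tol=1e-12)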
sv_tanh_3u.c
2 * Double-precision SVE tanh(x) function.
40 the scalar variant of tanh. */ in expm1_inline()
65 return sv_call_f64 (tanh, x, y, special); in special_case()
68 /* SVE approximation for double-precision tanh(x), using a simplified
72 svfloat64_t SV_NAME_D1 (tanh) (svfloat64_t x, svbool_t pg) in SV_NAME_D1() argument
83 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in SV_NAME_D1()
92 PL_SIG (SV, D, 1, tanh, -10.0, 10.0)
93 PL_TEST_ULP (SV_NAME_D1 (tanh), 2.27)
94 PL_TEST_SYM_INTERVAL (SV_NAME_D1 (tanh), 0, 0x1p-27, 5000)
95 PL_TEST_SYM_INTERVAL (SV_NAME_D1 (tanh), 0x1p-27, 0x1.241bf835f9d5fp+4, 50000)
[all …]
tanh_3u.c
2 * Double-precision tanh(x) function.
48 /* Approximation for double-precision tanh(x), using a simplified version of
50 tanh(-0x1.c4a4ca0f9f3b7p-3) got -0x1.bd6a21a163627p-3
53 tanh (double x) in tanh() function
69 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in tanh()
74 PL_SIG (S, D, 1, tanh, -10.0, 10.0)
75 PL_TEST_ULP (tanh, 2.27)
76 PL_TEST_SYM_INTERVAL (tanh, 0, TinyBound, 1000)
77 PL_TEST_SYM_INTERVAL (tanh, TinyBound, BoringBound, 100000)
78 PL_TEST_SYM_INTERVAL (tanh, BoringBound, inf, 1000)
v_tanhf_2u6.c
2 * Single-precision vector tanh(x) function.
32 /* Approximation for single-precision vector tanh(x), using a simplified
36 float32x4_t VPCS_ATTR V_NAME_F1 (tanh) (float32x4_t x) in V_NAME_F1() argument
59 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in V_NAME_F1()
68 PL_SIG (V, F, 1, tanh, -10.0, 10.0)
69 PL_TEST_ULP (V_NAME_F1 (tanh), 2.09)
70 PL_TEST_EXPECT_FENV (V_NAME_F1 (tanh), WANT_SIMD_EXCEPT)
71 PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0, 0x1p-23, 1000)
72 PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0x1p-23, 0x1.205966p+3, 100000)
73 PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0x1.205966p+3, inf, 100)
sv_tanhf_2u6.c
2 * Single-precision SVE tanh(x) function.
31 /* Approximation for single-precision SVE tanh(x), using a simplified
35 svfloat32_t SV_NAME_F1 (tanh) (svfloat32_t x, const svbool_t pg) in SV_NAME_F1() argument
47 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in SV_NAME_F1()
55 PL_SIG (SV, F, 1, tanh, -10.0, 10.0)
56 PL_TEST_ULP (SV_NAME_F1 (tanh), 2.07)
57 PL_TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), 0, 0x1p-23, 1000)
58 PL_TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), 0x1p-23, 0x1.205966p+3, 100000)
59 PL_TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), 0x1.205966p+3, inf, 100)
/aosp_15_r20/prebuilts/go/linux-x86/src/math/
tanh.go
11 // tanh.c
17 // double x, y, tanh();
19 // y = tanh( x );
30 // tanh(x) = sinh(x)/cosh(x) = 1 - 2/(exp(2x) + 1).
68 // Tanh returns the hyperbolic tangent of x.
72 // Tanh(±0) = ±0
73 // Tanh(±Inf) = ±1
74 // Tanh(NaN) = NaN
75 func Tanh(x float64) float64 { func
79 return tanh(x)
[all …]
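The Go comment's closed form follows from the exponential definition of the hyperbolic functions; multiplying the sinh/cosh quotient through by e^x gives, step by step:

    \tanh x = \frac{\sinh x}{\cosh x}
            = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
            = \frac{e^{2x} - 1}{e^{2x} + 1}
            = 1 - \frac{2}{e^{2x} + 1}

which is exactly the 1 - 2/(exp(2x) + 1) form used above and the (e^2x - 1)/(e^2x + 1) form used by the C routines earlier in these results.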
/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_tanh.py
32 class Tanh(torch.nn.Module): class in TestTanh
35 self.tanh = torch.nn.Tanh()
38 return self.tanh(x)
50 .check(["torch.ops.aten.tanh.default"])
69 .check(["torch.ops.aten.tanh.default"])
93 .check_count({"torch.ops.aten.tanh.default": 1})
122 self._test_tanh_tosa_MI_pipeline(self.Tanh(), (test_data,))
126 self._test_tanh_tosa_BI_pipeline(self.Tanh(), (test_data,))
130 self._test_tanh_tosa_u55_BI_pipeline(self.Tanh(), (test_data,))
134 self._test_tanh_tosa_u85_BI_pipeline(self.Tanh(), (test_data,))
/aosp_15_r20/external/armnn/docs/
05_03_delegate.dox
44 - AVERAGE_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
46 - AVERAGE_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, …
54 - CONCATENATION, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
56 - CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
58 - CONV_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
62 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
82 - FULLY_CONNECTED, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
120 - MAX_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
122 - MAX_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, NONE
192 - TANH
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/tests/
exhaustive_unary_test_complex.cc
41 // TODO(b/138126045): Current libc++ implementation of the complex tanh in SetParamsForTanh()
44 // TODO(b/138750327): Current libc++ implementation of the complex tanh in SetParamsForTanh()
121 // The current libc++ implementation of the complex tanh function provides
122 // less accurate results when the denomenator of a complex tanh is small, due
124 // we cast it to and from a complex128 when computing tanh.
125 UNARY_TEST_COMPLEX_64(Tanh, {
128 // This implementation of Tanh becomes less accurate when the denominator in __anon9f6fcb090602()
137 Tanh,
139 return static_cast<complex64>(std::tanh(static_cast<complex128>(x))); in __anon9f6fcb090702()
192 // Similar to the Tanh bug.
[all …]
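The workaround visible in the snippet is to evaluate the poorly conditioned complex64 tanh in double precision: cast the operand to complex128, take tanh there, and cast back. A numpy sketch of that reference computation (illustrative, not the XLA exhaustive-test harness):

    import numpy as np

    def tanh_c64_via_c128(x: np.complex64) -> np.complex64:
        # Compute tanh in complex128 to avoid the cancellation that hurts the
        # complex64 path when the tanh denominator is small, then narrow back.
        return np.complex64(np.tanh(np.complex128(x)))

    z = np.complex64(-0.34 - 8.08j)             # an arbitrary sample complex input
    print(tanh_c64_via_c128(z), np.tanh(z))     # double-precision reference vs. direct complex64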
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/reference/
lstm_cell.h
120 const float new_input = std::tanh(activ_temp_data[Offset( in LstmCell()
136 output_gate * std::tanh(new_state); in LstmCell()
180 // for a fixed-point tanh() implementation for that format, which internally
190 // This array is only fed to Logistic and Tanh functions, for which
198 // Now, Logistic and Tanh
202 // Logistic(4) = 1 - 1.8e-2 Tanh(4) = 1 - 6.7e-4
203 // Logistic(8) = 1 - 3.4e-4 Tanh(8) = 1 - 2.3e-7
204 // Logistic(16) = 1 - 1.1e-7 Tanh(16) = 1 - 2.5e-14
345 // Rest of the LSTM cell: tanh and logistic math functions, and some adds in LstmCell()
355 // This is the return type of math functions such as tanh, logistic, in LstmCell()
[all …]
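The saturation figures in the quoted comment are easy to reproduce: the distance of the logistic function and tanh from 1 shrinks roughly like e^-a and e^-2a respectively, which is why the fixed-point kernel only needs good resolution in a narrow band around zero. A quick check of the quoted values (plain Python, not the fixed-point kernel):

    import math

    for a in (4, 8, 16):
        logistic_tail = 1.0 - 1.0 / (1.0 + math.exp(-a))   # 1 - Logistic(a): ~1.8e-2, 3.4e-4, 1.1e-7
        tanh_tail = 1.0 - math.tanh(a)                     # 1 - Tanh(a):     ~6.7e-4, 2.3e-7, 2.5e-14
        print(a, logistic_tail, tanh_tail)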
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
layout_optimization_move_transposes_begin.mlir
8 // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
9 // CHECK: return %[[TANH]]
11 %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
23 // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
24 // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
27 %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
57 // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
58 // CHECK: %[[ADD:[0-9]*]] = "tf.AddV2"(%[[TANH]], %[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
61 %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
/aosp_15_r20/external/libopus/dnn/training_tf2/
rdovae.py
78 y = x - d*tf.math.tanh(x/(.1+d))
205 … enc_dense1 = Dense(cond_size2, activation='tanh', kernel_constraint=constraint, name='enc_dense1')
207 … enc_dense3 = Dense(cond_size2, activation='tanh', kernel_constraint=constraint, name='enc_dense3')
209 … enc_dense5 = Dense(cond_size2, activation='tanh', kernel_constraint=constraint, name='enc_dense5')
211 … enc_dense7 = Dense(cond_size, activation='tanh', kernel_constraint=constraint, name='enc_dense7')
212 … enc_dense8 = Dense(cond_size, activation='tanh', kernel_constraint=constraint, name='enc_dense8')
229 global_dense1 = Dense(128, activation='tanh', name='gdense1')
230 global_dense2 = Dense(nb_state_dim, activation='tanh', name='gdense2')
242 … dec_dense1 = Dense(cond_size2, activation='tanh', kernel_constraint=constraint, name='dec_dense1')
244 … dec_dense3 = Dense(cond_size2, activation='tanh', kernel_constraint=constraint, name='dec_dense3')
[all …]
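Beyond the tanh-activated Dense and GRU layers, the first quoted line applies a tanh-based soft dead-zone, y = x - d*tanh(x/(0.1 + d)): for small |x| the output is scaled down to roughly 0.1/(0.1 + d) of the input, while for large |x| it approaches x - d*sign(x). A quick numerical look at that behaviour (numpy, illustrative values only):

    import numpy as np

    d = 1.0
    x = np.array([-2.0, -0.5, -0.1, 0.0, 0.1, 0.5, 2.0])
    y = x - d * np.tanh(x / (0.1 + d))
    print(np.round(y, 4))   # small inputs are pulled toward 0, large ones keep roughly x - d*sign(x)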
