Searched defs:input_min (Results 1 – 25 of 41) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
quantization_utils_test.cc:34 void TestRequantizeMany(Eigen::ThreadPoolDevice* eigen_device, float input_min, in TestRequantizeMany()
76 void TestRequantizeMany8To32Bit(float input_min, float input_max, in TestRequantizeMany8To32Bit()
230 const float input_min = ranges[range_index][0]; in TestRequantizeManyInNewRangeEigenVsNonEigen() local
282 const float input_min = -100.0f; in TimeRequantizeManyInNewRange() local
525 const float input_min = ranges[range_index][0]; in TestRequantizeInNewRange() local
547 const float input_min = -0.739539f; in TestRequantizeInNewRangeRealData() local
582 const float input_min = ranges[range_index][0]; in TestRequantizeInNewRange32To8Bit() local
625 const float input_min = 0.0f; in TestFloatTensorToQuantized() local
655 const float input_min = 0.0f; in TestOverflowWithEigen() local
678 const float input_min = -128.0f; in TestQuantizedTensorToFloat() local
quantize_and_dequantize_op.h:189 auto input_min = input_min_tensor->scalar<T>(); in Compute() local
230 auto input_min = input_min_tensor->vec<T>(); in Compute() local
301 auto input_min = input_min_tensor->vec<T>(); in Compute() local
quantized_batch_norm_op.cc:31 void ReferenceBatchNorm(const Tensor& input, const float input_min, in ReferenceBatchNorm()
94 void FixedPointBatchNorm(const Tensor& input, const float input_min, in FixedPointBatchNorm()
179 const float input_min = input_min_tensor.flat<float>()(0); in Compute() local
quantized_activation_ops_test.cc:45 const float input_min = -128.0f; in TEST_F() local
76 const float input_min = -128.0f; in TEST_F() local
quantized_pooling_ops_test.cc:51 const float input_min = 0.0f; in TEST_F() local
96 const float input_min = 0.0f; in TEST_F() local
quantized_bias_add_op_test.cc:51 const float input_min = 0.0f; in TEST_F() local
101 const float input_min = -2164.25f; in TEST_F() local
quantized_batch_norm_op_test.cc:60 const float input_min = -128.0f; in TEST_F() local
157 const float input_min = -128.0f; in TEST_F() local
quantized_concat_op.cc:41 const float input_min = (*input_min_and_max)[input_index].first; in Copy() local
87 const float input_min = input_mins[i].flat<float>()(0); in CalculateInputAndOutputRange() local
meta_support.cc:257 float input_min, float input_max, float output_min, in Requantize()
353 float input_min, float input_max, float bias_min, in QuantizedBiasAdd()
quantization_utils.cc:20 void GetOutputMinAndMaxForQuantizedAdd(float input_min, float input_max, in GetOutputMinAndMaxForQuantizedAdd()
requantize.cc:44 const Tensor& input_min = ctx->input(1); in Compute() local
quantize_down_and_shrink_range.cc:43 const Tensor& input_min = ctx->input(1); in Compute() local
quantized_bias_add_op.cc:62 const float input_min = min_input.flat<float>()(0); in Compute() local
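
Most of the core-kernel hits above share one pattern: input_min and input_max arrive as scalar float tensors alongside the quantized data (e.g. ctx->input(1) in requantize.cc, or min_input.flat<float>()(0) in quantized_bias_add_op.cc) and define the real-valued range that the integer codes stand for. A minimal sketch of that mapping, assuming the conventional uint8 affine scheme; the name DequantizeOne is hypothetical and this is not TensorFlow's own helper:

    // Sketch only: maps a uint8 code back to a float, given the tensor's
    // real-valued range [input_min, input_max]. Hypothetical helper.
    #include <cstdint>
    #include <limits>

    float DequantizeOne(uint8_t quantized, float input_min, float input_max) {
      constexpr float lowest  = std::numeric_limits<uint8_t>::lowest();  // 0
      constexpr float highest = std::numeric_limits<uint8_t>::max();     // 255
      const float range_scale = (input_max - input_min) / (highest - lowest);
      // Each integer step covers range_scale of the real-valued range.
      return input_min + (static_cast<float>(quantized) - lowest) * range_scale;
    }
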
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
fake_quantize_ops.cc:103 float input_min, input_max, scale; in FakeQuantWithMinMaxArgsGradOp() local
156 xla::XlaOp input_min = ctx->Input(1); in Compile() local
200 xla::XlaOp input_min = ctx->Input(2); in Compile() local
261 xla::XlaOp input_min = ctx->Input(1); in Compile() local
315 xla::XlaOp input_min = ctx->Input(2); in Compile() local
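
The XLA fake-quantize kernels above receive input_min/input_max either as attributes or as runtime XlaOp inputs and use them to clamp values and snap them onto a uniform grid of 2^num_bits levels. A rough sketch of that math follows; the real ops also nudge the range so that zero is exactly representable, which is omitted here, so treat this as illustrative rather than the XLA implementation:

    // Simplified fake quantization over [input_min, input_max]; range
    // nudging is deliberately left out.
    #include <algorithm>
    #include <cmath>

    float FakeQuant(float x, float input_min, float input_max, int num_bits) {
      const float levels = static_cast<float>((1 << num_bits) - 1);  // grid steps
      const float scale = (input_max - input_min) / levels;
      const float clamped = std::min(input_max, std::max(input_min, x));
      // Round to the nearest representable level, then map back to float.
      return std::round((clamped - input_min) / scale) * scale + input_min;
    }
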
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/
quantize_training.cc:52 float input_min; member
78 bool* range_given, float* input_min, float* input_max) { in FindType()
502 std::vector<Node*>* added_variables, Node** input_min, in MakeInputMinMax()
538 Node* input_min; in MakeQuantizeOp() local
632 float input_min = 0; in DoQuantizeTraining() local
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
table_test.cc:68 inline float GetLUTTolerance(float input_min, float input_max, float output_min, in GetLUTTolerance()
86 float input_min = -0.5f; in TableWithExpLUTToInt8Test() local
133 float input_min = -0.5f; in TableWithExpLUTToInt16Test() local
activations_test.cc:431 void TestQuantizedHardSwish(TensorType tensor_type, int size, float input_min, in TestQuantizedHardSwish()
463 void TestQuantizedHardSwishBias(TensorType tensor_type, float input_min, in TestQuantizedHardSwishBias()
528 float input_min = input_minmax.first; in TEST() local
reduce_test.cc:901 const float input_min = (tensor_type == TensorType_INT16) ? -24.0 : 0.0; in ConstIntProdOpTestNotKeepDimsLarge() local
928 const float input_min = (tensor_type == TensorType_INT16) ? -12.0 : 0.0; in ConstIntProdOpTestDisContigReduction() local
954 const float input_min = (tensor_type == TensorType_INT16) ? -12.0 : 0.0; in ConstIntProdOpTestContigReduction() local
1115 const float input_min = (tensor_type == TensorType_INT16) ? -24.0 : 0.0; in DynamicIntProdOpTestKeepDims() local
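
In the TFLite kernel tests above, input_min/input_max describe the float range from which the test harness derives an input tensor's quantization parameters. A short sketch of that derivation for int8, using an arbitrary example range and a hypothetical helper name (an assumption, not TFLite's actual helper code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    // Affine parameters implied by a float range for an int8 tensor.
    QuantParams ParamsFromRange(float input_min, float input_max) {
      constexpr float qmin = -128.0f;
      constexpr float qmax = 127.0f;
      const float scale = (input_max - input_min) / (qmax - qmin);
      // Choose the zero point so that real 0.0 falls on an integer code.
      const float zero_point =
          std::min(qmax, std::max(qmin, qmin - input_min / scale));
      return {scale, static_cast<int32_t>(std::round(zero_point))};
    }

    int main() {
      const QuantParams p = ParamsFromRange(-1.0f, 1.0f);  // arbitrary range
      std::cout << "scale=" << p.scale << " zero_point=" << p.zero_point << "\n";
      return 0;
    }
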
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
fake_quant_ops_test.py:78 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
176 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
277 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
382 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/tests/
activations_test.cc:195 void TestQuantizedHardSwish(int size, float input_min, float input_max, in TestQuantizedHardSwish()
237 float input_min = input_minmax.first; in HardSwishTestImpl() local
259 float input_min = -11.654928f; in HardSwishBiasTestImpl() local
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_quantized_pooling_ops_test.cc:50 const float input_min = 0.0f; in TEST_F() local
107 const float input_min = 0.0f; in TEST_F() local
mkl_concat_op.cc:67 const float input_min = (*input_min_and_max)[input_index].first; in Copy() local
111 const float input_min = input_mins[i].flat<float>()(0); in CalculateInputAndOutputRange() local
571 float input_min = input_mins[0].flat<float>()(0); in Compute() local
mkl_requantization_range_per_channel_op.cc:48 const Tensor& input_min = ctx->input(kInputMinIndex); in Compute() local
/aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/
QLSTMLayerNormalizationFixture.h:68 constexpr int16_t input_min = -1000; in fill() local
/aosp_15_r20/external/tensorflow/tensorflow/core/grappler/optimizers/
pin_to_host_optimizer_test.cc:91 Output input_min = ops::Const(s.WithOpName("input_min"), 0.0f); in TEST_F() local
