/aosp_15_r20/external/pytorch/test/quantization/core/
  test_utils.py
    138: float_tensor = torch.tensor([fp_min, fp_max])
    148: observer(float_tensor)
    152: quantized_tensor = _quantize_weight(float_tensor, observer)
    157: float_tensor *= 1.2
    159: quantized_tensor = _quantize_weight(float_tensor, observer)
    169: float_tensor = torch.tensor([[fp_min, fp_max]])
    180: observer(float_tensor)
    184: quantized_tensor = _quantize_weight(float_tensor, observer)
    189: float_tensor *= 1.2
    191: quantized_tensor = _quantize_weight(float_tensor, observer)
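The test_utils.py hits exercise PyTorch's internal _quantize_weight helper together with an observer. A rough sketch of the same flow using only public APIs (the observer choice, values, and dtype below are illustrative assumptions, not taken from the test):

    import torch
    from torch.ao.quantization.observer import MinMaxObserver

    # Illustrative stand-ins for the fp_min / fp_max pair used in the test.
    float_tensor = torch.tensor([-1.0, 2.0])

    observer = MinMaxObserver(dtype=torch.quint8)
    observer(float_tensor)                        # record min/max statistics
    scale, zero_point = observer.calculate_qparams()

    # Quantize with the observed parameters; this mirrors the intent of the
    # internal helper but is not the same code path.
    quantized_tensor = torch.quantize_per_tensor(
        float_tensor, float(scale), int(zero_point), torch.quint8
    )
    print(quantized_tensor.int_repr(), quantized_tensor.dequantize())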
/aosp_15_r20/external/tensorflow/tensorflow/lite/tools/optimize/
  quantize_weights_test.cc
    168: const auto float_tensor = float_graph->tensors()->Get(i);   [in TEST_F(), local]
    170: EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());   [in TEST_F()]
    171: EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());   [in TEST_F()]
    173: GetAsVector(float_tensor->shape()));   [in TEST_F()]
    174: EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str());   [in TEST_F()]
    175: EXPECT_EQ(quant_tensor->type(), float_tensor->type());   [in TEST_F()]
    207: const auto float_tensor = float_graph->tensors()->Get(i);   [in TEST_F(), local]
    208: EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());   [in TEST_F()]
    209: EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());   [in TEST_F()]
    211: GetAsVector(float_tensor->shape()));   [in TEST_F()]
    [all …]
  modify_model_interface.cc
    207: TensorT* float_tensor = subgraph->tensors[tot.input_index].get();   [in SetInputTypeToUINT8(), local]
    208: float_tensor->type = TensorType_UINT8;   [in SetInputTypeToUINT8()]
    209: if (float_tensor->quantization == nullptr) {   [in SetInputTypeToUINT8()]
    210: float_tensor->quantization = std::make_unique<QuantizationParametersT>();   [in SetInputTypeToUINT8()]
    212: float_tensor->quantization->scale.push_back(quant_tensor_scale);   [in SetInputTypeToUINT8()]
    213: float_tensor->quantization->zero_point.push_back(quant_tensor_zp + 128);   [in SetInputTypeToUINT8()]
    234: TensorT* float_tensor = subgraph->tensors[tot.output_index].get();   [in SetOutputTypeToUINT8(), local]
    235: float_tensor->type = TensorType_UINT8;   [in SetOutputTypeToUINT8()]
    236: if (float_tensor->quantization == nullptr) {   [in SetOutputTypeToUINT8()]
    237: float_tensor->quantization = std::make_unique<QuantizationParametersT>();   [in SetOutputTypeToUINT8()]
    [all …]
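SetInputTypeToUINT8/SetOutputTypeToUINT8 above retag an int8-quantized interface tensor as UINT8 and shift its zero point by 128. A small sketch of why that shift preserves the represented values (the numbers are made up):

    # An int8 value q (in [-128, 127]) and the uint8 value q + 128 (in [0, 255])
    # dequantize to the same real number when the scale is kept and the zero
    # point is also shifted by 128.
    scale = 0.05
    zp_int8, q_int8 = -3, 42

    zp_uint8 = zp_int8 + 128
    q_uint8 = q_int8 + 128

    assert scale * (q_int8 - zp_int8) == scale * (q_uint8 - zp_uint8)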
  quantize_model_test.cc
    174: const auto float_tensor = float_graph->tensors()->Get(i);   [in TEST_P(), local]
    175: EXPECT_EQ(quant_tensor->buffer, float_tensor->buffer());   [in TEST_P()]
    176: EXPECT_EQ(quant_tensor->is_variable, float_tensor->is_variable());   [in TEST_P()]
    177: EXPECT_EQ(quant_tensor->shape, GetAsVector(float_tensor->shape()));   [in TEST_P()]
    178: EXPECT_EQ(quant_tensor->name, float_tensor->name()->str());   [in TEST_P()]
    179: EXPECT_EQ(quant_tensor->type, float_tensor->type());   [in TEST_P()]
    197: const auto float_tensor = float_graph->tensors()->Get(i);   [in TEST_P(), local]
    198: EXPECT_EQ(quant_tensor->buffer, float_tensor->buffer());   [in TEST_P()]
    199: EXPECT_EQ(quant_tensor->is_variable, float_tensor->is_variable());   [in TEST_P()]
    200: EXPECT_EQ(quant_tensor->shape, GetAsVector(float_tensor->shape()));   [in TEST_P()]
    [all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
  quantize_weights_test.cc
    171: const Tensor* float_tensor = possible_tensors->Get(i);   [in FindMatchingExpectedTensor(), local]
    173: if (ExpectEqualTensor(quantized_tensor, float_tensor)) {   [in FindMatchingExpectedTensor()]
    179: return float_tensor;   [in FindMatchingExpectedTensor()]
    185: float_model->buffers[float_tensor->buffer()].get()->data;   [in FindMatchingExpectedTensor()]
    187: (quantized_tensor->type() == float_tensor->type())) {   [in FindMatchingExpectedTensor()]
    189: return float_tensor;   [in FindMatchingExpectedTensor()]
    239: const auto float_tensor = FindMatchingExpectedTensor(   [in TEST_F(), local]
    244: EXPECT_NE(float_tensor, nullptr);   [in TEST_F()]
    277: const auto float_tensor = FindMatchingExpectedTensor(   [in TEST_F(), local]
    282: EXPECT_NE(float_tensor, nullptr);   [in TEST_F()]
    [all …]
/aosp_15_r20/external/pytorch/docs/source/
  tensor_attributes.rst
    71: >>> float_tensor = torch.ones(1, dtype=torch.float)
    97: >>> (float_tensor + double_tensor).dtype
    105: >>> torch.add(long_tensor, float_tensor).dtype
    116: >>> float_tensor *= float_tensor
    117: >>> float_tensor *= int_tensor
    118: >>> float_tensor *= uint_tensor
    119: >>> float_tensor *= bool_tensor
    120: >>> float_tensor *= double_tensor
    126: >>> int_tensor *= float_tensor
    129: >>> float_tensor *= complex_float_tensor
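These lines come from the type-promotion examples in tensor_attributes.rst: mixed-dtype operations promote to a common dtype, while in-place operations only accept results that can be cast back to the target tensor's dtype. A small self-contained illustration of those rules:

    import torch

    float_tensor = torch.ones(1, dtype=torch.float)
    double_tensor = torch.ones(1, dtype=torch.double)
    long_tensor = torch.ones(1, dtype=torch.long)
    int_tensor = torch.ones(1, dtype=torch.int)

    print((float_tensor + double_tensor).dtype)        # torch.float64
    print(torch.add(long_tensor, float_tensor).dtype)  # torch.float32

    float_tensor *= double_tensor   # allowed: the float64 result is cast back to float32
    try:
        int_tensor *= float_tensor  # disallowed: a float result cannot be cast to int in place
    except RuntimeError as err:
        print("in-place promotion failed:", err)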
/aosp_15_r20/external/executorch/backends/arm/_passes/
  scalars_to_attribute_pass.py
    58: float_tensor = torch.tensor(
    61: graph_module.register_buffer(tensor_constant_name, float_tensor)
    69: float_tensor, static_shapes=True
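This pass (like the xnnpack_quantizer_utils.py hit further down) lifts a Python scalar into a graph constant: wrap it in a tensor, register it as a buffer, and point a get_attr node at it. A minimal torch.fx sketch of that pattern, with hypothetical module and buffer names and none of the shape handling the real passes do:

    import operator
    import torch
    import torch.fx as fx

    class AddScalar(torch.nn.Module):
        def forward(self, x):
            return x + 1.0                    # the scalar we want to lift

    graph_module = fx.symbolic_trace(AddScalar())

    tensor_constant_name = "_tensor_constant_example"   # hypothetical name
    float_tensor = torch.tensor(1.0)
    graph_module.register_buffer(tensor_constant_name, float_tensor)

    for node in graph_module.graph.nodes:
        if node.op == "call_function" and node.target is operator.add:
            # Replace the literal 1.0 with a get_attr node for the new buffer.
            with graph_module.graph.inserting_before(node):
                const_node = graph_module.graph.get_attr(tensor_constant_name)
            node.args = (node.args[0], const_node)

    graph_module.recompile()
    print(graph_module(torch.zeros(2)))       # tensor([1., 1.])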
/aosp_15_r20/external/tensorflow/tensorflow/lite/python/
  util.py
    681: float_tensor, quant_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
    683: float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
    691: float_tensor.name, get_tf_type_name(float_type)))
    790: quant_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
    791: float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
    799: float_tensor.name, get_tf_type_name(float_type)))
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  DistributionTemplates.h
    208: auto float_tensor = at::view_as_real(self);   [in normal_impl_(), local]
    211: normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);   [in normal_impl_()]
    289: auto float_tensor = at::view_as_real(self);   [in uniform_impl_(), local]
    290: uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);   [in uniform_impl_()]
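normal_impl_ above handles complex tensors by viewing them as interleaved real/imaginary pairs (at::view_as_real) and drawing each component with std/sqrt(2), so the complex samples end up with the requested standard deviation. The same trick from Python, as a quick sanity check:

    import math
    import torch

    mean, std = 0.0, 1.0
    complex_tensor = torch.empty(100_000, dtype=torch.cfloat)

    # view_as_real shares storage, so filling the view fills the complex tensor.
    torch.view_as_real(complex_tensor).normal_(mean, std / math.sqrt(2))

    # With mean 0, E|z|^2 = Var(real) + Var(imag) = std^2/2 + std^2/2 = std^2,
    # so this should print a value close to 1.0.
    print(complex_tensor.abs().pow(2).mean())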
/aosp_15_r20/external/federated-compute/fcp/client/engine/
  example_query_plan_engine_test.cc
    383: tf::Tensor float_tensor = tensors.value()["float_tensor"];   [in TEST_F(), local]
    384: ASSERT_EQ(float_tensor.shape(), tf::TensorShape({3}));   [in TEST_F()]
    385: ASSERT_EQ(float_tensor.dtype(), tf::DT_FLOAT);   [in TEST_F()]
    386: auto float_data = static_cast<float*>(float_tensor.data());   [in TEST_F()]
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/eager/
  custom_device_test.cc
    167: Tensor float_tensor(DT_FLOAT, {});   [in TEST(), local]
    169: TensorHandle::CreateLocalHandle(std::move(float_tensor), physical_device,   [in TEST()]
  context_test.cc
    269: Tensor float_tensor = test::AsScalar<float>(3.0);   [in TEST_F(), local]
    272: float_tensor, context()->HostCPUName().c_str()));   [in TEST_F()]
/aosp_15_r20/external/pytorch/test/
  test_multiprocessing.py
    188: float_tensor = torch.ones(2, 2).float().cuda()
    191: queue.put(float_tensor)
    930: float_tensor = queue.get()
    932: self.assertEqual(float_tensor, all_ones)
    934: del float_tensor, byte_tensor
  test_legacy_vmap.py
    1723: float_tensor = torch.tensor([1.0, 2.0, 3.0])
    1732: self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
  test_torch.py
    8519: float_tensor = torch.FloatTensor([1.0, tiny_float])
    8522: self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
    8523: self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0)
    8529: self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
    8530: self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0)  # tiny_float to zero
  test_jit.py
    7191: float_tensor = torch.randn(5, 5, device=device)
    7196: tensors = [float_tensor, double_tensor, long_tensor]
/aosp_15_r20/external/pytorch/docs/cpp/source/notes/
  tensor_creation.rst
    315: torch::Tensor float_tensor = source_tensor.to(torch::kFloat32);
    319: The result of the conversion, ``float_tensor``, is a new tensor pointing to
    326: torch::Tensor gpu_tensor = float_tensor.to(torch::kCUDA);
    336: torch::Tensor gpu_two_tensor = float_tensor.to(torch::Device(torch::kCUDA, 1));
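The C++ note above stresses that Tensor::to returns a new tensor for both dtype and device conversions, leaving the source tensor untouched. The Python API behaves the same way; a short analogue (the CUDA calls assume a machine with at least two GPUs):

    import torch

    source_tensor = torch.randint(0, 10, (3, 3))       # an integer tensor
    float_tensor = source_tensor.to(torch.float32)      # new float32 tensor

    if torch.cuda.is_available():
        gpu_tensor = float_tensor.to("cuda")            # copy to the default GPU
        if torch.cuda.device_count() > 1:
            gpu_two_tensor = float_tensor.to(torch.device("cuda", 1))

    print(source_tensor.dtype, float_tensor.dtype)      # torch.int64 torch.float32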
/aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/
  xnnpack_quantizer_utils.py
    1072: float_tensor = torch.tensor(float(args[i]))
    1073: model.register_buffer(tensor_constant_name, float_tensor)
    1080: float_tensor, static_shapes=True
/aosp_15_r20/external/tensorflow/tensorflow/tools/graph_transforms/
  quantize_nodes_test.cc
    129: const Tensor& float_tensor = float_input.second;   [in TestGraphWithInputRange(), local]
    130: Tensor quantized_tensor(DT_QUINT8, float_tensor.shape());   [in TestGraphWithInputRange()]
    131: FloatTensorToQuantizedInPlace<quint8>(float_tensor, range_min, range_max,   [in TestGraphWithInputRange()]
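FloatTensorToQuantizedInPlace<quint8> above maps float values in [range_min, range_max] onto the 0..255 quint8 range. A conceptual sketch of that affine mapping (TensorFlow's exact rounding and range nudging may differ in detail):

    # Illustrative range; the test supplies its own range_min/range_max.
    range_min, range_max = -1.0, 3.0
    scale = (range_max - range_min) / 255.0

    def quantize(x: float) -> int:
        q = round((x - range_min) / scale)
        return min(255, max(0, q))            # clamp to the quint8 range

    print([quantize(v) for v in (-1.0, 0.0, 1.5, 3.0)])   # [0, 64, 159, 255]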
/aosp_15_r20/external/executorch/exir/tests/
  test_passes.py
    132: float_tensor = torch.tensor([[1.0, 2.0, 3.0]])
    136: (int_tensor, float_tensor),
    187: float_tensor_vert = float_tensor.T
/aosp_15_r20/external/tensorflow/tensorflow/python/feature_column/
  feature_column_v2_test.py
    882: float_tensor = sparse_tensor.SparseTensor(
    887: 'a_float': float_tensor
  feature_column_test.py
    745: float_tensor = sparse_tensor.SparseTensor(
    752: 'a_float': float_tensor
/aosp_15_r20/external/pytorch/test/functorch/
  test_vmap.py
    2397: float_tensor = torch.tensor([1.0, 2.0, 3.0])
    2406: self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))