/aosp_15_r20/external/tensorflow/tensorflow/python/framework/
  python_api_parameter_converter_test.py
    63: input_specs,  (argument)
    68: api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
    204: input_specs=dict(x="int32", y="float32"),
    212: input_specs=dict(x="T"),
    220: input_specs=dict(x="T"),
    228: input_specs=dict(x="T", y="T"),
    236: input_specs=dict(x="T", y="T"),
    246: input_specs=dict(x="T"),
    254: input_specs=dict(x="N * T", y="T"),
    263: input_specs=dict(x="N * T", y="T"),
    [all …]
  python_api_info_test.py
    49: input_specs,  (argument)
    54: api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
    243: def testInitializeFromParamSpecs(self, api_name, param_names, input_specs,  (argument)
    246: input_specs, attr_specs)
/aosp_15_r20/external/pytorch/torch/distributed/tensor/_ops/
  _common_rules.py
    23: input_specs: Tuple[DTensorSpec, ...],
    28: for input_dim, input_spec in zip(input_dims, input_specs):
    75: input_specs = op_schema.args_spec
    104: for input_dim, input_spec in zip(input_dims, input_specs):
    146: op_schema, input_dims, input_specs, dim_to_sharding, []
    154: if value != len(input_specs):
    167: for input_dim, input_spec in zip(input_dims, input_specs):
    189: op_schema, input_dims, input_specs, dim_to_sharding, pending_sums
    213: assert input_specs[0].tensor_meta is not None
    216: input_specs[0].tensor_meta.stride,
    [all …]
  _matrix_ops.py
    50: input_specs=(input_strategy.output_spec,),
    69: assert strtg.input_specs is not None
    70: self_spec = strtg.input_specs[0]
    71: mat2_spec = strtg.input_specs[1]
    108: assert strtg.input_specs is not None
    109: mat1_spec = strtg.input_specs[0]
    110: mat2_spec = strtg.input_specs[1]
    125: strtg.input_specs = (self_spec, mat1_spec, mat2_spec)
    326: input_specs=(
  utils.py
    241: input_specs: List[DTensorSpec] = [
    246: assert len(input_specs) == len(input_args_strategy)
    249: if inplace_op and self_spec.placements != input_specs[0].placements:
    257: for inp, s in zip(input_args_strategy, input_specs)
    264: for input_strategy, input_spec in zip(input_args_strategy, input_specs)
    275: input_specs=input_specs,
  _tensor_ops.py
    163: PlacementStrategy(output_specs=output_spec, input_specs=(arg_spec,))
    199: input_specs=(input_spec,),
    216: input_specs=(input_spec,),
    236: output_specs=arg_spec, input_specs=(arg_spec, replica_spec)
    522: input_specs = tuple(
    532: input_specs=input_specs,
    559: input_specs = tuple(
    566: input_specs=input_specs,
/aosp_15_r20/external/tensorflow/tensorflow/lite/python/
  tflite_keras_util.py
    76: input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
    77: if input_specs is None:
    82: input_specs = input_specs[0][0]
    84: input_specs = model._get_save_spec(  # pylint: disable=protected-access
    86: if input_specs is None:
    88: input_specs = _enforce_names_consistency(input_specs)
    90: if isinstance(input_specs,
    91: collections_abc.Sequence) and len(input_specs) == 1:
    94: return input_specs
    96: return [input_specs]
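
The hits above revolve around Keras' Model.save_spec, which returns the (args, kwargs) call signature recorded the first time the model was called. A minimal sketch of that lookup, assuming a TF2/Keras-2 environment where save_spec is available; the model and shapes are made up for illustration:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
    model(tf.zeros([2, 8]))  # save_spec is only populated once the model has been called

    spec = model.save_spec(dynamic_batch=True)  # -> (args, kwargs), or None if never called
    if spec is not None:
        input_specs = spec[0][0]  # first positional argument, as tflite_keras_util.py does
        print(input_specs)        # TensorSpec(shape=(None, 8), dtype=tf.float32, ...)
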
/aosp_15_r20/external/pytorch/torch/export/
  graph_signature.py
    205: input_specs: List[InputSpec]
    213: for s in self.input_specs
    223: for s in self.input_specs
    232: for s in self.input_specs
    243: for s in self.input_specs
    252: for s in self.input_specs
    261: for s in self.input_specs:
    297: for s in self.input_specs
    309: for s in self.input_specs
    342: for s in self.input_specs
    [all …]
  exported_program.py
    484: for i, spec in enumerate(ep.graph_signature.input_specs)
    488: input_specs = [
    495: for i, spec in enumerate(ep.graph_signature.input_specs)
    509: assert len(graph_signature.user_inputs) == len(ep.graph_signature.input_specs)
    512: for i, spec in enumerate(ep.graph_signature.input_specs)
    537: input_specs=input_specs, output_specs=output_specs
    859: for input_ in self.graph_signature.input_specs:
    924: for spec in self.graph_signature.input_specs
    978: for i, s in enumerate(self._graph_signature.input_specs)
    981: len(self._graph_signature.input_specs),
    [all …]
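
Both files iterate over ExportGraphSignature.input_specs, the per-input metadata (kind, argument, optional target) attached to an ExportedProgram. A small sketch of inspecting it on a freshly exported module; the module is made up for illustration:

    import torch
    from torch.export import export

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.linear(x)

    ep = export(M(), (torch.randn(3, 4),))
    for spec in ep.graph_signature.input_specs:
        # e.g. InputKind.PARAMETER for linear.weight/bias, InputKind.USER_INPUT for x
        print(spec.kind, spec.arg.name, spec.target)
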
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tfrt/benchmarks/
  benchmark_mlir_function.cc
    51: llvm::ArrayRef<InputTensorSpec> input_specs) {  (in GetInputTensors() argument)
    54: for (const InputTensorSpec& spec : input_specs) {  (in GetInputTensors())
    82: llvm::ArrayRef<InputTensorSpec> input_specs,  (in RunJitRtBenchmark() argument)
    105: llvm::SmallVector<Tensor> input_tensors = GetInputTensors(input_specs);  (in RunJitRtBenchmark())
    171: ArrayRef<InputTensorSpec> input_specs) {  (in RunTfrtBenchmark() argument)
    179: llvm::SmallVector<Tensor> input_tensors = GetInputTensors(input_specs);  (in RunTfrtBenchmark())
    195: llvm::ArrayRef<InputTensorSpec> input_specs) {  (in RunEigenBenchmark() argument)
    205: llvm::SmallVector<Tensor> input_tensors = GetInputTensors(input_specs);  (in RunEigenBenchmark())
  benchmark_mlir_function.h
    38: llvm::ArrayRef<InputTensorSpec> input_specs,
    45: llvm::ArrayRef<InputTensorSpec> input_specs);
    53: llvm::ArrayRef<InputTensorSpec> input_specs);
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/saving/
  saving_utils.py
    75: …input_specs = model._get_save_spec(dynamic_batch=not keep_original_batch_size)  # pylint: disable=…
    76: if input_specs is None:
    78: input_specs = _enforce_names_consistency(input_specs)
    80: if isinstance(input_specs,
    81: collections.abc.Sequence) and len(input_specs) == 1:
    84: return input_specs
    86: return [input_specs]
/aosp_15_r20/external/executorch/exir/
  lowered_backend_module.py
    298: input_specs = [
    315: input_specs=input_specs, output_specs=output_specs
    478: input_specs = []
    488: {input_spec.arg.name: input_spec for input_spec in old_signature.input_specs}
    502: input_specs.append(
    514: input_specs.append(orig_input_spec)
    517: input_specs.append(orig_input_spec)
    551: input_specs.append(
    644: input_specs=input_specs, output_specs=output_specs
    854: original_program._graph_signature.input_specs = [
    [all …]
/aosp_15_r20/external/pytorch/test/export/
  test_lift_unlift.py
    109: input_specs = []
    112: input_specs.append(
    124: return input_specs
    134: input_specs=self.create_input_specs(),
    232: tensor_constant_input_spec = graph_signature.input_specs[2]
    239: obj_constant_input_spec = graph_signature.input_specs[3]
    287: constant_input_spec = graph_signature.input_specs[0]
    323: input_specs = builder.create_input_specs()
    331: graph_signature = ExportGraphSignature(input_specs, output_specs)
    350: self.assertEqual(len(graph_signature.input_specs), 3)
    [all …]
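
The test builds its input_specs list by hand and passes it straight to ExportGraphSignature(input_specs, output_specs) (hit 331). A stripped-down sketch of that construction with hypothetical argument names; the real test also appends CONSTANT_TENSOR and CUSTOM_OBJ entries:

    from torch.export.graph_signature import (
        ExportGraphSignature,
        InputKind,
        InputSpec,
        OutputKind,
        OutputSpec,
        TensorArgument,
    )

    input_specs = [
        InputSpec(kind=InputKind.PARAMETER, arg=TensorArgument(name="p_weight"), target="weight"),
        InputSpec(kind=InputKind.USER_INPUT, arg=TensorArgument(name="x"), target=None),
    ]
    output_specs = [
        OutputSpec(kind=OutputKind.USER_OUTPUT, arg=TensorArgument(name="out"), target=None),
    ]
    graph_signature = ExportGraphSignature(input_specs, output_specs)
    print(len(graph_signature.input_specs))  # 2
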
/aosp_15_r20/external/tensorflow/tensorflow/python/distribute/experimental/rpc/
  rpc_ops.py
    298: input_specs=get_input_specs_from_function(concrete_fn),
    306: input_specs=get_input_specs_from_function(func),
    359: input_specs = nested_structure_coder.decode_proto(m.input_specs)
    364: self._add_method(m.method, output_specs, input_specs, self._client_handle,
    367: def _add_method(self, method_name, output_specs, input_specs, client_handle,  (argument)
    374: if input_specs:
    375: nest.assert_same_structure(args, input_specs)
/aosp_15_r20/external/pytorch/torch/_export/passes/
  lift_constants_pass.py
    137: inputs = graph_signature.input_specs
    139: input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs
    142: input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs
    266: graph_signature.input_specs.insert(
/aosp_15_r20/external/tensorflow/tensorflow/python/compiler/tensorrt/test/
  tf_trt_integration_test_base.py
    259: input_specs=[
    366: return [spec.name + ":0" for spec in params.input_specs]
    414: params.input_specs[i].name: current_input_data[i]
    415: for i in range(len(params.input_specs))
    446: assert len(params.input_specs) == len(data), (
    534: self._GetParamsCached().input_specs)
    571: self._GetParamsCached().input_specs)
    969: for spec in params.input_specs:
    1000: params.graph_fn, input_signature=params.input_specs)
    1022: input_specs = self._GetParamsCached().input_specs
    [all …]
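
Here params.input_specs is a list of tf.TensorSpec objects that names the graph inputs (hit 366 appends ":0" to form tensor names) and is later used as the input_signature of a tf.function (hit 1000). A self-contained sketch of that pattern with made-up shapes and names:

    import tensorflow as tf

    input_specs = [
        tf.TensorSpec(shape=[None, 24, 24, 2], dtype=tf.float32, name="input_0"),
        tf.TensorSpec(shape=[None, 24, 24, 2], dtype=tf.float32, name="input_1"),
    ]

    def graph_fn(a, b):
        return tf.identity(a + b, name="output_0")

    # Trace the function against a fixed signature, as the TF-TRT tests do.
    concrete = tf.function(graph_fn, input_signature=input_specs).get_concrete_function()
    feed_names = [spec.name + ":0" for spec in input_specs]
    print(feed_names)  # ['input_0:0', 'input_1:0']
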
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
  device_target.cc
    110: llvm::SmallVector<Type, 4> input_specs, out_specs;  (in DecomposeMultiplyAccumulateScale() local)
    112: input_specs.push_back(spec.cast<TypeAttr>().getValue());  (in DecomposeMultiplyAccumulateScale())
    118: auto in_spec = input_specs[0].dyn_cast<UniformQuantizedType>();  (in DecomposeMultiplyAccumulateScale())
    120: auto w_spec = input_specs[1].dyn_cast<UniformQuantizedType>();  (in DecomposeMultiplyAccumulateScale())
    121: auto b_spec = input_specs[2].dyn_cast<UniformQuantizedType>();  (in DecomposeMultiplyAccumulateScale())
  quantization_context.cc
    128: llvm::SmallVector<Attribute, 4> input_specs;  (in Finalize() local)
    134: input_specs.push_back(original_input_specs[i]);  (in Finalize())
    136: input_specs.push_back(TypeAttr::get(requantize.params));  (in Finalize())
    138: input_specs.push_back(TypeAttr::get(state.params));  (in Finalize())
    141: op->setAttr("input_specs", ArrayAttr::get(context, input_specs));  (in Finalize())
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tfrt/saved_model/
  saved_model.cc
    84: input_specs;  (in MapFunctionSignaturesFromTFSavedModelMLIR() local)
    97: input_specs.push_back(std::move(statusor_spec).ValueOrDie());  (in MapFunctionSignaturesFromTFSavedModelMLIR())
    133: sig_info.input_specs = input_specs;  (in MapFunctionSignaturesFromTFSavedModelMLIR())
/aosp_15_r20/external/tensorflow/tensorflow/core/tfrt/saved_model/
  saved_model.cc
    167: DCHECK(signature.input_specs.empty());  (in GetFunctionSignaturesFromTFSavedModelMLIR())
    168: signature.input_specs.reserve(sig_info.input_specs.size());  (in GetFunctionSignaturesFromTFSavedModelMLIR())
    169: for (auto& spec : sig_info.input_specs) {  (in GetFunctionSignaturesFromTFSavedModelMLIR())
    170: signature.input_specs.push_back(TensorSpec(spec.first, spec.second));  (in GetFunctionSignaturesFromTFSavedModelMLIR())
    399: signature.input_specs.reserve(signature_def.inputs().size());  (in GetSignaturesFromSignatureDef())
    404: signature.input_specs.push_back(  (in GetSignaturesFromSignatureDef())
    649: TF_RET_CHECK(signature.input_specs.size() == inputs.size())  (in IsInputSpecsCorrect())
    651: << " input size is wrong, expected: " << signature.input_specs.size()  (in IsInputSpecsCorrect())
    654: const auto& expected_input_spec = signature.input_specs[i];  (in IsInputSpecsCorrect())
/aosp_15_r20/external/pytorch/torch/distributed/tensor/
  _op_schema.py
    83: input_specs: Optional[Sequence[DTensorSpec]] = None  (variable in PlacementStrategy)
    105: assert self.input_specs is not None, "input_specs of PlacementStrategy is None!"
    106: assert len(self.input_specs) > index, (
    110: return self.input_specs[index]
    113: if self.input_specs is not None:
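
PlacementStrategy (an internal DTensor class, per hit 83) pairs an output_specs field with an optional input_specs sequence of DTensorSpec, and the op strategies in the _ops/ files above construct one per input/output. Because the real classes need an initialized DeviceMesh, here is a stand-in dataclass sketch, with hypothetical names, that mirrors only the input_specs/output_specs pairing and the guarded lookup seen at hits 105-110:

    from dataclasses import dataclass
    from typing import Optional, Sequence, Tuple

    @dataclass
    class FakeSpec:                 # stand-in for DTensorSpec
        placements: Tuple[str, ...]

    @dataclass
    class FakePlacementStrategy:    # stand-in for PlacementStrategy
        output_specs: FakeSpec
        input_specs: Optional[Sequence[FakeSpec]] = None

        def input_spec(self, index: int = 0) -> FakeSpec:
            # mirrors the asserts around hits 105-106
            assert self.input_specs is not None, "input_specs is None!"
            assert len(self.input_specs) > index
            return self.input_specs[index]

    arg_spec = FakeSpec(placements=("Shard(0)",))
    strategy = FakePlacementStrategy(output_specs=arg_spec, input_specs=(arg_spec,))
    print(strategy.input_spec(0).placements)  # ('Shard(0)',)
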
/aosp_15_r20/external/pytorch/torch/distributed/tensor/experimental/
  _tp_transform.py
    212: input_specs=_get_input_node_specs(node, placement_strategies),
    237: input_specs=output_sharding.redistribute_schema.args_spec
    269: input_specs: Optional[Sequence[DTensorSpec]] = None,
    275: input_specs=input_specs,
    366: expected_input_specs = node_sharding.input_specs
/aosp_15_r20/external/executorch/exir/passes/
  insert_write_back_for_buffers_pass.py
    78: for in_spec in ep.graph_signature.input_specs:
    136: input_specs=ep.graph_signature.input_specs,
/aosp_15_r20/external/pytorch/torch/csrc/jit/mobile/nnc/
  context.h
    147: const std::vector<InputSpec>& input_specs() const {  (in input_specs() function)
    151: void set_input_specs(const std::vector<InputSpec>& input_specs) {  (in set_input_specs() argument)
    152: input_specs_ = input_specs;  (in set_input_specs())