
Full-text search for "as_tensor" (results 1 – 25 of 98), sorted by relevance


/aosp_15_r20/external/pytorch/test/
test_numba_integration.py  247 """torch.as_tensor() and torch.tensor() supports the __cuda_array_interface__ protocol.
249 If an object exposes the __cuda_array_interface__, .as_tensor() and .tensor()
278 # Zero-copy when using `torch.as_tensor()`
281 torch_ary = torch.as_tensor(numba_ary, device="cuda")
299 torch_ary = torch.as_tensor(numba_ary, device="cpu")
329 """torch.as_tensor(numba_ary) should have correct inferred (contiguous) strides"""
346 torch_ary = torch.as_tensor(numba_ary, device="cuda")
356 …"""torch.as_tensor(obj) tensor grabs a reference to obj so that the lifetime of obj exceeds the te…
358 torch_ary = torch.as_tensor(numba_ary, device="cuda")
375 """torch.as_tensor() tensor device must match active numba context."""
[all …]
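For context, the zero-copy interop these tests exercise looks roughly like the following; a minimal sketch, assuming numba and a CUDA device are available (array contents are illustrative only):

    import numpy as np
    import torch
    from numba import cuda

    # A numba device array exposes __cuda_array_interface__.
    numba_ary = cuda.to_device(np.arange(6, dtype=np.float32))

    # torch.as_tensor() consumes that interface without copying, so the
    # resulting tensor aliases the same device memory.
    torch_ary = torch.as_tensor(numba_ary, device="cuda")

    # Asking for a CPU tensor instead forces a copy off the device.
    cpu_copy = torch.as_tensor(numba_ary, device="cpu")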
test_tensor_creation_ops.py  2161 # test torch.as_tensor()
2162 …check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr) # not …
2163 … check_copy(torch.as_tensor(source, dtype=torch.float), False, True) # copy and keep the graph
2252 self.assertEqual(torch.tensor(x), torch.as_tensor(x))
2253 … self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
2259 torch.as_tensor(z)
2266 torch.as_tensor(z)
2271 torch.as_tensor(z)
2275 self.assertIs(y, torch.as_tensor(y))
2276 self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
[all …]
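A quick illustration of the copy semantics these assertions pin down (a sketch, not part of the test file):

    import torch

    y = torch.ones(3)                                         # float32
    assert torch.as_tensor(y) is y                            # same dtype/device: no copy, same object
    assert torch.as_tensor(y, dtype=torch.float64) is not y   # dtype change: converted copy

    src = torch.randn(3, requires_grad=True)
    conv = torch.as_tensor(src, dtype=torch.float64)
    assert conv.requires_grad and not conv.is_leaf            # the copy stays in the autograd graph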
/aosp_15_r20/external/pytorch/torch/_export/serde/
serialize.py  447 graph_input = Argument.create(as_tensor=TensorArgument(name=node.name))
749 return Argument.create(as_tensor=TensorArgument(name=dedup_name))
751 return Argument.create(as_tensor=TensorArgument(name=arg.name))
759 return Argument.create(as_tensor=TensorArgument(name=arg_name))
862 as_tensor=TensorArgument(name=a.name)
884 as_tensor=TensorArgument(name=a.get_name())
1085 return Argument.create(as_tensor=TensorArgument(name=x.name))
1269 as_tensor=self.serialize_tensor_output(name, meta_val)
1596 if output.type == "as_tensor":
1597 return self.serialized_name_to_node[output.as_tensor.name]
[all …]
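The serializer treats Argument as a tagged union: exactly one variant field such as as_tensor is populated, and the active tag is later read back via .type. A minimal sketch of that pattern with simplified stand-ins (not the real schema classes):

    from dataclasses import dataclass

    @dataclass
    class TensorArgument:
        name: str

    class Argument:
        # Stand-in for the serde union: exactly one keyword selects the variant.
        def __init__(self, **kwargs):
            assert len(kwargs) == 1, "exactly one variant may be set"
            (self.type, value), = kwargs.items()
            setattr(self, self.type, value)

        @classmethod
        def create(cls, **kwargs):
            return cls(**kwargs)

    arg = Argument.create(as_tensor=TensorArgument(name="conv1_out"))
    assert arg.type == "as_tensor" and arg.as_tensor.name == "conv1_out"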
schema.py  134 # "as_tensor" field, and None values serialized to the "as_none" field.
137 as_tensor: TensorArgument
157 as_tensor: TensorArgument
schema.yaml  8 as_tensor:
288 as_tensor:
/aosp_15_r20/external/executorch/exir/serde/
export_serialize.py  449 graph_input = Argument.create(as_tensor=TensorArgument(name=node.name))
721 return Argument.create(as_tensor=TensorArgument(name=arg.name))
729 return Argument.create(as_tensor=TensorArgument(name=arg_name))
832 as_tensor=TensorArgument(name=a.name)
854 as_tensor=TensorArgument(name=a.get_name())
1055 return Argument.create(as_tensor=TensorArgument(name=x.name))
1274 as_tensor=self.serialize_tensor_output(name, meta_val)
1569 if output.type == "as_tensor":
1570 return self.serialized_name_to_node[output.as_tensor.name]
1603 if input_.type in ("as_tensor", "as_sym_int", "as_custom_obj"):
[all …]
serialize.py  180 as_tensor=self.serialize_tensor_output(node.name, meta_val)
215 return [schema.Argument.create(as_tensor=arg) for arg in arg_list]
396 and serialized_node.outputs[0].type == "as_tensor"
511 and serialized_node.outputs[0].type == "as_tensor"
513 return self.sync_fx_node(serialized_node.outputs[0].as_tensor.name, fx_node)
schema.py  142 # "as_tensor" field, and None values serialized to the "as_none" field.
145 as_tensor: TensorArgument
165 as_tensor: TensorArgument
/aosp_15_r20/external/pytorch/test/edge/
Evalue.h  126 at::Tensor as_tensor;
255 new (&payload.as_tensor) at::Tensor(t);
264 return std::move(payload.as_tensor);
269 return payload.as_tensor;
274 return payload.as_tensor;
403 new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
404 rhs.payload.as_tensor.~Tensor();
419 payload.as_tensor.~Tensor();
433 new (&payload.as_tensor) at::Tensor(p.as_tensor);
/aosp_15_r20/external/executorch/runtime/core/
evalue.h  111 executorch::aten::Tensor as_tensor;
239 new (&payload.as_tensor) executorch::aten::Tensor(t);
268 auto res = std::move(payload.as_tensor);
275 return payload.as_tensor;
280 return payload.as_tensor;
433 new (&payload.as_tensor)
434 executorch::aten::Tensor(std::move(rhs.payload.as_tensor));
435 rhs.payload.as_tensor.~Tensor();
450 payload.as_tensor.~Tensor();
464 new (&payload.as_tensor) executorch::aten::Tensor(p.as_tensor);
/aosp_15_r20/external/pytorch/aten/src/ATen/core/
ivalue.h  382 return payload.as_tensor.use_count(); in use_count()
398 std::swap(payload.as_tensor, rhs.payload.as_tensor); in swap()
400 at::Tensor t = std::move(payload.as_tensor); in swap()
409 // payload.as_tensor.~Tensor(); in swap()
411 new (&rhs.payload.as_tensor) at::Tensor(std::move(t)); in swap()
426 new (&payload.as_tensor) at::Tensor(std::move(t)); in IValue()
442 return payload.as_tensor.unsafeGetTensorImpl(); in unsafeToTensorImpl()
1056 return payload.as_tensor.defined(); in isPtrType()
1066 return payload.as_tensor.unsafeGetTensorImpl(); in internalToPointer()
1189 // the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable in destroy()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/strings_ops/
regex_replace_op_test.py  94 def as_tensor(s): function
101 (as_string, as_tensor),
102 (as_tensor, as_string),
103 (as_tensor, as_tensor))
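A plausible shape of the two small helpers those parameterized cases combine; their bodies here are an assumption, shown only to make the pattern concrete:

    import tensorflow as tf

    def as_string(s):
        # Leave the Python string untouched.
        return s

    def as_tensor(s):
        # Wrap the Python string in a tf string tensor so both call styles are exercised.
        return tf.constant(s, dtype=tf.string)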
/aosp_15_r20/external/tensorflow/tensorflow/python/training/
checkpoint_ops_test.py  156 self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())
189 self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())
226 self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())
261 self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())
295 remapped_embeddings.as_tensor())
341 remapped_embeddings.as_tensor())
379 remapped_embeddings.as_tensor())
/aosp_15_r20/external/pytorch/torch/optim/
asgd.py  103 torch.as_tensor(
265 new_eta = torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha))
267 new_mu = torch.as_tensor(1 / max(1, step - t0))
399 torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha), device=device)
403 torch.as_tensor(1 / max(1, _get_value(step) - t0), device=device)
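The ASGD step keeps its decayed step size (eta) and averaging coefficient (mu) as tensors; the update shown in these snippets reduces to the following (illustrative hyperparameter values, matching the optimizer's defaults):

    import torch

    lr, lambd, alpha, t0 = 1e-2, 1e-4, 0.75, 1e6
    step = 10

    new_eta = torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha))
    new_mu = torch.as_tensor(1 / max(1, step - t0))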
/aosp_15_r20/external/pytorch/torch/_numpy/
_ufuncs.py  95 return torch.as_tensor(x, dtype=dtype)
167 x1 = torch.as_tensor(x1, dtype=dtype)
170 x1 = torch.as_tensor(x1)
173 x2 = torch.as_tensor(x2)
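The NumPy-compat layer relies on torch.as_tensor() sharing memory with a NumPy array when no conversion is needed and copying only when a dtype is forced; for example (a sketch):

    import numpy as np
    import torch

    a = np.arange(4, dtype=np.float32)

    t = torch.as_tensor(a)                         # no copy: aliases the NumPy buffer
    a[0] = 42.0
    assert t[0] == 42.0

    t64 = torch.as_tensor(a, dtype=torch.float64)  # dtype change forces a copy
    a[1] = 7.0
    assert t64[1] != 7.0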
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/containers/
Value.h  61 api::vTensor as_tensor; member
117 TypeTag::TENSOR, api::vTensor, as_tensor, vTensor); in Value()
161 payload.as_tensor.~vTensor(); in ~Value()
259 as_tensor);
/aosp_15_r20/external/pytorch/
pt_ops.bzl  577 "aten::as_tensor.float",
579 "aten::as_tensor.int",
581 "aten::as_tensor.bool",
585 "aten::as_tensor",
586 "aten::as_tensor.list",
/aosp_15_r20/external/pytorch/test/typing/pass/
creation_ops.py  34 # torch.as_tensor
36 torch.as_tensor(a)
37 torch.as_tensor(a, device=torch.device("cuda"))
/aosp_15_r20/external/pytorch/torch/_dynamo/
codegen.py  181 # as_tensor here, because we memoize as_tensor calls on
183 graph_outputs_key = self.add_graph_output(value.as_tensor(self.tx))
472 self.create_load_attr("as_tensor"),
/aosp_15_r20/external/pytorch/test/typing/reveal/
tensor_constructors.py  40 # torch.as_tensor
43 reveal_type(torch.as_tensor(a)) # E: {Tensor}
44 reveal_type(torch.as_tensor(a, device=torch.device("cuda"))) # E: {Tensor}
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
register_special_ops.cpp  187 // torch.tensor has a fourth requires_grad arg but torch.as_tensor not, so in createTensorFromList()
279 "aten::as_tensor." #operator_type "(" #operator_type \
354 … "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"),
372 … "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor"),
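These schemas back torch.as_tensor inside TorchScript, where the scalar and list overloads (aten::as_tensor.float/int/bool/list) are resolved at compile time; a small sketch, assuming a recent PyTorch build:

    from typing import List

    import torch

    @torch.jit.script
    def build(xs: List[float]) -> torch.Tensor:
        # Resolves to the aten::as_tensor.list overload registered above.
        return torch.as_tensor(xs, dtype=torch.float32)

    print(build([1.0, 2.0, 3.0]))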
/aosp_15_r20/external/pytorch/test/optim/
test_lrscheduler.py  2425 lr = torch.as_tensor(0.1)
2441 lr = torch.as_tensor(0.1)
2442 max_lr = torch.as_tensor(0.2)
2443 base_momentum = torch.as_tensor(0.8)
2444 max_momentum = torch.as_tensor(0.9)
2473 lr = torch.as_tensor(0.1)
2474 base_momentum = torch.as_tensor(0.85)
2475 max_momentum = torch.as_tensor(0.95)
2504 lr = torch.as_tensor(0.1)
2505 swa_lr = torch.as_tensor(0.05)
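These cases check that schedulers keep working when hyperparameters arrive as 0-dim tensors instead of Python floats (CyclicLR-style max_lr/momentum bounds, SWALR's swa_lr, and so on). The basic ingredient looks like this (a sketch, assuming a PyTorch version whose optimizers accept a Tensor lr):

    import torch

    params = [torch.nn.Parameter(torch.zeros(2))]
    opt = torch.optim.SGD(params, lr=torch.as_tensor(0.1), momentum=0.9)

    sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)
    opt.step()
    sched.step()
    print(opt.param_groups[0]["lr"])   # still a tensor, now scaled by gamma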
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
python_torch_functions_manual.cpp  116 // implemented on python object to allow torch.as_tensor to be constructed with
124 "as_tensor(PyObject* data, *, ScalarType dtype=None, Device? device=None)", in THPVariable_as_tensor()
133 jit::tracer::warn("torch.as_tensor", jit::tracer::WARN_CONSTRUCTOR); in THPVariable_as_tensor()
134 return THPVariable_Wrap(torch::utils::as_tensor( in THPVariable_as_tensor()
372 {"as_tensor",
/aosp_15_r20/external/pytorch/torch/utils/data/_utils/
collate.py  70 return torch.as_tensor(data)
285 return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)
293 return torch.as_tensor(batch)
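The default collate path converts NumPy arrays (and scalars) with torch.as_tensor() before stacking them into a batch; for instance (a sketch):

    import numpy as np
    import torch
    from torch.utils.data import default_collate

    batch = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    stacked = default_collate(batch)   # each element passes through torch.as_tensor()
    assert stacked.shape == (2, 2)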
/aosp_15_r20/external/pytorch/torch/csrc/inductor/aoti_torch/
oss_proxy_executor.cpp  29 TORCH_CHECK(serialized_arg_type == "as_tensor"); in prefill_stack_with_static_arguments()
267 serialized_output_type == "as_tensor", in get_output_info_from_serialized()
412 if (item_type == "as_tensor") { in call_function()
