/aosp_15_r20/external/pytorch/test/dynamo/
  test_debug_utils.py
    41: _to_copy = torch.ops.aten._to_copy.default(x_1, dtype = torch.float16); x_1 = None
    44: return (convert_element_type, _to_copy, full, empty)
    56: _to_copy = torch.ops.aten._to_copy.default(x_1, dtype = torch.float64); x_1 = None
    59: return (convert_element_type, _to_copy, full, empty)

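The graph lines above call the op directly. A minimal eager sketch of what such a node computes (illustrative only, not code from the test):

    import torch

    x = torch.randn(4)
    # aten._to_copy is the primitive copy-and-convert op that traced graphs use
    # in place of the composite Tensor.to(); called eagerly it returns a fresh fp16 copy.
    y = torch.ops.aten._to_copy.default(x, dtype=torch.float16)
    assert y.dtype == torch.float16
    assert y.data_ptr() != x.data_ptr()
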
/aosp_15_r20/external/pytorch/torch/csrc/lazy/ts_backend/
  ts_native_functions.cpp
    140: at::Tensor LazyNativeFunctions::_to_copy( in _to_copy() function in torch::lazy::LazyNativeFunctions
    148: if (force_eager_fallback(at::aten::_to_copy)) { in _to_copy()
    151: …"Fallback is currently impossible for _to_copy since the fallback helper itself reinvokes _to_copy… in _to_copy()
    171: "Pinned memory used in lazy _to_copy, check if the behavior is as intended"); in _to_copy()
    209: // implemented _to_copy in _to_copy()
    231: // Case 4: lazy->lazy (special case: keep the _to_copy INSIDE the lazy in _to_copy()
    234: // Note: captured _to_copy will be executed with real eager tensors, not in _to_copy()

/aosp_15_r20/external/executorch/exir/passes/
  dim_order_ops_registry.py
    34: return _op_impl(torch.ops.aten._to_copy, *args, **kwargs)
    39: return _op_impl(torch.ops.aten._to_copy.out, *args, **kwargs)
    46: "aten._to_copy.default": exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
    53: "dim_order_ops._to_dim_order_copy.default": exir_ops.edge.aten._to_copy.default,

  memory_format_ops_pass.py
    58: f"_to_copy = rank: {ndim}, memory_format: {mem_format}."
    106: f"_to_copy = rank: {ndim}, memory_format: {nkwargs['memory_format']}."

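These passes inspect and rewrite the memory_format argument carried by _to_copy. A small eager sketch of what that argument does (illustrative only, not code from the pass):

    import torch

    x = torch.randn(1, 3, 8, 8)  # contiguous NCHW input
    # _to_copy also expresses layout/memory-format conversions, which is the
    # kwarg the memory-format and dim-order passes above rewrite.
    nhwc = torch.ops.aten._to_copy.default(x, memory_format=torch.channels_last)
    assert nhwc.is_contiguous(memory_format=torch.channels_last)
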
  remove_noop_pass.py
    99: Removes _to_copy that pass through arguments.
    107: if node.target not in (torch.ops.aten._to_copy.default,):

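A minimal sketch of that kind of cleanup on a torch.fx graph. The real executorch pass checks the conversion kwargs against the input's properties; this simplified version only drops _to_copy nodes that request no conversion at all:

    import torch

    def remove_noop_to_copy(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
        for node in list(gm.graph.nodes):
            if node.op == "call_function" and node.target is torch.ops.aten._to_copy.default:
                # No dtype/layout/memory_format requested: in a purely functional
                # graph the extra copy is redundant, so reroute users to the input.
                if not node.kwargs:
                    node.replace_all_uses_with(node.args[0])
                    gm.graph.erase_node(node)
        gm.graph.lint()
        gm.recompile()
        return gm
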
/aosp_15_r20/external/executorch/backends/xnnpack/test/passes/
  test_channels_last_tagged_reshape.py
    145: …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten._to_copy.default](args = (…
    149: …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten._to_copy.default](args = (…
    157: …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten._to_copy.default](args = (…
    159: …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten._to_copy.default](args = (…

/aosp_15_r20/external/executorch/backends/apple/mps/operators/
  op_clone.py
    18: target = ["aten.clone.default", "aten._to_copy.default"]
    28: if node.target == exir_ops.edge.aten._to_copy.default:
    32: "aten._to_copy not supported with more than one argument currently"

/aosp_15_r20/external/pytorch/torch/csrc/lazy/ts_backend/ops/
  to_copy.h
    8: // This IR was copied from code-generated output, but the entire _to_copy
    10: capture IR for certain permutations of _to_copy (e.g. dtype), and for the
    16: return OpKind(at::aten::_to_copy); in ClassOpKind()

/aosp_15_r20/external/pytorch/torch/_subclasses/
  functional_tensor.py
    31: # preserve the tensor conversion by forcing a non-semantic-breaking aten::_to_copy
    375: # because it can get optimized away. Instead we always replace it with _to_copy().
    379: torch.ops.aten._to_copy.default, types, args, kwargs
    387: torch.ops.aten._to_copy.default, types, args[:1], kwargs
    539: # We don't allow any mutation on result of dropout or _to_copy
    543: torch.ops.aten._to_copy.default,

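The comments above describe forcing the composite .to() down to an explicit _to_copy node so it cannot be optimized away. A quick way to observe that effect, sketched with make_fx rather than the functionalization machinery itself:

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx

    def f(x):
        return x.to(torch.float16) + 1

    # Traced to ATen IR, the composite Tensor.to() is expected to appear as a
    # call_function node targeting torch.ops.aten._to_copy.default.
    gm = make_fx(f)(torch.randn(4))
    print(gm.graph)
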
/aosp_15_r20/external/executorch/extension/training/
  README.md
    160: …%_to_copy : [num_users=2] = call_function[target=torch.ops.aten._to_copy.default](args = (%sum_1,)…
    162: …users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %_to_copy), kwargs = {})
    168: …s=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full_like, %_to_copy), kwargs = {})

/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  TensorConversions.cpp
    27: #include <ATen/ops/_to_copy.h>
    230: Tensor _to_copy( in _to_copy() function
    273: true, // force copy since we are in _to_copy in _to_copy()
    282: true, // force copy since we are in _to_copy in _to_copy()
    310: true, // force copy since we are in _to_copy in _to_copy()
    320: true, // force copy since we are in _to_copy in _to_copy()
    330: true, // force copy since we are in _to_copy in _to_copy()
    431: return at::_to_copy( in to_impl()

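The eager implementation above always materializes a copy, while the public .to() may return its input untouched. A small Python illustration of that contract (assumed behaviour of current PyTorch, not code from this file):

    import torch

    x = torch.randn(4)
    # .to() is allowed to be a no-op when nothing needs converting...
    assert x.to(torch.float32).data_ptr() == x.data_ptr()
    # ...whereas _to_copy always produces a new tensor, even with no arguments.
    y = torch.ops.aten._to_copy.default(x)
    assert y.data_ptr() != x.data_ptr()
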
/aosp_15_r20/external/pytorch/aten/src/ATen/
  FunctionalizeFallbackKernel.cpp
    16: #include <ATen/ops/_to_copy.h>
    255: auto out = at::_to_copy(self_, dtype, layout, device, pin_memory, non_blocking, memory_format); in _to_copy_functionalize()
    358: m.impl("_to_copy", TORCH_FN(_to_copy_functionalize)); in TORCH_LIBRARY_IMPL()

  FunctionalTensorWrapper.cpp
    17: #include <ATen/ops/_to_copy.h>
    233: // and we want _to_copy() to show up in the graph, not the composite .to() operator in replace_()
    235: value_ = at::_to_copy(value_, c10::TensorOptions().dtype(dtype()).layout(layout())); in replace_()

/aosp_15_r20/external/pytorch/test/profiler/
  test_profiler_tree.py
    296: aten::_to_copy
    349: aten::_to_copy
    408: aten::_to_copy
    517: aten::_to_copy

/aosp_15_r20/external/pytorch/torch/distributed/tensor/
  _shards_wrapper.py
    89: aten._to_copy.default: cls.handle_to_copy,
    128: aten._to_copy.default(shard, *args[1:], **kwargs)

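The wrapper maps aten._to_copy.default to a dedicated handler in its dispatch table and then reapplies the op per shard. A stripped-down sketch of that table pattern, using a TorchDispatchMode instead of a wrapper tensor subclass (all names here are illustrative):

    import torch
    from torch.utils._python_dispatch import TorchDispatchMode

    aten = torch.ops.aten

    class ToCopyInterceptor(TorchDispatchMode):
        def __init__(self):
            super().__init__()
            # op -> handler table, analogous to the one in _shards_wrapper.py
            self.handlers = {aten._to_copy.default: self.handle_to_copy}

        def handle_to_copy(self, func, types, args, kwargs):
            print("intercepted", func)
            return func(*args, **kwargs)

        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}
            handler = self.handlers.get(func)
            if handler is not None:
                return handler(func, types, args, kwargs)
            return func(*args, **kwargs)

    with ToCopyInterceptor():
        torch.randn(2).to(torch.float16)  # dispatches aten._to_copy.default
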
/aosp_15_r20/external/pytorch/test/
  test_autocast.py
    218: func is torch.ops.aten._to_copy.default
    327: func is torch.ops.aten._to_copy.default

  test_functionalization.py
    1187: …_to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge…
    1188: return _to_copy
    1204: …_to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge…
    1205: return _to_copy

/aosp_15_r20/external/executorch/backends/xnnpack/_passes/
  channels_last_tagged_reshape_pass.py
    289: target=exir_ops.edge.aten._to_copy.default,
    336: target=exir_ops.edge.aten._to_copy.default,

/aosp_15_r20/external/pytorch/test/expect/
  HasDecompTest.test_aten_core_operators.expect
    23: aten::_to_copy
    24: aten::_to_copy.out

/aosp_15_r20/external/pytorch/torch/_functorch/
  compilers.py
    38: op="call_function", target=torch.ops.aten._to_copy
    73: op="call_function", target=torch.ops.aten._to_copy

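Those lines filter FX nodes by op and target. A minimal standalone version of such a filter (a sketch, not the compilers.py helper itself):

    import torch

    def find_to_copy_nodes(gm: torch.fx.GraphModule):
        # Match both the overload packet and its .default overload, since traced
        # graphs may reference either form as the call_function target.
        targets = (torch.ops.aten._to_copy, torch.ops.aten._to_copy.default)
        return [
            n for n in gm.graph.nodes
            if n.op == "call_function" and n.target in targets
        ]
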
/aosp_15_r20/external/pytorch/torch/masked/maskedtensor/
  _ops_refs.py
    351: @register_dispatch_func([torch.ops.aten._to_copy])
    352: def _to_copy(func, *args, **kwargs): function

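register_dispatch_func fills a table mapping aten ops to masked-tensor handlers. A self-contained sketch of that decorator pattern (the table and handler body here are illustrative, not the maskedtensor internals):

    import torch

    _DISPATCH_TABLE = {}

    def register_dispatch_func(aten_ops):
        # Register one handler for several aten ops at once, keyed by op object.
        def decorator(fn):
            for op in aten_ops:
                _DISPATCH_TABLE[op] = fn
            return fn
        return decorator

    @register_dispatch_func([torch.ops.aten._to_copy])
    def _to_copy(func, *args, **kwargs):
        # A real handler would rebuild the mask alongside the data; here we
        # simply forward to the underlying op.
        return func(*args, **kwargs)
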
/aosp_15_r20/external/pytorch/torch/profiler/
  _pattern_matcher.py
    156: aten::fill_/aten::zero_ | aten::_to_copy
    189: if event.name != "aten::_to_copy":

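The anti-pattern matcher keys on the aten::_to_copy event name; the same events can be inspected directly from a profile (a sketch, exact event nesting may vary by build):

    import torch
    from torch.profiler import profile, ProfilerActivity

    x = torch.randn(128, 128)
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        x.to(torch.float16)

    # dtype conversions are recorded under the aten::_to_copy event name.
    to_copy_events = [e for e in prof.events() if e.name == "aten::_to_copy"]
    print(len(to_copy_events))
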
/aosp_15_r20/external/executorch/backends/qualcomm/_passes/
  utils.py
    38: if attr_n.target == exir_ops.edge.aten._to_copy.default:

/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  ToCopy.cpp
    48: VK_REGISTER_OP(aten._to_copy.default, to_copy);

/aosp_15_r20/external/executorch/backends/xnnpack/operators/
  op_to_copy.py
    33: target = "aten._to_copy.default"