/aosp_15_r20/external/pytorch/aten/src/ATen/test/

cpu_rng_test.cpp
  211: auto expected = torch::empty_like(actual);  in TEST_F()
  225: auto expected = torch::empty_like(actual);  in TEST_F()
  239: auto expected = torch::empty_like(actual);  in TEST_F()
  253: auto expected = torch::empty_like(actual);  in TEST_F()
  266: auto expected = torch::empty_like(actual);  in TEST_F()
  279: auto expected = torch::empty_like(actual);  in TEST_F()
  292: auto expected = torch::empty_like(actual);  in TEST_F()
  308: auto expected = torch::empty_like(actual);  in TEST_F()
  325: auto expected = torch::empty_like(actual);  in TEST_F()
  342: auto expected = torch::empty_like(actual);  in TEST_F()
  [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/

FakeQuantPerTensorAffine.cpp
   83: auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);  in fake_quantize_per_tensor_affine_cachemask()
   84: auto mask = at::empty_like(self, at::kBool, MemoryFormat::Preserve);  in fake_quantize_per_tensor_affine_cachemask()
  103: auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);  in _fake_quantize_per_tensor_affine_cachemask_tensor_qparams()
  104: auto mask = at::empty_like(self, at::kBool, MemoryFormat::Preserve);  in _fake_quantize_per_tensor_affine_cachemask_tensor_qparams()
  207: auto dX = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_tensor_affine_backward()
  208: auto dScale_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_tensor_affine_backward()
  209: auto dZeroPoint_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_tensor_affine_backward()

FakeQuantPerChannelAffine.cpp
   79: auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);  in fake_quantize_per_channel_affine_cachemask()
   80: auto mask = at::empty_like(self, at::kBool, MemoryFormat::Preserve);  in fake_quantize_per_channel_affine_cachemask()
  214: auto dX = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_channel_affine_backward()
  215: auto dScale_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_channel_affine_backward()
  216: auto dZeroPoint_vec = at::empty_like(X, X.options(), MemoryFormat::Preserve);  in _fake_quantize_learnable_per_channel_affine_backward()
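Both fake-quantization files above allocate their outputs the same way: the quantized tensor Y keeps the input's dtype and memory format, while the cachemask is a same-shaped bool tensor. A minimal sketch of that allocation pattern follows; the free function name `fake_quant_cachemask_outputs` is illustrative and not part of the source.

```cpp
#include <tuple>
#include <ATen/ATen.h>

// Sketch of the cachemask allocation pattern above (illustrative helper, not
// the actual kernel): Y mirrors the input's options and memory format, while
// the mask is a bool tensor of the same shape used to record which elements
// were touched by fake quantization.
std::tuple<at::Tensor, at::Tensor> fake_quant_cachemask_outputs(const at::Tensor& self) {
  auto Y = at::empty_like(self, self.options(), at::MemoryFormat::Preserve);
  auto mask = at::empty_like(self, at::kBool, at::MemoryFormat::Preserve);
  return std::make_tuple(Y, mask);
}
```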
/aosp_15_r20/external/pytorch/aten/src/ATen/native/

layer_norm.cpp
   15: #include <ATen/ops/empty_like.h>
   97: Tensor Y = at::native::empty_like(  in layer_norm_cpu()
  140: dX = at::native::empty_like(  in layer_norm_backward_cpu()
  149: dgamma = M > 0 ? at::native::empty_like(  in layer_norm_backward_cpu()
  165: dbeta = M > 0 ? at::native::empty_like(  in layer_norm_backward_cpu()
  230: at::empty_like(input),  in math_native_layer_norm()
  231: at::empty_like(input, c10::TensorOptions().dtype(result_type)),  in math_native_layer_norm()
  232: at::empty_like(input, c10::TensorOptions().dtype(result_type))  in math_native_layer_norm()

Activation.cpp
   34: #include <ATen/ops/empty_like.h>
  434: Tensor result = at::empty_like(self);  in hardtanh()
  637: auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in rrelu_with_noise_cpu()
  673: …return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, up…  in rrelu()
  678: …return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, u…  in rrelu_()
  724: auto result = at::empty_like(self);  in _prelu_kernel()
  758: auto result = at::empty_like(input, at::MemoryFormat::Contiguous);  in log_sigmoid_forward_cpu()
  759: auto buffer = at::empty_like(input, at::MemoryFormat::Contiguous);  in log_sigmoid_forward_cpu()
  768: …Tensor result_tmp = result.is_contiguous() ? result : at::empty_like(result, at::MemoryFormat::Con…  in log_sigmoid_forward_out_cpu()
  786: auto grad_input = at::empty_like(grad_output);  in log_sigmoid_backward_cuda()
  [all …]

Dropout.cpp
   13: #include <ATen/ops/empty_like.h>
   73: auto noise = feature_dropout ? make_feature_noise(input) : at::empty_like(input);  in _dropout_impl()
  107: return std::make_tuple(input, at::empty_like(input, input.options()));  in native_dropout_cpu()
  117: mask = at::empty_like(input, input.options().dtype(c10::CppTypeToScalarType<bool>::value));  in native_dropout_cpu()
/aosp_15_r20/external/pytorch/test/inductor/

test_triton_kernels.py
   294: out = torch.empty_like(x)
   302: out = torch.empty_like(grad)
   514: torch.empty_like(output),
   802: output = torch.empty_like(x)
   935: y = torch.empty_like(x)
   994: out = torch.empty_like(left)
  1018: out = torch.empty_like(right)
  1043: out = torch.empty_like(left)
  1167: out = torch.empty_like(x)
  1193: out = torch.empty_like(x)
  [all …]

test_inplacing_pass.py
  140: out = torch.empty_like(x)
  159: out = torch.empty_like(x)
  315: out1 = torch.empty_like(x)
  316: out2 = torch.empty_like(x)
  346: out = torch.empty_like(x)
  415: subtest(torch.empty_like, name="empty_like"),
/aosp_15_r20/external/pytorch/torch/

_meta_registrations.py
   193: return torch.empty_like(self, memory_format=torch.contiguous_format)
   214: return torch.empty_like(self).contiguous()
   559: return torch.empty_like(self, memory_format=torch.contiguous_format)
   662: return torch.empty_like(self, dtype=result_dtype)
   921: return torch.empty_like(self, memory_format=torch.legacy_contiguous_format)
  1525: solution = torch.empty_like(self)
  2015: return torch.empty_like(self, memory_format=torch.legacy_contiguous_format)
  2049: return torch.empty_like(self).contiguous()
  2060: return torch.empty_like(self).contiguous()
  2066: return torch.empty_like(self)
  [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/

Normalization.cpp
   13: #include <ATen/ops/empty_like.h>
   89: Tensor alpha = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm1d_impl()
   90: Tensor beta = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm1d_impl()
  198: Tensor alpha = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm2d_impl()
  199: Tensor beta = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm2d_impl()
  294: Tensor alpha = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm3d_impl()
  295: Tensor beta = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in q_batch_norm3d_impl()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/

Activation.mm
   55: …Tensor output = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::Pr…
   98: out = at::empty_like(self, MemoryFormat::Contiguous);
  142: …Tensor output_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::P…
  192: Tensor output_ = at::empty_like(self, self.suggest_memory_format());
  332: …Tensor output_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::P…
  366: auto output = at::empty_like(self);
  391: …Tensor grad_input_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryForma…
  445: auto grad_input = at::empty_like(grad_output);
  681: output_ = at::empty_like(output, MemoryFormat::Contiguous);
  726: grad_input_ = at::empty_like(grad_input, MemoryFormat::Contiguous);
  [all …]

WeightNorm.mm
   40: auto w = at::empty_like(v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   41: auto norms = at::empty_like(g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  101: auto grad_v = at::empty_like(saved_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  102: auto grad_g = at::empty_like(saved_g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);

RnnOps.mm
  311: Tensor hy = at::empty_like(hx[0], input.options());
  312: Tensor cy = at::empty_like(hx[1], input.options());
  718: Tensor output_out = at::empty_like(input);
  719: Tensor grad_state_out = at::empty_like(hx[0]);
  720: Tensor grad_cell_state_out = at::empty_like(hx[1]);
  745: Tensor grad_rec_weights = at::empty_like(recurrent_kernel_weights[i]);
  746: Tensor grad_weights = at::empty_like(kernel_weights[i]);
/aosp_15_r20/external/pytorch/torch/_library/

triton.py
   73: >>> output = torch.empty_like(x)
  198: >>> output = torch.empty_like(x)
  212: >>> # empty_like = torch.ops.aten.empty_like.default(x_1, pin_memory = False)
  216: >>> # 'in_ptr0': x_1, 'in_ptr1': y_1, 'out_ptr': empty_like,
  219: >>> # return empty_like
/aosp_15_r20/external/pytorch/test/torch_np/

test_basic.py
   27: w.empty_like,
  173: w.empty_like,
  477: out = (w.empty_like(x1), w.empty_like(x1))
  492: out = (w.empty_like(x1), w.empty_like(x1))
  504: out1, out2 = w.empty_like(x1), w.empty_like(x1)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/

Clone.cpp
    7: #include <ATen/ops/empty_like.h>
   29: // Copy all strides, this is marginally faster than calling empty_like  in clone()
   32: self = at::empty_like(src);  in clone()
   35: self = at::empty_like(src, src.options(), memory_format);  in clone()
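The comment at line 29 hints at the trade-off inside clone(): with MemoryFormat::Preserve the destination can simply mirror the source, otherwise it must be allocated in the requested layout before the copy. A generic sketch of that branching, not the Vulkan implementation itself (the helper name `clone_like` is illustrative):

```cpp
#include <ATen/ATen.h>

// Generic sketch of the clone() allocation choice referenced above
// (illustrative helper, not the Vulkan backend code).
at::Tensor clone_like(const at::Tensor& src, at::MemoryFormat memory_format) {
  at::Tensor self;
  if (memory_format == at::MemoryFormat::Preserve) {
    self = at::empty_like(src);  // destination mirrors src's layout
  } else {
    self = at::empty_like(src, src.options(), memory_format);
  }
  self.copy_(src);
  return self;
}
```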
/aosp_15_r20/external/pytorch/aten/src/ATen/

TensorOperators.h
    9: #include <ATen/ops/empty_like.h>
   19: ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y)) \
   22: ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y)) \
   25: ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y)) \
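The macro bodies quoted above implement the reversed scalar-tensor operators (e.g. `2 - tensor`) by materializing the scalar into a tensor shaped like the right-hand side and then applying the in-place op. A standalone sketch of the same idiom; the free function `rsub_scalar` is illustrative and not part of the header.

```cpp
#include <ATen/ATen.h>

// Reversed scalar-tensor subtraction in the style quoted above: broadcast the
// scalar into a tensor shaped like y, then subtract y in place.
// (Illustrative helper, not the macro-generated operator itself.)
at::Tensor rsub_scalar(const at::Scalar& x, const at::Tensor& y) {
  return at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y);
}
```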
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/

_decomposed.py
   101: return torch.empty_like(input, dtype=dtype)
   157: return torch.empty_like(input, dtype=dtype)
   287: return torch.empty_like(input, dtype=out_dtype)
   354: return torch.empty_like(input, dtype=out_dtype)
   622: return torch.empty_like(input, dtype=dtype)
   717: return torch.empty_like(input, dtype=out_dtype)
   930: return torch.empty_like(input, dtype=dtype)
   984: return torch.empty_like(input, dtype=output_dtype)
  1068: return torch.empty_like(input, dtype=dtype)
  1186: return torch.empty_like(input)
/aosp_15_r20/external/pytorch/test/

test_binary_ufuncs.py
  536: out = torch.empty_like(lhs_i64)
  540: out = torch.empty_like(lhs_i16)
  545: op(lhs_i16, rhs_i32, out=torch.empty_like(lhs_i64))
  553: out=torch.empty_like(lhs_i64, dtype=torch.bool),
  557: out = torch.empty_like(lhs_i64, dtype=torch.float16)
  560: out = torch.empty_like(lhs_i64, dtype=torch.bfloat16)
  563: out = torch.empty_like(lhs_i64, dtype=torch.float32)
  567: out = torch.empty_like(lhs_i64, dtype=torch.complex64)
  589: out = torch.empty_like(lhs_f64, dtype=torch.float16)
  592: out = torch.empty_like(lhs_f64, dtype=torch.bfloat16)
  [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

RNN.cu
   15: #include <ATen/ops/empty_like.h>
  530: auto workspace = at::empty_like(input_gates, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_lstm_cell_cuda()
  531: auto hy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_lstm_cell_cuda()
  532: auto cy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_lstm_cell_cuda()
  582: auto grad_gates = at::empty_like(workspace, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_lstm_cell_backward_impl_cuda()
  583: auto grad_cx = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_lstm_cell_backward_impl_cuda()
  617: auto hy = at::empty_like(hx, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_gru_cell_cuda()
  646: auto grad_hx = at::empty_like(grad_hy, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in _thnn_fused_gru_cell_backward_cuda()

RangeFactories.cu
   16: #include <ATen/ops/empty_like.h>
   79: Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;  in linspace_cuda_out()
  130: Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;  in logspace_cuda_out()
  196: Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;  in range_cuda_out()
  258: Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;  in arange_cuda_out()
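The four *_cuda_out factories above share one idiom: when the caller's `result` tensor is non-contiguous, the values are computed into a contiguous scratch tensor obtained from `empty_like` and copied back afterwards. A sketch of that idiom follows; the helper name and `fill_into` callback are illustrative, and `at::MemoryFormat::Contiguous` stands in for the LEGACY_CONTIGUOUS_MEMORY_FORMAT macro.

```cpp
#include <functional>
#include <ATen/ATen.h>

// Sketch of the "contiguous scratch" idiom used by the out-variants above:
// compute into a contiguous buffer when `result` is not contiguous, then copy
// the buffer back into the caller's tensor.
at::Tensor& fill_out_contiguously(at::Tensor& result,
                                  const std::function<void(at::Tensor&)>& fill_into) {
  const bool is_contiguous = result.is_contiguous();
  at::Tensor r = !is_contiguous
      ? at::empty_like(result, at::MemoryFormat::Contiguous)  // scratch buffer
      : result;
  fill_into(r);
  if (!is_contiguous) {
    result.copy_(r);  // write the computed values back into the original layout
  }
  return result;
}
```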
/aosp_15_r20/external/pytorch/test/distributed/

test_multi_threaded_pg.py
   45: torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
   62: torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
   79: torch.empty_like(input_tensor) for _ in range(dist.get_world_size())
  153: torch.empty_like(input_tensor) for _ in range(self.world_size)
  210: output_tensor_list = [torch.empty_like(tensor) for tensor in input_tensor_list]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/hip/flash_attn/

flash_api.hip
   48: #include <ATen/ops/empty_like.h>
  147: if (head_size_og % 8 != 0) { out = at::empty_like(q_padded); }
  149: out = at::empty_like(q_padded);
  372: dq = at::empty_like(q);
  381: dk = at::empty_like(k);
  390: dv = at::empty_like(k);
  432: at::Tensor delta = at::empty_like(softmax_lse).contiguous();
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/

test_fully_shard_overlap.py
   83: dummy_ag_output = torch.empty_like(lin.weight)
  105: dummy_ag_output = torch.empty_like(lin.weight)
  114: dummy_ag_output = torch.empty_like(lin.weight)
  122: dummy_rs_input = torch.empty_like(lin.weight)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/flash_attn/

flash_api.cpp
    22: #include <ATen/ops/empty_like.h>
   437: if (head_size_og % 8 != 0) { out = at::empty_like(q_padded); }  in mha_fwd()
   439: out = at::empty_like(q_padded);  in mha_fwd()
   676: if (head_size_og % 8 != 0) { out = at::empty_like(q_padded); }  in mha_varlen_fwd()
   678: out = at::empty_like(q_padded);  in mha_varlen_fwd()
   904: dq = at::empty_like(q);  in mha_bwd()
   913: dk = at::empty_like(k);  in mha_bwd()
   922: dv = at::empty_like(v);  in mha_bwd()
  1128: dq = at::empty_like(q);  in mha_varlen_bwd()
  1137: dk = at::empty_like(k);  in mha_varlen_bwd()
  [all …]
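In both the HIP and CUDA flash-attention backends listed above, the backward entry points allocate gradient buffers with `empty_like` only when the caller does not supply them. A hedged sketch of that optional-output pattern; the struct and function names are illustrative, and the real mha_bwd signatures take many more arguments.

```cpp
#include <optional>
#include <ATen/ATen.h>

// Sketch of the optional-gradient pattern seen in mha_bwd/mha_varlen_bwd:
// reuse caller-provided gradient tensors when given, otherwise allocate
// buffers shaped like q, k and v. (Illustrative helper, not the flash API.)
struct AttentionGrads {
  at::Tensor dq, dk, dv;
};

AttentionGrads make_attention_grads(const at::Tensor& q,
                                    const at::Tensor& k,
                                    const at::Tensor& v,
                                    const std::optional<at::Tensor>& dq_opt,
                                    const std::optional<at::Tensor>& dk_opt,
                                    const std::optional<at::Tensor>& dv_opt) {
  AttentionGrads grads;
  grads.dq = dq_opt.has_value() ? *dq_opt : at::empty_like(q);
  grads.dk = dk_opt.has_value() ? *dk_opt : at::empty_like(k);
  grads.dv = dv_opt.has_value() ? *dv_opt : at::empty_like(v);
  return grads;
}
```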