/aosp_15_r20/external/pytorch/aten/src/ATen/native/

SortingUtils.h
    24    auto result_sizes = self.sizes().vec();  in _reduction_with_indices_allocate_or_resize_output() local
    64    auto result_sizes = self.sizes().vec();  in _allocate_or_resize_output_with_indices() local

ReduceOpsUtils.h
    46    std::vector<int64_t> result_sizes;  in _dimreduce_setup() local

TensorShape.cpp
    2687  auto result_sizes = tensors[0].sizes().vec();  in maybe_native_stack() local
    2765  auto result_sizes = tensors[0].sizes().vec();  in stack() local
    2795  auto result_sizes = tensors[0].sizes().vec();  in stack_out() local
    3066  auto result_sizes = DimVector(self.sizes());  in sparse_compressed_transpose() local

LinearAlgebra.cpp
    304   const auto result_sizes = result.sizes();  in common_checks_baddbmm_bmm() local
    1416  const auto result_sizes = result.sizes();  in addmm_impl_cpu_() local
    1687  const auto result_sizes = result.sizes();  in baddbmm_with_gemm_() local

TensorAdvancedIndexing.cpp
    913   auto result_sizes = result_nonzero.sizes().vec();  in TORCH_IMPL_FUNC() local
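Most of the hits above share one idiom: sizes() returns a non-owning IntArrayRef view, and vec() copies it into an owned std::vector<int64_t> that can then be edited. A minimal sketch of how the stack()-style call sites (maybe_native_stack, compute_shape_stack, varStackSerialOut) use it; the helper name stack_result_sizes is made up for illustration:

    #include <ATen/ATen.h>
    #include <vector>

    // Illustrative helper: compute the shape of stack(tensors, dim) the way
    // the indexed call sites do. sizes() is a view; vec() makes an owned,
    // mutable copy that is safe to modify.
    std::vector<int64_t> stack_result_sizes(
        const std::vector<at::Tensor>& tensors, int64_t dim) {
      auto result_sizes = tensors[0].sizes().vec();
      // stack() adds one dimension of extent tensors.size() at position dim.
      result_sizes.insert(result_sizes.begin() + dim,
                          static_cast<int64_t>(tensors.size()));
      return result_sizes;
    }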
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

Blas.cpp
    347   IntArrayRef result_sizes = result.sizes();  in addmm_out_cuda_impl() local
    526   IntArrayRef result_sizes = result.sizes();  in baddbmm_out_cuda_impl() local
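In these hits the sizes are only read for validation, so the call sites keep the non-owning IntArrayRef view instead of copying it. A hedged sketch of such a check for 2-D addmm inputs; check_addmm_result is an illustrative name, not the actual helper:

    #include <ATen/ATen.h>

    // Illustrative shape check in the spirit of addmm_out_cuda_impl(): the
    // view returned by sizes() stays valid because `result` outlives it.
    void check_addmm_result(const at::Tensor& result,
                            const at::Tensor& mat1,
                            const at::Tensor& mat2) {
      at::IntArrayRef result_sizes = result.sizes();
      TORCH_CHECK(
          result_sizes[0] == mat1.size(0) && result_sizes[1] == mat2.size(1),
          "result shape mismatch: expected ", mat1.size(0), "x", mat2.size(1));
    }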
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/

SparseBlas.cpp
    132   auto result_sizes = DimVector(mat1.sizes().slice(0, mat1.dim() - 2));  in sparse_sampled_addmm_out_sparse_csr_cpu() local
SparseCsrTensorMath.cpp
    225   const auto result_sizes = infer_size(sparse.sizes(), scalar.sizes());  in intersection_binary_op_with_wrapped_scalar() local
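infer_size() computes a broadcast shape from two size lists, which is how the wrapped-scalar path above sizes its result. A standalone sketch with made-up extents:

    #include <ATen/ExpandUtils.h>
    #include <iostream>

    int main() {
      // Broadcast {3, 1, 5} against {4, 1}: align trailing dims, take the max.
      const auto result_sizes = at::infer_size({3, 1, 5}, {4, 1});
      for (const auto s : result_sizes) {
        std::cout << s << ' ';  // prints: 3 4 5
      }
      return 0;
    }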
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/

SparseBlas.cpp
    50    auto result_sizes = DimVector(mat1.sizes().slice(0, mat1.dim() - 2));  in sparse_sampled_addmm_out_sparse_csr_cuda() local
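Both sampled-addmm variants (CPU and CUDA SparseBlas.cpp) assemble the result shape from mat1's leading batch dimensions plus the two matmul output dimensions. A sketch of that construction; matmul_result_sizes is an illustrative name:

    #include <ATen/ATen.h>

    // Illustrative: batch dims are everything before mat1's last two dims;
    // the trailing dims are rows(mat1) x cols(mat2).
    at::DimVector matmul_result_sizes(const at::Tensor& mat1,
                                      const at::Tensor& mat2) {
      auto result_sizes = at::DimVector(mat1.sizes().slice(0, mat1.dim() - 2));
      result_sizes.push_back(mat1.size(-2));
      result_sizes.push_back(mat2.size(-1));
      return result_sizes;
    }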
/aosp_15_r20/external/pytorch/torch/csrc/lazy/ts_backend/

ts_backend_impl.cpp
    234   at::IntArrayRef result_sizes = result.sizes();  in ExecuteComputation() local
/aosp_15_r20/external/pytorch/torch/csrc/autograd/

python_variable_indexing.cpp
    199   std::optional<SymIntArrayRef> result_sizes = result.is_nested()  in applySlicing() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/xpu/

Blas.cpp
    40    IntArrayRef result_sizes = result.sizes();  in addmm_out() local
/aosp_15_r20/external/eigen/bench/tensors/

tensor_benchmarks.h
    501   Eigen::array<TensorIndex, 2> result_sizes;  in convolution() local
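In the Eigen benchmark, result_sizes holds the output extents of a valid 2-D convolution, where each convolved dimension shrinks by the kernel extent minus one. A hedged, self-contained sketch with made-up extents, using Eigen::Index in place of the benchmark's TensorIndex typedef:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 2> input(128, 128);
      Eigen::Tensor<float, 2> kernel(3, 3);
      input.setRandom();
      kernel.setRandom();
      const Eigen::array<Eigen::Index, 2> dims{0, 1};  // convolve both dims
      // Valid convolution: each output extent is 128 - 3 + 1 = 126.
      const Eigen::array<Eigen::Index, 2> result_sizes{126, 126};
      Eigen::Tensor<float, 2> result(result_sizes[0], result_sizes[1]);
      result = input.convolve(kernel, dims);
      return 0;
    }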
/aosp_15_r20/external/pytorch/aten/src/ATen/

TensorIndexing.h
    536   std::optional<SymIntArrayRef> result_sizes = result.is_nested()  in applySlicing() local
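Both copies of applySlicing (python_variable_indexing.cpp and TensorIndexing.h) guard the size query because a nested tensor has no single well-defined shape. A hedged sketch of that guard wrapped in an illustrative helper (maybe_sizes is not the real name):

    #include <ATen/ATen.h>
    #include <optional>

    // Illustrative: only capture symbolic sizes when they are well-defined,
    // i.e. when the tensor is not nested.
    std::optional<c10::SymIntArrayRef> maybe_sizes(const at::Tensor& result) {
      std::optional<c10::SymIntArrayRef> result_sizes = result.is_nested()
          ? std::optional<c10::SymIntArrayRef>(std::nullopt)
          : std::optional<c10::SymIntArrayRef>(result.sym_sizes());
      return result_sizes;
    }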
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/

shape_inference.cpp
    1113  auto result_sizes = tensors[0].sizes().vec();  in compute_shape_stack() local
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/

ops.cpp
    775   auto result_sizes = inputs[0].sizes().vec();  in varStackSerialOut() local