/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/

ScatterGatherKernel.cpp
    35 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
    40 | constexpr void operator() (bool * self_data, bool * src_data) const { | in operator ()()
    49 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
    59 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
    69 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
    79 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
    89 | constexpr void operator() (at::opmath_type<scalar_t> * self_data, scalar_t * src_data) const { | in operator ()()
   233 | auto* self_data = self_data_bytes; | in operator ()() local
   327 | auto* self_data = self_data_bytes; | in operator ()() local
   423 | auto* self_data = self_data_bytes; | in operator ()() local
   [all …]

ReduceOpsKernel.cpp
    86 | const scalar_t* self_data, auto self_dim_stride, scalar_t init_val) { | in cumsum_cpu_kernel()
   105 | const scalar_t* self_data, auto self_dim_stride, scalar_t init_val) { | in cumprod_cpu_kernel()
   124 | const scalar_t* self_data, auto self_dim_stride, scalar_t init_val) { | in logcumsumexp_cpu_kernel()
   238 | scalar_t* self_data = (scalar_t*)self_data_bytes; | in norm_kernel_tensor_iterator_impl() local
   395 | scalar_t* self_data = (scalar_t*)self_data_bytes; | in argmax_kernel_impl() local
   419 | scalar_t* self_data = (scalar_t*)self_data_bytes; | in argmin_kernel_impl() local

TensorCompareKernel.cpp
   112 | const scalar_t* self_data, auto self_dim_stride) { | in min_kernel_impl()
   145 | const scalar_t* self_data, auto self_dim_stride) { | in max_kernel_impl()
   192 | const scalar_t* self_data, auto self_dim_stride) { | in aminmax_kernel()
   259 | const scalar_t* self_data = (scalar_t*)self_data_bytes; | in mode_kernel_impl() local

IndexKernel.cpp
   207 | auto* self_data = reinterpret_cast<scalar_t*>(self_data_bytes); | in index_fill_kernel() local
   233 | auto* self_data = reinterpret_cast<scalar_t*>(self_data_bytes); | in index_fill_kernel() local
   266 | auto* self_data = reinterpret_cast<scalar_t*>(self_data_bytes); | in index_copy_kernel() local
   289 | auto* self_data = reinterpret_cast<scalar_t*>(self_data_bytes); | in index_copy_kernel() local

/aosp_15_r20/external/executorch/kernels/portable/cpu/

op_constant_pad_nd.cpp
    33 | const CTYPE* self_data, | in apply_padding_to_dim()
   109 | const CTYPE* self_data = self.const_data_ptr<CTYPE>(); | in constant_pad_nd_out_impl() local

op_to_copy.cpp
    21 | auto self_data = self.mutable_data_ptr<SELF_CTYPE>(); | in _to_impl() local

op_scatter_add.cpp
    91 | const CTYPE* self_data = self.const_data_ptr<CTYPE>(); | in scatter_add_out() local

op__to_dim_order_copy.cpp
    51 | auto self_data = self.mutable_data_ptr<SELF_CTYPE>(); | in _to_dim_order_copy_impl() local

vec_ops.h
   149 | const T* __restrict__ self_data, | in vec_addmm()

/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/

Linear.cpp
    89 | const ValueRef self_data, | in add_addmm_naive_node()
   139 | const ValueRef self_data, | in add_addmm_optimized_node()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/

QTensor.cpp
   161 | underlying_t* self_data = self_contig.data_ptr<underlying_t>(); | in make_per_channel_quantized_tensor_cpu() local
   258 | void* self_data = self_contig.data_ptr(); | in equal_quantized_cpu() local

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

IndexKernel.cu
   134 | auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); | in index_fill_kernel_impl() local
   172 | auto* const __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); | in index_copy_kernel_impl() local

UniqueCub.cu
   260 | const bool* self_data = self.const_data_ptr<bool>(); | in operator ()() local

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/

MakePerTensorQuantizedTensor.cpp
    30 | underlying_t* self_data = self_contig.data_ptr<underlying_t>(); | in make_per_tensor_quantized_tensor_cpu() local

/aosp_15_r20/external/pytorch/aten/src/ATen/native/

TensorDimApply.h
    13 | const T1* self_data = self.const_data_ptr<T1>(); | in tensor_dim_apply3() local

TensorAdvancedIndexing.cpp
  1026 | auto self_data = static_cast<char*>(selfSlice.data_ptr()) + self_i * self_stride_bytes; | in TORCH_IMPL_FUNC() local
  1123 | auto self_data = static_cast<char*>(selfSlice.data_ptr()) + self_i * self_stride_bytes; | in index_reduce_func_impl() local
  1376 | auto self_data = static_cast<const char*>(selfSlice_data) + self_i * self_stride_bytes; | in index_select_out_cpu_() local
  1405 | … auto self_data = static_cast<const char*>(selfSlice_data) + self_i * self_stride_bytes; | in index_select_out_cpu_() local

TriangularOps.cpp
    89 | auto self_data = self.const_data_ptr<scalar_t>(); | in apply_triu_tril() local

ReduceOps.cpp
   772 | void cummax_cummin_helper(const T1* self_data, T1* values_data, T2* indices_data, | in cummax_cummin_helper()
  2253 | char* self_data = data[0]; | in cpu_equal() local
  2279 | char* self_data = data[0]; | in cpu_equal() local

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/linalg/

BatchLinearAlgebraLib.cpp
  1151 | auto self_data = self.data_ptr<scalar_t>(); | in apply_orgqr() local
  1471 | const auto self_data = self.data_ptr<scalar_t>(); | in lu_factor_looped_cusolver() local

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/

ops.cpp
   113 | const void* self_data = self_contig->const_data_ptr(); | in reshape_copy_out() local