
Searched defs:numel (Results 1 – 25 of 147) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/
InferSize.h
22 NumelType numel, in infer_size_impl()
67 inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) { in infer_size()
73 inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) { in infer_size_dv()
81 c10::SymInt numel) { in infer_size_dv()
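
The InferSize.h hits are PyTorch's view-shape inference: given a requested shape that may contain a single -1 and the tensor's total element count, the -1 dimension is solved for so the product of sizes equals numel. A minimal self-contained sketch of that logic (the name infer_size_sketch and the assert-based error handling are illustrative, not the real API):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Resolve a shape that may contain one -1 so its product equals numel.
    std::vector<int64_t> infer_size_sketch(std::vector<int64_t> shape, int64_t numel) {
      int64_t known = 1;
      int infer_dim = -1;
      for (int i = 0; i < (int)shape.size(); ++i) {
        if (shape[i] == -1) {
          assert(infer_dim == -1 && "only one dimension may be -1");
          infer_dim = i;
        } else {
          known *= shape[i];
        }
      }
      if (infer_dim != -1) {
        assert(known != 0 && numel % known == 0 && "shape invalid for numel");
        shape[infer_dim] = numel / known;
      } else {
        assert(known == numel && "shape must match numel");
      }
      return shape;
    }

For example, infer_size_sketch({2, -1}, 6) resolves to {2, 3}.
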
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
KernelUtils.cuh
48 const index_t numel, in fastSpecializedAtomicAdd()
88 const index_t numel, in fastSpecializedAtomicAdd()
129 const index_t numel, in fastSpecializedAtomicAdd()
138 const index_t numel, in fastAtomicAdd()
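
The KernelUtils.cuh hits pass numel into fastSpecializedAtomicAdd/fastAtomicAdd so the callee can bounds-check and handle tail elements; the specialized paths pack half-precision atomics when alignment allows. A host-side scalar analogue using std::atomic, purely to show the call shape (the function name and CAS loop are illustrative, not the device code):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Stand-in for a device-side atomic add at tensor[index]; numel bounds the buffer.
    void fast_atomic_add_sketch(std::atomic<float>* tensor, int64_t index,
                                int64_t numel, float value) {
      assert(index >= 0 && index < numel);
      float old = tensor[index].load(std::memory_order_relaxed);
      // CAS loop: fetch_add on atomic<float> only arrives in C++20.
      while (!tensor[index].compare_exchange_weak(old, old + value)) {}
    }
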
ScatterGatherKernel.cu
24 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
34 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
43 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
52 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
62 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
72 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
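
The six ScatterGatherKernel.cu hits are a family of reduction functors sharing one call shape, operator()(self_data_start, index, numel, src_data); each variant applies a different reduction at self_data_start[index]. A hedged host-side sketch of the additive case (the struct name is invented):

    #include <cassert>
    #include <cstdint>

    template <typename scalar_t>
    struct reduce_add_sketch {
      void operator()(scalar_t* self_data_start, int64_t index,
                      int64_t numel, const scalar_t* src_data) const {
        assert(index >= 0 && index < numel);  // numel bounds the writable range
        self_data_start[index] += *src_data;
      }
    };
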
EmbeddingBackwardKernel.cu
48 const int64_t *num_of_segments_ptr, int64_t numel) { in krn_partials_per_segment()
83 const index_t *offset2bag, const index_t *count, ptrdiff_t numel, in compute_grad_weight_bags()
128 ptrdiff_t numel, in compute_grad_weight()
225 const ptrdiff_t numel = sorted_indices.numel(); in embedding_backward_cuda_kernel() local
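
In the embedding backward kernel, numel is the count of sorted indices, and gradient rows are accumulated per segment of equal indices. A scalar analogue under that assumption (all names here are hypothetical):

    #include <cstdint>

    // Accumulate grad_output rows into grad_weight rows keyed by sorted indices;
    // numel is the number of index entries, dim the embedding width.
    void embedding_backward_sketch(const int64_t* sorted_indices,
                                   const int64_t* orig_rows,
                                   const float* grad_output, float* grad_weight,
                                   int64_t numel, int64_t dim) {
      for (int64_t i = 0; i < numel; ++i) {
        const float* src = grad_output + orig_rows[i] * dim;
        float* dst = grad_weight + sorted_indices[i] * dim;
        for (int64_t d = 0; d < dim; ++d) dst[d] += src[d];
      }
    }
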
DistributionTemplates.h
50 const uint64_t numel = static_cast<uint64_t>(total_elements); in calc_execution_policy() local
121 int64_t numel = iter.numel(); in distribution_nullary_kernel() local
180 int numel, in distribution_binary_elementwise_kernel()
250 int64_t numel = iter.numel(); in distribution_binary_kernel() local
SortStable.cu
143 const auto numel = nsort * nsegments; in segmented_sort_pairs_by_full_sort() local
191 const auto numel = nsort * nsegments; in segmented_sort_pairs() local
224 const auto numel = self.numel(); in launch_stable_sort_kernel() local
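
SortStable.cu treats the input as nsegments independent rows of nsort keys, which is why the hits compute numel = nsort * nsegments. A host analogue of segmented stable sorting (names illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Sort each segment of nsort elements independently; numel = nsegments * nsort.
    void segmented_sort_sketch(std::vector<float>& data,
                               int64_t nsegments, int64_t nsort) {
      for (int64_t s = 0; s < nsegments; ++s) {
        auto begin = data.begin() + s * nsort;
        std::stable_sort(begin, begin + nsort);
      }
    }
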
Indexing.cu
52 int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) { in indexing_backward_kernel()
130 int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) { in indexing_backward_kernel_stride_1()
186 int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) { in indexing_backward_kernel_small_stride()
230 int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, in indexing_backward_kernel_quantized()
297 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
307 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
316 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
326 …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con… in operator ()()
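
The Indexing.cu backward kernels all share the same job, scatter-accumulating gradient slices into the input gradient at positions given by indices; the stride_1 and small_stride variants specialize that loop for common layouts. A scalar sketch of the core (parameter names are simplified from the signatures above):

    #include <cstdint>

    // For each index, add (or assign when accumulate == false) a stride-wide
    // slice of grad_output into grad_input.
    void indexing_backward_sketch(float* grad_input, const int64_t* indices,
                                  const float* grad_output, int64_t num_indices,
                                  int64_t stride, bool accumulate) {
      for (int64_t i = 0; i < num_indices; ++i) {
        float* dst = grad_input + indices[i] * stride;
        const float* src = grad_output + i * stride;
        for (int64_t d = 0; d < stride; ++d)
          dst[d] = accumulate ? dst[d] + src[d] : src[d];
      }
    }
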
MultiTensorApply.cuh
160 const auto numel = tensor_lists[0][t].numel(); in multi_tensor_apply() local
245 const auto numel = tensor_lists[0][t].numel(); in multi_tensor_apply() local
327 const auto numel = tensor_lists[0][tensor_index].numel(); in multi_tensor_apply_for_fused_optimizer() local
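
multi_tensor_apply reads each tensor's numel to split it into fixed-size chunks, one chunk per block, batched into kernel launches. The chunk count is just a ceiling division (helper name invented):

    #include <cstdint>

    // A tensor of numel elements needs ceil(numel / chunk_size) chunks.
    inline int64_t num_chunks_sketch(int64_t numel, int64_t chunk_size) {
      return (numel + chunk_size - 1) / chunk_size;
    }
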
RNN.cu
57 void getLaunchConfig(dim3* block, dim3* grid, int64_t numel) { in getLaunchConfig()
377 int64_t numel = cx.numel(); in lstm_forward_impl() local
414 int64_t numel = cx.numel(); in lstm_backward_impl() local
450 int64_t numel = hx.numel(); in gru_forward_impl() local
484 int64_t numel = grad_hy.numel(); in gru_backward_impl() local
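
getLaunchConfig derives a block/grid pair from numel; the usual pattern is one thread per element with a capped block size. An illustrative host-side version using plain integers in place of dim3 (the exact cap and rounding in RNN.cu may differ):

    #include <algorithm>
    #include <cstdint>

    // One thread per element: capped block size, enough blocks to cover numel.
    void get_launch_config_sketch(int64_t numel, int64_t max_threads,
                                  int64_t* block, int64_t* grid) {
      *block = std::max<int64_t>(1, std::min<int64_t>(numel, max_threads));
      *grid = (numel + *block - 1) / *block;
    }
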
/aosp_15_r20/external/pytorch/c10/xpu/test/impl/
XPUTest.h
5 static inline void initHostData(int* hostData, int numel) { in initHostData()
11 static inline void clearHostData(int* hostData, int numel) { in clearHostData()
17 static inline void validateHostData(int* hostData, int numel) { in validateHostData()
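
These XPU test helpers fill, zero, and verify a host buffer of numel ints. Plausible bodies consistent with the names (the exact fill pattern is an assumption):

    #include <cassert>

    static inline void init_host_data_sketch(int* hostData, int numel) {
      for (int i = 0; i < numel; ++i) hostData[i] = i;  // assumed pattern
    }
    static inline void clear_host_data_sketch(int* hostData, int numel) {
      for (int i = 0; i < numel; ++i) hostData[i] = 0;
    }
    static inline void validate_host_data_sketch(const int* hostData, int numel) {
      for (int i = 0; i < numel; ++i) assert(hostData[i] == i);
    }
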
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
quantized_test.cpp
107 int numel = 10; in TEST() local
125 int numel = 10; in TEST() local
218 auto numel = c10::multiply_integers(shape); in TEST() local
261 auto numel = c10::multiply_integers(shape); in TEST() local
295 const int numel = 132; in TEST() local
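
Two of the quantized_test.cpp hits compute numel with c10::multiply_integers, i.e. the product of all dimension sizes. A standalone equivalent for this use:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // numel is the product of the shape's sizes; an empty shape yields 1 (a scalar).
    int64_t multiply_integers_sketch(const std::vector<int64_t>& shape) {
      return std::accumulate(shape.begin(), shape.end(), int64_t{1},
                             std::multiplies<int64_t>());
    }
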
/aosp_15_r20/external/executorch/runtime/core/exec_aten/testing_util/
tensor_util.cpp
41 size_t numel, in data_is_close()
242 std::ostream& print_data(std::ostream& os, const T* data, size_t numel) { in print_data()
262 std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) { in print_data()
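
ExecuTorch's data_is_close compares two buffers of numel elements for approximate equality. A sketch in the spirit of an allclose check (the rtol/atol defaults here are assumptions, not the real ones):

    #include <cmath>
    #include <cstddef>

    bool data_is_close_sketch(const float* a, const float* b, size_t numel,
                              double rtol = 1e-5, double atol = 1e-8) {
      for (size_t i = 0; i < numel; ++i) {
        const double diff = std::fabs(double(a[i]) - double(b[i]));
        if (diff > atol + rtol * std::fabs(double(b[i]))) return false;
      }
      return true;
    }
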
/aosp_15_r20/external/pytorch/torch/_inductor/codegen/
simd.py
1065 def generate_node_schedule(self, nodes, numel, rnumel): argument
1206 def select_index_dtype(cls, node_schedule, numel, reduction_numel): argument
1235 def has_non_contiguous_pw_in_reduction_kernel(self, node_schedule, numel, rnumel): argument
1258 def get_kernel_args(self, node_schedule, numel, reduction_numel): argument
1296 self, node_schedule, buf_accesses, numel, reduction_numel argument
1675 def select_tiling(cls, node_schedule, numel, reduction_numel=sympy.Integer(1)): argument
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/
CUDASymmetricMemoryOps.cu
52 size_t numel, in init_elementwise_launch_config()
80 size_t numel, in multimem_all_reduce_kernel()
173 size_t numel, in multimem_one_shot_all_reduce_kernel()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Unique.cpp
42 int64_t numel = input.numel(); in unique_cpu_bool_template() local
165 int64_t numel = input.numel(); in unique_cpu_sorted_template() local
273 int64_t numel = input.numel(); in unique_consecutive_cpu_template() local
391 int64_t numel = input_flat.size(1); in _unique_dim_cpu_template() local
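
The CPU unique templates all start from numel = input.numel() and collapse runs of equal values, sorting first when the input is not already sorted. The sorted path reduces to the standard sort-then-dedupe idiom:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Core of a CPU unique(): sort, then erase adjacent duplicates.
    std::vector<int64_t> unique_sorted_sketch(std::vector<int64_t> input) {
      std::sort(input.begin(), input.end());
      input.erase(std::unique(input.begin(), input.end()), input.end());
      return input;
    }
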
EmbeddingBag.cpp
129 auto numel = add_indices.numel(); in index_select_add() local
321 auto numel = add_indices.numel(); in index_select_add() local
466 auto numel = add_indices.numel(); in index_select_add() local
517 auto numel = add_indices.numel(); in index_select_scale_add() local
705 auto numel = add_indices.numel(); in index_select_scale_add() local
842 auto numel = add_indices.numel(); in index_select_scale_add() local
1534 int64_t numel = indices.numel(); in _embedding_bag_dense_backward_cpu_sum_mean() local
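
In EmbeddingBag.cpp, numel is the number of lookup indices; index_select_add accumulates the selected embedding rows into each bag, and index_select_scale_add additionally applies a per-sample weight. A scalar analogue (all parameter names are simplifications):

    #include <cstdint>

    // numel indices, each adding one dim-wide row of weight into its bag's
    // output row; scale models per-sample weights (nullptr means 1.0).
    void index_select_scale_add_sketch(const int64_t* indices,
                                       const int64_t* bag_for_index,
                                       const float* weight, const float* scale,
                                       float* output, int64_t numel, int64_t dim) {
      for (int64_t i = 0; i < numel; ++i) {
        const float* src = weight + indices[i] * dim;
        float* dst = output + bag_for_index[i] * dim;
        const float s = scale ? scale[i] : 1.0f;
        for (int64_t d = 0; d < dim; ++d) dst[d] += s * src[d];
      }
    }
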
/aosp_15_r20/external/executorch/extension/android/src/main/java/org/pytorch/executorch/
Tensor.java
351 public long numel() { in numel() method in Tensor
356 public static long numel(long[] shape) { in numel() method in Tensor
648 final long numel = numel(shape); in checkShapeAndDataCapacityConsistency() local
759 long numel = 1; in fromByteArray() local
/aosp_15_r20/external/executorch/backends/vulkan/runtime/api/containers/
StagingBuffer.h
36 const size_t numel) in StagingBuffer()
70 inline size_t numel() { in numel() function
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_exp.cpp
35 const size_t numel, in exp_data()
57 const size_t numel, in exp_data()
op_sigmoid.cpp
34 const size_t numel, in sigmoid_data()
56 const size_t numel, in sigmoid_data()
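
Both optimized kernels share one shape: walk numel contiguous elements and apply the scalar function (the real versions are likely vectorized; this shows only the semantics):

    #include <cmath>
    #include <cstddef>

    void exp_data_sketch(const float* in, float* out, size_t numel) {
      for (size_t i = 0; i < numel; ++i) out[i] = std::exp(in[i]);
    }
    void sigmoid_data_sketch(const float* in, float* out, size_t numel) {
      for (size_t i = 0; i < numel; ++i) out[i] = 1.0f / (1.0f + std::exp(-in[i]));
    }
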
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
PixelShuffleKernel.cpp
28 int64_t numel = input.numel(); in cpu_pixel_shuffle() local
126 int64_t numel = input.numel(); in cpu_pixel_unshuffle() local
169 int64_t numel = input.numel(); in cpu_pixel_unshuffle_channels_last() local
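
cpu_pixel_shuffle walks all numel elements as a pure index remap: an input of shape (C*r*r, H, W) becomes (C, H*r, W*r). A direct, unoptimized sketch of that mapping:

    #include <cstdint>

    // out[c][h*r + i][w*r + j] = in[c*r*r + i*r + j][h][w]
    void pixel_shuffle_sketch(const float* in, float* out,
                              int64_t C, int64_t H, int64_t W, int64_t r) {
      for (int64_t c = 0; c < C; ++c)
        for (int64_t i = 0; i < r; ++i)
          for (int64_t j = 0; j < r; ++j)
            for (int64_t h = 0; h < H; ++h)
              for (int64_t w = 0; w < W; ++w)
                out[(c * H * r + h * r + i) * W * r + (w * r + j)] =
                    in[(((c * r + i) * r + j) * H + h) * W + w];
    }
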
/aosp_15_r20/external/pytorch/torch/_inductor/codegen/aoti_runtime/
implementation.cpp
39 int64_t numel; in convert_handle_to_arrayref_tensor() local
79 void assert_numel(const ArrayRefTensor<T>& tensor, uint64_t numel) { in assert_numel()
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/
PrepackNode.cpp
54 size_t numel = utils::multiply_integers(packed->sizes()); in create_staging_buffer() local
61 size_t numel = utils::multiply_integers(tref->sizes); in create_staging_buffer() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
SparseCsrTensorMath.cu
53 …_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { in convert_indices_from_coo_to_csr_cuda_kernel()
69 int64_t numel = input.numel(); in convert_indices_from_coo_to_csr_cuda() local
572 auto numel = values.numel(); in reduce_sparse_csr_dim1_cuda_template() local
631 auto numel = values.numel(); in reduce_sparse_csr_dim01_cuda_template() local
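
convert_indices_from_coo_to_csr turns numel COO row indices into size + 1 CSR row pointers: count the entries per row, then prefix-sum. A CPU sketch of the same conversion:

    #include <cstdint>
    #include <vector>

    // crow[r+1] ends up holding the number of nonzeros in rows 0..r;
    // size is the row count, numel the number of nonzeros.
    std::vector<int64_t> coo_to_csr_sketch(const int64_t* row_indices,
                                           int64_t size, int64_t numel) {
      std::vector<int64_t> crow(size + 1, 0);
      for (int64_t i = 0; i < numel; ++i) crow[row_indices[i] + 1]++;
      for (int64_t r = 0; r < size; ++r) crow[r + 1] += crow[r];
      return crow;
    }
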
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/
ComputeGraph.cpp
347 const size_t numel) { in add_staging()
536 const size_t numel) { in copy_into_staging()
545 const size_t numel) { in copy_from_staging()
