/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/ |
H A D | Split.cpp | 23 const std::vector<int64_t>& split_sizes, in add_split_with_sizes_default_node()
  | 104 std::vector<int64_t> split_sizes = *(graph.get_int_list(split_sizes_ref)); in add_split_with_sizes_default_node() local
  | 127 std::vector<int64_t> split_sizes(size / split_size, split_size); in add_split_tensor_node() local
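The Vulkan backend handles both the explicit-sizes op and the fixed-size split, where line 127 expands an even split_size into a split_sizes vector. A minimal PyTorch-level sketch of that equivalence (standard torch APIs, not code from the Vulkan backend):

```python
import torch

x = torch.arange(12).reshape(6, 2)

# split.Tensor: a single integer split size ...
even = torch.split(x, 2, dim=0)
# ... behaves like split_with_sizes with the expanded list [2, 2, 2],
# mirroring std::vector<int64_t> split_sizes(size / split_size, split_size).
explicit = torch.split(x, [2, 2, 2], dim=0)

assert all(torch.equal(a, b) for a, b in zip(even, explicit))
```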
|
/aosp_15_r20/external/executorch/kernels/test/ |
H A D | op_split_with_sizes_copy_test.cpp | 24 exec_aten::ArrayRef<int64_t> split_sizes, in op_split_with_sizes_copy_out()
  | 43 exec_aten::ArrayRef<int64_t> split_sizes = exec_aten::ArrayRef<int64_t>( in test_tensor_shape_dynamism() local
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/ |
H A D | TensorShape.cu | 188 at::IntArrayRef split_sizes, in get_split_base_addrs()
  | 215 at::IntArrayRef split_sizes, in get_split_chunk_sizes()
  | 620 at::IntArrayRef split_sizes, in split_with_sizes_copy_out_cuda_contiguous_no_cast()
  | 704 IntArrayRef split_sizes, in split_with_sizes_copy_out_cuda()
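split_with_sizes_copy_out_cuda backs the out= overload of torch.split_with_sizes_copy, with a no-cast fast path for contiguous inputs. A hedged sketch of calling it from Python (assumes a CUDA device and a recent PyTorch version that exposes torch.split_with_sizes_copy):

```python
import torch

if torch.cuda.is_available():
    x = torch.randn(8, device="cuda")
    outs = [torch.empty(3, device="cuda"), torch.empty(5, device="cuda")]
    # Writes the chunks into preallocated buffers instead of returning views;
    # contiguous inputs/outputs should hit the no-cast fast path listed above.
    torch.split_with_sizes_copy(x, [3, 5], dim=0, out=outs)
```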
|
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/ |
H A D | LegacyBatchingRegistrations.cpp | 249 std::vector<Tensor> split_with_sizes_batching_rule(const Tensor& self, SymIntArrayRef split_sizes, … in split_with_sizes_batching_rule()
  | 261 …ensor> split_with_sizes_copy_batching_rule(const Tensor& self, SymIntArrayRef split_sizes, int64_t… in split_with_sizes_copy_batching_rule()
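These batching rules are what let split_with_sizes run under vmap. A small sketch (assumes torch>=2.0 with torch.func; the shapes are illustrative):

```python
import torch
from torch.func import vmap

x = torch.randn(4, 5)                              # a batch of 4 rows
# The batching rule moves the batch dimension through the split, so each
# output keeps the batch dimension in front.
head, tail = vmap(lambda row: torch.split(row, [2, 3], dim=0))(x)
print(head.shape, tail.shape)                      # (4, 2) and (4, 3)
```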
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
H A D | TensorShape.cpp | 951 std::vector<c10::SymInt> split_sizes(chunks, split_size); in chunk() local
  | 1042 std::vector<int64_t> split_sizes(chunks, split_size); in unsafe_chunk() local
  | 2621 std::vector<Tensor> split_with_sizes(const Tensor& self, IntArrayRef split_sizes, int64_t dim) { in split_with_sizes()
  | 2643 std::vector<Tensor> unsafe_split_with_sizes(const Tensor& self, IntArrayRef split_sizes, int64_t di… in unsafe_split_with_sizes()
  | 2654 std::vector<Tensor> hsplit(const Tensor& self, IntArrayRef split_sizes) { in hsplit()
  | 2659 std::vector<Tensor> vsplit(const Tensor& self, IntArrayRef split_sizes) { in vsplit()
  | 2664 std::vector<Tensor> dsplit(const Tensor& self, IntArrayRef split_sizes) { in dsplit()
  | 4058 void split_with_sizes_copy_out(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, a… in split_with_sizes_copy_out()
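This file holds the reference implementations: chunk builds a split_sizes vector from a chunk count, split_with_sizes takes explicit sizes, and hsplit/vsplit/dsplit accept NumPy-style split indices. A short sketch of the corresponding Python surface (standard torch APIs, not code from this file):

```python
import torch

x = torch.arange(24.).reshape(4, 6)

# Explicit per-chunk sizes along a dimension; the results are views of x.
a, b = x.split_with_sizes([2, 4], dim=1)           # shapes (4, 2) and (4, 4)

# chunk() converts a chunk count into a split_sizes vector internally.
c1, c2, c3 = torch.chunk(x, 3, dim=1)              # sizes [2, 2, 2]

# hsplit/vsplit/dsplit take split *indices* (NumPy-style), not sizes.
left, right = torch.hsplit(x, [2])                 # columns [:2] and [2:]
```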
|
/aosp_15_r20/external/pytorch/torch/onnx/ |
H A D | symbolic_opset13.py | 119 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
  | 132 g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None argument
|
H A D | symbolic_opset11.py | 638 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
|
H A D | symbolic_opset9.py | 1060 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
  | 1070 g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None argument
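The symbolic_opset9/11/13 entries above provide the per-opset lowerings of split_with_sizes to the ONNX Split op. A hedged export sketch (assumes the installed torch has ONNX export support; the module is illustrative):

```python
import io
import torch

class Splitter(torch.nn.Module):
    def forward(self, x):
        a, b = torch.split(x, [2, 3], dim=1)       # lowered via split_with_sizes
        return a, b

buf = io.BytesIO()
torch.onnx.export(Splitter(), (torch.randn(1, 5),), buf, opset_version=13)
```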
|
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/ |
H A D | Utils.hpp | 499 const std::vector<int64_t>& split_sizes, in checkSplitSizes()
  | 519 const std::vector<int64_t>& split_sizes, in computeLengthsAndOffsets()
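checkSplitSizes and computeLengthsAndOffsets validate and flatten per-rank split sizes for uneven collectives such as all_to_all_single. A sketch of the Python call that exercises them (assumes dist.init_process_group(...) has already run; the split lists are illustrative):

```python
import torch
import torch.distributed as dist

world = dist.get_world_size()
input_splits = [1] * world       # must sum to inp.size(0); validated like checkSplitSizes()
output_splits = [1] * world
inp = torch.full((world,), float(dist.get_rank()))
out = torch.empty(world)
dist.all_to_all_single(out, inp,
                       output_split_sizes=output_splits,
                       input_split_sizes=input_splits)
```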
|
/aosp_15_r20/external/executorch/kernels/portable/cpu/ |
H A D | op_split_with_sizes_copy.cpp | 26 exec_aten::ArrayRef<int64_t> split_sizes, in split_with_sizes_copy_out()
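The portable kernel implements the non-aliasing split_with_sizes_copy variant used by export-oriented backends. A sketch of the same op at the PyTorch level (assumes a torch build that registers aten::split_with_sizes_copy):

```python
import torch

x = torch.arange(5.)
# Returns independent copies rather than views of x.
parts = torch.ops.aten.split_with_sizes_copy(x, [2, 3], 0)
parts[0].fill_(0)                # does not modify x, unlike torch.split's views
```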
|
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
H A D | split_op.cc | 147 std::vector<int64_t> split_sizes; in Compile() local
|
/aosp_15_r20/external/pytorch/aten/src/ATen/ |
H A D | FunctionalInverses.cpp | 254 …rseReturnMode inverse_return_mode, int64_t mutated_view_idx, c10::SymIntArrayRef split_sizes, int6… in split_with_sizes_inverse()
  | 450 std::vector<c10::SymInt> split_sizes(chunks, split_size); in chunk_inverse() local
|
H A D | LegacyBatchingRegistrations.cpp | 440 std::vector<Tensor> split_with_sizes_batching_rule(const Tensor& self, IntArrayRef split_sizes, int… in split_with_sizes_batching_rule()
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/ |
H A D | NestedTensorUtils.cpp | 115 c10::IntArrayRef split_sizes, in split_with_sizes_nested()
|
H A D | NestedTensorMath.cpp | 276 std::vector<int64_t> split_sizes; in NestedTensor_to_padded_tensor_generic() local
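NestedTensor_to_padded_tensor_generic uses per-component split_sizes to lay ragged components out into a dense, padded tensor. A hedged sketch with the public nested-tensor API (assumes torch>=2.0; shapes are illustrative):

```python
import torch

nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])
padded = torch.nested.to_padded_tensor(nt, 0.0)    # shape (2, 4, 3), zero-padded
```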
|
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/ |
H A D | split_v_op.cc | 199 absl::Span<const Tlen> split_sizes) { in SplitHasAlignedOutputsInFirstDimension()
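SplitV is the TensorFlow kernel behind tf.split when a list of sizes is given; the helper above checks whether all first-dimension outputs stay aligned. A small sketch (assumes a standard TensorFlow install):

```python
import tensorflow as tf

x = tf.reshape(tf.range(10), (10,))
# A list of sizes lowers to the SplitV kernel; the sizes must sum to the
# length of the dimension being split.
a, b, c = tf.split(x, [2, 3, 5], axis=0)
```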
|
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/ |
H A D | native_ops.cpp | 733 const auto& split_sizes = p_node->Input(1).toIntList(); in __anon75e5f0514602() local
  | 757 const auto& split_sizes = p_node->Input(1).toIntList(); in __anon75e5f0514902() local
|
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/ |
H A D | copy_ops_util.cpp | 418 exec_aten::ArrayRef<int64_t> split_sizes, in check_split_with_sizes_copy_args()
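check_split_with_sizes_copy_args enforces the same invariants as the core op: non-negative sizes that sum exactly to the split dimension. A sketch of what violating that looks like from Python (standard torch behaviour):

```python
import torch

x = torch.randn(5)
try:
    torch.split(x, [2, 2], dim=0)      # 2 + 2 != 5
except RuntimeError as err:
    print(err)                         # split sizes must sum to the dim size
```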
|
/aosp_15_r20/external/pytorch/test/dynamo/ |
H A D | test_aot_autograd.py | 920 def fn(result, split_sizes): argument
|
H A D | test_repros.py | 3956 def fn(result, split_sizes): argument
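Both Dynamo tests trace a function whose split_sizes argument is a Python list. A hedged sketch of the same pattern under torch.compile (assumes torch>=2.0; fn here is illustrative, not the tests' exact body):

```python
import torch

def fn(x, split_sizes):
    return [piece.sum() for piece in torch.split(x, split_sizes, dim=0)]

compiled = torch.compile(fn)
print(compiled(torch.randn(6), [2, 4]))
```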
|
/aosp_15_r20/external/pytorch/torch/csrc/autograd/ |
H A D | FunctionsManual.cpp | 2112 c10::SymIntArrayRef split_sizes, in split_with_sizes_backward()
  | 2138 c10::SymIntArrayRef split_sizes, in _nested_split_with_sizes_backward()
  | 2180 std::vector<c10::SymInt> split_sizes(num_splits, split_size); in split_backward() local
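split_with_sizes_backward concatenates the incoming gradients, materialising zeros for any output whose gradient is undefined. A short sketch of that behaviour from the autograd surface:

```python
import torch

x = torch.randn(5, requires_grad=True)
a, b = torch.split(x, [2, 3])
a.sum().backward()                 # b is unused, so its grad slot is filled with zeros
print(x.grad)                      # tensor([1., 1., 0., 0., 0.])
```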
|
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/ |
H A D | tf_ops_n_z.cc | 1597 SmallVector<int64_t, 4> split_sizes; in verify() local
|
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/xla/transforms/ |
H A D | legalize_tf.cc | 3511 SmallVector<int64_t, 4> split_sizes; in matchAndRewrite() local
|