
Searched full:dim (Results 1 – 25 of 6567) sorted by relevance


/aosp_15_r20/external/sdv/vsomeip/third_party/boost/numeric/odeint/include/boost/numeric/odeint/algebra/
array_algebra.hpp
36 //template< typename T , size_t dim , class Op >
38 size_t dim, class Op >
39 static void for_each1( Array< T, dim > &s1, Op op ) in for_each1()
41 for( size_t i=0 ; i<dim ; ++i ) in for_each1()
46 size_t dim, class Op >
47 static void for_each2( Array< T, dim > &s1, const Array< T, dim > &s2, in for_each2()
50 for( size_t i=0 ; i<dim ; ++i ) in for_each2()
55 size_t dim, class Op >
56 static void for_each3( Array< T , dim > &s1 , in for_each3()
57 const Array< T , dim > &s2 , in for_each3()
[all …]
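
The matched lines show odeint's array_algebra applying an operation element-wise across fixed-size arrays. As a rough illustration of that pattern (a minimal sketch, not the Boost code itself; the names are invented for the example):

```cpp
#include <array>
#include <cstddef>

// Apply a binary operation element-wise across two fixed-size arrays,
// the pattern the for_each2 hit above follows.
template <typename T, std::size_t dim, class Op>
void for_each2(std::array<T, dim>& s1, const std::array<T, dim>& s2, Op op) {
    for (std::size_t i = 0; i < dim; ++i)
        op(s1[i], s2[i]);
}
// Example op: s1[i] += 0.1 * s2[i], the kind of update an ODE stepper performs.
```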
/aosp_15_r20/external/eigen/Eigen/src/Geometry/
Transform.h
24 Dim = Transform::Dim, enumerator
42 int Dim,
58 int Dim,
95 * - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
97 * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
98 * - #Projective: the transformation is stored as a (Dim+1)^2 matrix
129 * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
136 * \b Translation t (Dim)x(1):
142 * \b Rotation R (Dim)x(Dim):
148 * \b Linear \b Matrix L (Dim)x(Dim):
[all …]
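
The Transform.h comments above describe the storage modes (#Affine as a (Dim+1)^2 matrix, #AffineCompact as (Dim)x(Dim+1), #Projective as (Dim+1)^2) and the decomposition into a (Dim)x(Dim) linear part and a (Dim)x1 translation. A small usage sketch with Eigen's public API, for orientation only:

```cpp
#include <Eigen/Geometry>

// Build a 3D affine transform: rotate 0.5 rad about Z, then translate.
Eigen::Affine3f T = Eigen::Translation3f(1.0f, 2.0f, 3.0f) *
                    Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ());

Eigen::Vector3f p = T * Eigen::Vector3f(1.0f, 0.0f, 0.0f);  // apply to a point
Eigen::Matrix3f R = T.linear();       // the (Dim)x(Dim) linear block
Eigen::Vector3f t = T.translation();  // the (Dim)x1 translation block
```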
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ReduceOps.cpp
193 TORCH_META_FUNC2(all, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_META_FUNC2() argument
194 allany_meta(*this, "all", self, dim, keepdim); in TORCH_META_FUNC2()
197 TORCH_META_FUNC2(all, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) { in TORCH_META_FUNC2()
198 allany_meta(*this, "all", self, dim, keepdim); in TORCH_META_FUNC2()
205 TORCH_META_FUNC2(any, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_META_FUNC2() argument
206 allany_meta(*this, "any", self, dim, keepdim); in TORCH_META_FUNC2()
209 TORCH_META_FUNC2(any, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) { in TORCH_META_FUNC2()
210 allany_meta(*this, "any", self, dim, keepdim); in TORCH_META_FUNC2()
220 const std::optional<int64_t>& dim) { in check_argmax_argmin() argument
221 if (dim.has_value()) { in check_argmax_argmin()
[all …]
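
These meta functions register the dim/keepdim overloads of all/any. From the caller's side the same overloads look roughly like this (a hedged sketch using the LibTorch C++ API, not the internal meta path):

```cpp
#include <torch/torch.h>

torch::Tensor x = torch::rand({2, 3}) > 0.5;  // boolean tensor

// Reduce over one dimension; keepdim retains the reduced axis with size 1.
torch::Tensor a = x.all(/*dim=*/1);                    // shape [2]
torch::Tensor b = x.any(/*dim=*/0, /*keepdim=*/true);  // shape [1, 3]
```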
SpectralOps.cpp
162 IntArrayRef dim, int64_t norm, bool onesided) { in fft_r2c_maybe_out() argument
167 return at::_fft_r2c_outf(input, dim, norm, onesided, out_mut); in fft_r2c_maybe_out()
169 return at::_fft_r2c(input, dim, norm, onesided); in fft_r2c_maybe_out()
174 IntArrayRef dim, int64_t norm, SymInt last_dim_size) { in fft_c2r_maybe_out() argument
181 return at::_fft_c2r_symint_outf(input, dim, norm, last_dim_size, out_mut); in fft_c2r_maybe_out()
183 return at::_fft_c2r_symint(input, dim, norm, last_dim_size); in fft_c2r_maybe_out()
188 IntArrayRef dim, int64_t norm, bool forward) { in fft_c2c_maybe_out() argument
193 return at::_fft_c2c_outf(input, dim, norm, forward, out_mut); in fft_c2c_maybe_out()
195 return at::_fft_c2c(input, dim, norm, forward); in fft_c2c_maybe_out()
206 const auto input_dim = input.dim(); in fft_c2r()
[all …]
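
fft_r2c_maybe_out and its siblings simply route to the out= or functional variant of the low-level _fft_* ops. As a hedged sketch of the public real-to-complex / complex-to-real path these helpers serve (torch::fft frontend, illustrative only):

```cpp
#include <torch/torch.h>

torch::Tensor signal = torch::randn({8, 64});

// Real-to-complex FFT over the last dim (one-sided, n/2+1 bins): the _fft_r2c path.
torch::Tensor spec = torch::fft::rfft(signal);

// Complex-to-real inverse with explicit output length: the _fft_c2r path.
torch::Tensor back = torch::fft::irfft(spec, /*n=*/64);
```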
TensorAdvancedIndexing.cpp
159 (const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) { in TORCH_META_FUNC()
161 int64_t wrapped_dim = at::maybe_wrap_dim(dim, self.dim()); in TORCH_META_FUNC()
191 int64_t dim, in scatter_meta_impl() argument
195 int64_t wrapped_dim = at::maybe_wrap_dim(dim, self.dim()); in scatter_meta_impl()
216 (const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { in TORCH_META_FUNC2()
217 scatter_meta_impl(*this, self, dim, index, src); in TORCH_META_FUNC2()
221 (const Tensor& self, int64_t dim, const Tensor& index, const Scalar& value) { in TORCH_META_FUNC2()
222 scatter_meta_impl(*this, self, dim, index); in TORCH_META_FUNC2()
227 int64_t dim, in TORCH_META_FUNC2()
235 scatter_meta_impl(*this, self, dim, index, src, reduce); in TORCH_META_FUNC2()
[all …]
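
The meta functions above validate dim, index, and src for gather and scatter. A small usage sketch of the ops they check (LibTorch C++ API, illustrative values only):

```cpp
#include <torch/torch.h>

torch::Tensor src   = torch::arange(12).reshape({3, 4});
torch::Tensor index = torch::tensor({0, 1, 2, 0, 1, 3}, torch::kLong).reshape({3, 2});

// gather: out[i][j] = src[i][index[i][j]] when dim == 1.
torch::Tensor picked = src.gather(/*dim=*/1, index);

// scatter_: write values back along the same dimension.
torch::Tensor dst = torch::zeros({3, 4}, src.options());
dst.scatter_(/*dim=*/1, index, picked);
```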
Sorting.cpp
56 int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); in TORCH_META_FUNC() local
58 k >= 0 && k <= (self.dim() > 0 ? self.size(dim) : 1), in TORCH_META_FUNC()
60 int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim); in TORCH_META_FUNC()
63 // Build the output size, which is the dim being selected set to in TORCH_META_FUNC()
67 topKSize[dim] = k; in TORCH_META_FUNC()
74 (const Tensor& self, std::optional<bool> stable, int64_t dim, bool descending) { in TORCH_META_FUNC2()
75 maybe_wrap_dim(dim, self.dim()); in TORCH_META_FUNC2()
95 void _fill_indices(const TensorBase &indices, int64_t dim) { in _fill_indices() argument
96 auto ndim = indices.dim(); in _fill_indices()
97 assert(0 <= dim && dim < ndim); in _fill_indices()
[all …]
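
The topk meta function wraps dim, checks 0 <= k <= size(dim), and sets the output shape to the input shape with size k along dim. Usage of the resulting op, as a sketch:

```cpp
#include <torch/torch.h>

torch::Tensor scores = torch::randn({4, 10});

// Top 3 values along dim 1; values and indices both have shape [4, 3].
auto [values, indices] = torch::topk(scores, /*k=*/3, /*dim=*/1,
                                     /*largest=*/true, /*sorted=*/true);
```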
TensorShape.cpp
223 t.dim() > 0, in cat_check_no_zero_dim()
244 TORCH_PRECOMPUTE_META_FUNC(cat)(const ITensorListRef& tensors, int64_t dim) { in TORCH_PRECOMPUTE_META_FUNC()
252 dim = at::legacy_cat_wrap_dim(dim, materialized); in TORCH_PRECOMPUTE_META_FUNC()
303 dim <= materialized[valid].get().dim(), "torch.cat(): dimension ", dim, "out of range"); in TORCH_PRECOMPUTE_META_FUNC()
307 // except in the dimension 'dim'. in TORCH_PRECOMPUTE_META_FUNC()
313 at::native::check_cat_shape_except_dim(materialized[valid], t, dim, i); in TORCH_PRECOMPUTE_META_FUNC()
314 size_at_dim += t.size(dim); in TORCH_PRECOMPUTE_META_FUNC()
326 sizes[dim] = size_at_dim; in TORCH_PRECOMPUTE_META_FUNC()
342 .set_dim(dim) in TORCH_PRECOMPUTE_META_FUNC()
357 TORCH_CHECK(shape_tensor.dim() == 1); in _reshape_from_tensor()
[all …]
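
cat's precompute checks that all inputs agree in every dimension except dim and sums the sizes along dim to build the output shape. Illustrated with the public op (a sketch):

```cpp
#include <torch/torch.h>

torch::Tensor a = torch::randn({2, 3});
torch::Tensor b = torch::randn({4, 3});

// Shapes must match in every dimension except the cat dimension;
// the result's size along dim is the sum of the inputs': here [6, 3].
torch::Tensor c = torch::cat({a, b}, /*dim=*/0);
```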
Integration.cpp
32 Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) { in do_trapezoid() argument
33 Tensor left = y.slice(dim, 0, -1); in do_trapezoid()
34 Tensor right = y.slice(dim, 1); in do_trapezoid()
37 return ((left + right) * dx).sum(dim) / 2.; in do_trapezoid()
42 Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) { in do_trapezoid() argument
43 return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx; in do_trapezoid()
46 Tensor zeros_like_except(const Tensor& y, int64_t dim) { in zeros_like_except() argument
48 dim = maybe_wrap_dim(dim, y.dim()); in zeros_like_except()
49 sizes.erase(sizes.begin() + dim); in zeros_like_except()
53 Tensor do_cumulative_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) { in do_cumulative_trapezoid() argument
[all …]
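
do_trapezoid implements the composite trapezoidal rule: average adjacent samples along dim, scale by the spacing, and sum. A sketch of the same computation with public tensor ops, mirroring the slice/sum form in the snippet (not necessarily the exact internal path):

```cpp
#include <torch/torch.h>

// Trapezoidal rule along `dim` with uniform spacing `dx`:
// integral ~= dx * sum_i (y[i] + y[i+1]) / 2
torch::Tensor trapezoid_uniform(const torch::Tensor& y, double dx, int64_t dim) {
  torch::Tensor left  = y.slice(dim, 0, -1);  // y[0 .. n-2]
  torch::Tensor right = y.slice(dim, 1);      // y[1 .. n-1]
  return ((left + right) * dx).sum(dim) / 2.0;
}
```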
/aosp_15_r20/external/pytorch/torch/export/
dynamic_shapes.py
34 "Dim",
56 Metaclass for :func:`Dim` types.
68 return f"Dim('{name}')"
70 return f"Dim('{name}', max={max_})"
72 return f"Dim('{name}', min={min_})"
73 return f"Dim('{name}', min={min_}, max={max_})"
76 # e.g., dim + 1
88 # e.g., dim - 1
103 # e.g., dim * 2
125 Meta class for static :func:`Dim` types.
[all …]
/aosp_15_r20/external/pytorch/torch/_refs/
fft.py
120 dim: int,
126 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
127 last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
139 output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
147 dim: int,
158 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
159 dim_size = n if n is not None else input.shape[dim]
167 ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
176 dim: int,
185 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
[all …]
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
copy_ops_util.cpp
76 int64_t dim, in check_cat_args() argument
107 tensor_is_rank(tensors[ref_i], tensors[i].dim())); in check_cat_args()
109 for (size_t d = 0; d < tensors[i].dim(); ++d) { in check_cat_args()
110 if (d != dim) { in check_cat_args()
117 // Ensure dim is in range. in check_cat_args()
119 tensors[ref_i].numel() == 0 || tensors[ref_i].dim() > dim); in check_cat_args()
120 ET_LOG_AND_RETURN_IF_FALSE(dim >= 0); in check_cat_args()
127 int64_t dim, in get_cat_out_target_size() argument
132 // calculate out dim in get_cat_out_target_size()
137 cat_dim_size += tensors[i].size(dim); in get_cat_out_target_size()
[all …]
index_util.cpp
17 int64_t dim, in check_gather_args() argument
22 ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); in check_gather_args()
33 // Normalize dim to non-negative value in check_gather_args()
34 if (dim < 0) { in check_gather_args()
35 dim += nonzero_dim(in); in check_gather_args()
39 if (d != dim) { in check_gather_args()
42 …d of index should be smaller than the size of that dimension of input if dimension %zd != dim %zd", in check_gather_args()
45 (size_t)dim); in check_gather_args()
51 index_data[i] >= 0 && index_data[i] < nonempty_size(in, dim), in check_gather_args()
53 (size_t)dim, in check_gather_args()
[all …]
reduce_util.h
52 // Compute innermost dim from dim list in apply_on_flat_ix_with_dim_mask_and_base()
53 size_t inner_dim = in.dim() - 1; in apply_on_flat_ix_with_dim_mask_and_base()
60 // Only the dims that are in the dim list are relevant. in apply_on_flat_ix_with_dim_mask_and_base()
62 for (int64_t d = 0; d < in.dim(); d++) { in apply_on_flat_ix_with_dim_mask_and_base()
93 // the index of the next innermost dimension from the dim list by 1. in apply_on_flat_ix_with_dim_mask_and_base()
98 // curr_dim will be the dim from the dim list we are currently updating in apply_on_flat_ix_with_dim_mask_and_base()
115 // Decrease current dim in apply_on_flat_ix_with_dim_mask_and_base()
118 // Stop if curr_dim is in the dim list in apply_on_flat_ix_with_dim_mask_and_base()
122 // Keep decreasing if curr_dim is not in the dim list in apply_on_flat_ix_with_dim_mask_and_base()
152 const size_t dim,
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/grappler/costs/graph_properties_testdata/
large_graph.pbtxt.html
115 dim {
137 dim {
186 name: "ExpandDims/dim"
211 input: "ExpandDims/dim"
239 dim {
247 name: "ExpandDims_1/dim"
272 input: "ExpandDims_1/dim"
302 dim {
310 name: "ExpandDims_2/dim"
335 input: "ExpandDims_2/dim"
[all …]
/aosp_15_r20/external/pytorch/test/inductor/
test_split_cat_fx_passes.py
39 return [torch.relu(s) for s in torch.split(x, 2, dim=1)]
43 torch.relu(s) for s in torch.split(x, split_size_or_sections=2, dim=1)
49 for s in torch.split(tensor=x, split_size_or_sections=2, dim=-1)
53 return [torch.relu(s) for s in torch.split(x, [16, 16], dim=1)]
65 return [torch.relu(s) for s in x.split(2, dim=1)]
68 return [torch.relu(s) for s in x.split(split_size=2, dim=1)]
77 return [torch.relu(s) for s in x.split([16, 16], dim=-1)]
126 return [torch.split(s, 2, dim=1) for s in torch.split(x, 2, dim=1)]
130 torch.split(s, split_size_or_sections=2, dim=1)
131 for s in torch.split(x, split_size_or_sections=2, dim=1)
[all …]
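
These tests exercise torch.split called positionally, by keyword, and with a list of section sizes. The same two forms in C++, as a sketch (the list-of-sizes form maps to split_with_sizes):

```cpp
#include <torch/torch.h>
#include <vector>

torch::Tensor x = torch::randn({4, 32});

// Equal chunks of size 2 along dim 1 -> 16 tensors of shape [4, 2].
std::vector<torch::Tensor> chunks = x.split(/*split_size=*/2, /*dim=*/1);

// Explicit section sizes along the last dimension -> shapes [4, 16] and [4, 16].
std::vector<torch::Tensor> halves = x.split_with_sizes({16, 16}, /*dim=*/-1);
```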
/aosp_15_r20/external/pytorch/torch/_refs/linalg/
__init__.py
14 Dim,
73 def cross(a: Tensor, b: Tensor, dim: int = -1):
79 a.size(dim) == 3 and b.size(dim) == 3,
80 … lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}",
83 dim = utils.canonicalize_dim(a.ndim, dim)
85 return a.index_select(dim, (idx + 1) % 3) * b.index_select(
86 dim, (idx + 2) % 3
87 ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3)
105 dim: Optional[DimsType] = None,
115 if isinstance(dim, Dim):
[all …]
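
The reference implementation of linalg.cross above requires both inputs to have size 3 along dim and builds the cross product from rotated index_selects. The same formula written out in C++ (a sketch; the helper name and lambda are invented for illustration):

```cpp
#include <torch/torch.h>

// Cross product along `dim` via rotated index_select, mirroring the reference
// formula in the snippet. Both a and b must have size 3 along dim.
torch::Tensor cross_along(const torch::Tensor& a, const torch::Tensor& b, int64_t dim) {
  torch::Tensor idx = torch::arange(3, torch::kLong);
  auto roll = [&](int64_t k) { return (idx + k).remainder(3); };
  return a.index_select(dim, roll(1)) * b.index_select(dim, roll(2)) -
         a.index_select(dim, roll(2)) * b.index_select(dim, roll(1));
}
```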
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
sharding_util_ops.cc
80 for (int dim = 0; dim < expected_rank; ++dim) { in GetAndValidateAttributes() local
81 if (paddings[dim] < 0) { in GetAndValidateAttributes()
83 "'padding' must be all non-negative, but got ", paddings[dim], in GetAndValidateAttributes()
84 " at index ", dim, "."); in GetAndValidateAttributes()
86 if (paddings[dim] > 0) { in GetAndValidateAttributes()
108 auto divisor = [&](const int dim) { in GetSliceIndices() argument
110 for (int i = num_partitions.size() - 1; i > dim; --i) { in GetSliceIndices()
116 for (int dim = num_partitions.size() - 1; dim > 0; --dim) { in GetSliceIndices() local
117 slice_indices[dim] = in GetSliceIndices()
118 ((index / divisor(dim)) % num_partitions[dim]) * slice_shape[dim]; in GetSliceIndices()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/profiler/internal/testdata/
graph.pbtxt
9 dim {
12 dim {
15 dim {
18 dim {
37 dim {
40 dim {
43 dim {
46 dim {
71 dim {
89 dim {
[all …]
/aosp_15_r20/external/executorch/runtime/core/exec_aten/util/
tensor_util.h
42 #define ET_CHECK_VALID_DIM(DIM, UPPER_BOUND) \ argument
44 DIM >= -static_cast<int64_t>(UPPER_BOUND) && \
45 DIM < static_cast<int64_t>(UPPER_BOUND), \
46 "dim %" PRId64 " must be within range [-%zd, %zd)", \
47 DIM, \
51 #define ET_CHECK_NON_ZERO_DIM_SIZE(DIM, T) \ argument
52 const size_t udim = ET_NORMALIZE_IX(DIM, T.dim()); \
54 T.size(udim) != 0, "Expected dim %zd to have non-zero size.", udim);
67 const size_t a_dim__ = (a__).dim(); \
68 const size_t b_dim__ = (b__).dim(); \
[all …]
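
ET_CHECK_VALID_DIM enforces the usual convention that a dimension index is valid if it lies in [-rank, rank), with negative values counting from the end. A standalone sketch of that normalization (not the ExecuTorch macro itself; the function name is invented):

```cpp
#include <cstdint>
#include <stdexcept>

// Normalize a possibly-negative dim into [0, rank), the range the
// ET_CHECK_VALID_DIM / ET_NORMALIZE_IX macros above enforce.
int64_t normalize_dim(int64_t dim, int64_t rank) {
  if (dim < -rank || dim >= rank) {
    throw std::out_of_range("dim must be within range [-rank, rank)");
  }
  return dim < 0 ? dim + rank : dim;
}
```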
/aosp_15_r20/external/pytorch/test/
test_reductions.py
61 def _rand_shape(dim, min_size, max_size): argument
63 for i in range(dim):
67 def _reduced_shape(shape, dim=None, keepdim=False): argument
68 """Computes the expected reduced shape given dim and keepdim
72 dim : The dimensions to reduce
79 if dim is None:
83 dim = dim if isinstance(dim, Sequence) else [dim]
84 dim = {i if i >= 0 else len(shape) + i for i in dim}
88 if i not in dim:
102 """Tests output shape for input with ndim and dim and keepdim kwargs"""
[all …]
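
_reduced_shape computes the expected output shape of a reduction: reduced dimensions are dropped, or kept with size 1 when keepdim is true. The same rule as a small C++ sketch (assumes dims are already normalized to non-negative indices, unlike the Python helper):

```cpp
#include <cstdint>
#include <set>
#include <vector>

// Expected shape after reducing `dims` of `shape`.
std::vector<int64_t> reduced_shape(const std::vector<int64_t>& shape,
                                   const std::set<int64_t>& dims, bool keepdim) {
  std::vector<int64_t> out;
  for (int64_t i = 0; i < static_cast<int64_t>(shape.size()); ++i) {
    if (dims.count(i) == 0) {
      out.push_back(shape[i]);  // not reduced: keep its size
    } else if (keepdim) {
      out.push_back(1);         // reduced but kept with size 1
    }                           // otherwise dropped entirely
  }
  return out;
}
```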
/aosp_15_r20/frameworks/base/media/mca/filterpacks/native/base/
vec_types.h
23 template < class T, int dim>
26 T data[dim];
28 VecBase<T,dim>& operator = (const VecBase<T, dim> &x) {
29 memcpy(data, x.data, sizeof(T)*dim);
42 for (int i = 0; i < dim; ++i) in Length()
48 template < class T, int dim>
49 class Vec : public VecBase<T,dim> {
52 Vec<T,dim>& operator = (const Vec<T, dim> &x) {
53 memcpy(this->data, x.data, sizeof(T)*dim);
58 template <class T, int dim>
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
ScatterGatherKernel.cpp
103 int64_t dim, int64_t index_dim_size, in operator ()()
114 " is out of bounds for dimension ", dim, in operator ()()
130 int64_t dim, int64_t index_dim_size, in operator ()()
141 " is out of bounds for dimension ", dim, in operator ()()
165 void operator()(const Tensor& self, int64_t dim, in operator ()()
176 // `dim` is traversed in the kernel, in operator ()()
177 // that is why index.stride(dim) = 0 and index.size(dim) = 1. in operator ()()
178 // Also, index.size(dim) = 1 makes sure that TensorIterator.DimCounter in operator ()()
179 // has the following form : (i_1,..., i_{dim-1}, 0, i_{dim+1},...,i_n). in operator ()()
180 index_sizes[dim] = 1; in operator ()()
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/tensor/
placement_types.py
34 def is_shard(self, dim: Optional[int] = None) -> bool:
36 if dim is not None and is_shard_instance:
37 return cast(Shard, self).dim == dim
51 The ``Shard(dim)`` placement describes the DTensor sharding on tensor dimension
52 ``dim`` over a corresponding ``DeviceMesh`` dimension, where each rank on the
54 ``Shard(dim)`` placement follows the ``torch.chunk(dim)`` semantic, where the
60 dim (int): The tensor dimension that describes the DTensor is sharded over its
67 dim: int
87 self.dim <= tensor.ndim
88 ), f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}"
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/spmd/
spmd_partitioner_util.cc
88 if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) { in EvenlyPartitions()
174 if (sharding.tile_assignment().dim(i) == 1 || in MakePartitionOffsets()
213 i, shard_shape.dimensions(i) * sharding.tile_assignment().dim(i)); in GetPaddedShapeForUnevenPartitioning()
235 gid *= partial_sharding.tile_assignment().dim(i); in PartialReplicateReshardCompatibleSharding()
248 for (int64_t dim = 0; dim < rank; dim++) { in PartialReplicateReshardCompatibleSharding() local
249 int64_t partial_tile_size = partial_sharding.tile_assignment().dim(dim); in PartialReplicateReshardCompatibleSharding()
250 int64_t target_tile_size = target_sharding.tile_assignment().dim(dim); in PartialReplicateReshardCompatibleSharding()
257 expand_tile_dims_indices[dim] = num_expand_dims++; in PartialReplicateReshardCompatibleSharding()
289 for (int64_t dim = 0; dim < rank; dim++) { in PartialReplicateReshardCompatibleSharding() local
290 perm.emplace_back(dim); in PartialReplicateReshardCompatibleSharding()
[all …]
/aosp_15_r20/external/mesa3d/src/amd/common/
ac_nir_lower_image_opcodes_cdna.c
31 static unsigned get_coord_components(enum glsl_sampler_dim dim, bool is_array) in get_coord_components() argument
33 switch (dim) { in get_coord_components()
50 enum glsl_sampler_dim dim, bool is_array, in lower_image_coords() argument
53 unsigned num_coord_components = get_coord_components(dim, is_array); in lower_image_coords()
64 if (dim == GLSL_SAMPLER_DIM_1D && is_array) { in lower_image_coords()
112 enum gl_access_qualifier access, enum glsl_sampler_dim dim, in emulated_image_load() argument
119 lower_image_coords(b, desc, coord, dim, is_array, in emulated_image_load()
128 enum glsl_sampler_dim dim, bool is_array) in emulated_image_store() argument
133 lower_image_coords(b, desc, coord, dim, is_array, true), in emulated_image_store()
139 /* Return the width, height, or depth for dim=0,1,2. */
[all …]
