
Searched full:sizes (Results 1 – 25 of 11037) sorted by relevance


/aosp_15_r20/external/deqp/android/cts/main/vk-main-2024-03-01/
texture.txt
47 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_linear_mipmap_linear_clamp
48 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_linear_mipmap_linear_repeat
49 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_linear_mipmap_nearest_clamp
50 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_linear_mipmap_nearest_repeat
51 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_nearest_mipmap_linear_clamp
52 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_nearest_mipmap_linear_repeat
53 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_nearest_mipmap_nearest_clamp
54 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_linear_nearest_mipmap_nearest_repeat
55 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_nearest_linear_mipmap_linear_clamp
56 dEQP-VK.texture.explicit_lod.2d.sizes.128x128_nearest_linear_mipmap_linear_repeat
[all …]
/aosp_15_r20/external/executorch/runtime/core/exec_aten/testing_util/
tensor_factory.h
30 * sizes, assuming contiguous data.
32 inline size_t sizes_to_numel(const std::vector<int32_t>& sizes) { in sizes_to_numel() argument
34 for (auto s : sizes) { in sizes_to_numel()
41 * Check if given strides is legal under given sizes. In the `make` function,
43 * - a. strides.size() == sizes.size()
50 * @param[in] sizes The sizes of the dimensions of the Tensor.
56 const std::vector<int32_t> sizes, in check_strides() argument
58 if (sizes.size() != strides.size()) { in check_strides()
64 // Both sizes and strides are empty vector. Legal! in check_strides()
72 // check if strides[i] == strides[i + 1] * sizes[i + 1] for all i in in check_strides()
[all …]
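The tensor_factory.h excerpt above describes the stride-legality rule used by the test helper: strides must have the same rank as sizes, and for a contiguous layout strides[i] == strides[i + 1] * sizes[i + 1]. The following is a minimal, self-contained C++ sketch of that rule written from the visible comments only; the function names and the extra assumption that the innermost stride is 1 are mine, not ExecuTorch's.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Number of elements implied by a sizes vector (product of all dimensions).
size_t sizes_to_numel(const std::vector<int32_t>& sizes) {
  size_t numel = 1;
  for (auto s : sizes) {
    numel *= static_cast<size_t>(s);
  }
  return numel;
}

// Contiguous-stride check as described in the excerpt above:
// strides must have the same rank as sizes, and
// strides[i] == strides[i + 1] * sizes[i + 1] for every i.
// The innermost stride being 1 is an extra assumption for a fully
// contiguous layout, not something stated in the excerpt.
bool strides_are_contiguous(
    const std::vector<int32_t>& sizes,
    const std::vector<int32_t>& strides) {
  if (sizes.size() != strides.size()) {
    return false;
  }
  if (sizes.empty()) {
    return true;  // Zero-dimensional tensor: nothing to check.
  }
  if (strides.back() != 1) {
    return false;
  }
  for (size_t i = 0; i + 1 < sizes.size(); ++i) {
    if (strides[i] != strides[i + 1] * sizes[i + 1]) {
      return false;
    }
  }
  return true;
}
```

For example, sizes {2, 3, 4} give numel 24 and contiguous strides {12, 4, 1}, which this check accepts; strides {12, 4, 2} would be rejected.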
/aosp_15_r20/external/deqp-deps/glslang/glslang/Include/
arrays.h
72 // TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
79 // Almost all arrays could be handled by two sizes each fitting
81 // are more than 3 sizes or a size needing more than 16 bits.
85 TSmallArrayVector() : sizes(nullptr) { } in TSmallArrayVector()
91 if (from.sizes == nullptr)
92 sizes = nullptr;
95 *sizes = *from.sizes;
103 if (sizes == nullptr) in size()
105 return (int)sizes->size(); in size()
110 assert(sizes != nullptr && sizes->size() > 0); in frontSize()
[all …]
/aosp_15_r20/external/angle/third_party/glslang/src/glslang/Include/
arrays.h
72 // TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
79 // Almost all arrays could be handled by two sizes each fitting
81 // are more than 3 sizes or a size needing more than 16 bits.
85 TSmallArrayVector() : sizes(nullptr) { } in TSmallArrayVector()
91 if (from.sizes == nullptr)
92 sizes = nullptr;
95 *sizes = *from.sizes;
103 if (sizes == nullptr) in size()
105 return (int)sizes->size(); in size()
110 assert(sizes != nullptr && sizes->size() > 0); in frontSize()
[all …]
/aosp_15_r20/external/zxing/core/src/test/java/com/google/zxing/datamatrix/encoder/
HighLevelEncodeTestCase.java
385 int[] sizes = new int[2]; in testSizes() local
386 encodeHighLevel("A", sizes); in testSizes()
387 assertEquals(3, sizes[0]); in testSizes()
388 assertEquals(3, sizes[1]); in testSizes()
390 encodeHighLevel("AB", sizes); in testSizes()
391 assertEquals(3, sizes[0]); in testSizes()
392 assertEquals(3, sizes[1]); in testSizes()
394 encodeHighLevel("ABC", sizes); in testSizes()
395 assertEquals(3, sizes[0]); in testSizes()
396 assertEquals(3, sizes[1]); in testSizes()
[all …]
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/glsl/
indexing_utils.h
81 ivec4 tidx_to_nchwi(const ivec4 tidx, const ivec4 sizes, const int packed_dim) { in tidx_to_nchwi() argument
83 ivec4(1, sizes.x, sizes.x * sizes.y, sizes.x * sizes.y * sizes.z); in tidx_to_nchwi()
106 ivec4 nchwi_to_tidx(const int nchwi, const ivec4 sizes) { in nchwi_to_tidx() argument
108 nchwi % sizes.x, in nchwi_to_tidx()
109 (nchwi / (sizes.x)) % sizes.y, in nchwi_to_tidx()
110 (nchwi / (sizes.x * sizes.y)) % sizes.z, in nchwi_to_tidx()
111 (nchwi / (sizes.x * sizes.y * sizes.z))); in nchwi_to_tidx()
114 int tidx_to_nchwi(const ivec4 tidx, const ivec4 sizes) { in tidx_to_nchwi() argument
115 return tidx.w * sizes.x * sizes.y * sizes.z + tidx.z * sizes.x * sizes.y + in tidx_to_nchwi()
116 tidx.y * sizes.x + tidx.x; in tidx_to_nchwi()
[all …]
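The indexing_utils.h excerpt shows GLSL helpers that map a 4-component tensor index (x, y, z, w, with x moving fastest) to a flat NCHW buffer index and back. Below is a small C++ sketch of the same arithmetic using a plain struct in place of ivec4; the struct and the main() round-trip are illustrative additions, not part of the shader library.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative stand-in for GLSL ivec4: x = width index, y = height,
// z = channel, w = batch, with x the fastest-moving component.
struct Idx4 {
  int32_t x, y, z, w;
};

// Flat NCHW index from a 4-component tensor index, mirroring the
// shader's tidx_to_nchwi: x varies fastest, then y, z, and w.
int32_t tidx_to_nchwi(const Idx4& tidx, const Idx4& sizes) {
  return tidx.w * sizes.x * sizes.y * sizes.z +
         tidx.z * sizes.x * sizes.y +
         tidx.y * sizes.x +
         tidx.x;
}

// Inverse mapping, mirroring nchwi_to_tidx: peel off each dimension
// with division and modulo.
Idx4 nchwi_to_tidx(int32_t nchwi, const Idx4& sizes) {
  return Idx4{
      nchwi % sizes.x,
      (nchwi / sizes.x) % sizes.y,
      (nchwi / (sizes.x * sizes.y)) % sizes.z,
      nchwi / (sizes.x * sizes.y * sizes.z)};
}

int main() {
  const Idx4 sizes{5, 4, 3, 2};  // W=5, H=4, C=3, N=2
  const Idx4 tidx{2, 1, 1, 1};
  const int32_t flat = tidx_to_nchwi(tidx, sizes);  // 1*60 + 1*20 + 1*5 + 2 = 87
  const Idx4 back = nchwi_to_tidx(flat, sizes);
  assert(back.x == 2 && back.y == 1 && back.z == 1 && back.w == 1);
  return 0;
}
```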
/aosp_15_r20/external/executorch/kernels/portable/test/
op_allclose_test.cpp
31 Tensor a = tf_float.ones(/*sizes=*/{2, 2}); in TEST()
32 Tensor b = tf_float.ones(/*sizes=*/{2, 2}); in TEST()
34 Tensor out = tf_bool.zeros(/*sizes=*/{1}); in TEST()
51 Tensor a = tf_double.ones(/*sizes=*/{2, 2}); in TEST()
52 Tensor b = tf_double.ones(/*sizes=*/{2, 2}); in TEST()
54 Tensor out = tf_bool.zeros(/*sizes=*/{1}); in TEST()
71 Tensor a = tf_float.make(/*sizes=*/{2, 2}, /*data=*/{1., 2., 3., 4.}); in TEST()
72 Tensor b = tf_float.make(/*sizes=*/{2, 2}, /*data=*/{5., 6., 7., 8.}); in TEST()
74 Tensor out = tf_bool.zeros(/*sizes=*/{1}); in TEST()
91 Tensor a = tf_double.make(/*sizes=*/{2, 2}, /*data=*/{1., 2., 3., 4.}); in TEST()
[all …]
/aosp_15_r20/external/executorch/runtime/core/exec_aten/testing_util/test/
tensor_factory_test.cpp
119 Tensor actual = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4}); in TEST_F()
124 std::vector<int64_t> sizes = {2, 2}; in TEST_F() local
126 at::zeros(at::IntArrayRef(sizes), at::dtype(ScalarType::Int)); in TEST_F()
130 int32_t sizes[dim] = {2, 2}; in TEST_F() local
134 TensorImpl(ScalarType::Int, dim, sizes, data, dim_order, strides); in TEST_F()
148 Tensor actual = tf.make(/*sizes=*/{2, 2}, /*data=*/{1.1, 2.2, 3.3, 4.4}); in TEST_F()
153 std::vector<int64_t> sizes = {2, 2}; in TEST_F() local
155 at::zeros(at::IntArrayRef(sizes), at::dtype(ScalarType::Float)); in TEST_F()
159 int32_t sizes[dim] = {2, 2}; in TEST_F() local
163 TensorImpl(ScalarType::Float, dim, sizes, data, dim_order, strides); in TEST_F()
[all …]
tensor_util_test.cpp
212 int32_t size_to_numel(std::vector<int32_t> sizes) { in size_to_numel() argument
214 for (auto size : sizes) { in size_to_numel()
227 Tensor a = tf_int.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}); in TEST()
230 Tensor b = tf_long.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}); in TEST()
239 Tensor a = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}); in TEST()
240 Tensor b = tf.make(/*sizes=*/{4}, /*data=*/{1, 2, 4, 8}); in TEST()
251 /*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}, /*strided=*/{1, 2}); in TEST()
253 /*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}, /*strided=*/{2, 1}); in TEST()
267 Tensor t = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}); in TEST()
276 Tensor a = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 4, 8}); in TEST()
[all …]
/aosp_15_r20/external/fonttools/Lib/fontTools/
tfmLib.py
182 sizes = SimpleNamespace()
183 unpack2(SIZES_FORMAT, data, sizes)
190 if sizes.lf < 0:
193 if len(data) < sizes.lf * 4:
196 for name, length in vars(sizes).items():
200 if sizes.lh < 2:
201 raise TFMException(f"The header length is only {sizes.lh}!")
203 if sizes.bc > sizes.ec + 1 or sizes.ec > 255:
205 f"The character code range {sizes.bc}..{sizes.ec} is illegal!"
208 if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0:
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/
shape_inference.cpp
173 return {Shape(float_type, self.sizes().vec())}; in compute_shape_abs()
175 return {Shape(self.scalar_type(), self.sizes().vec())}; in compute_shape_abs()
181 return {Shape(self.scalar_type(), self.sizes().vec())}; in compute_shape_bernoulli()
197 return {Shape(self.scalar_type(), self.sizes().vec())}; in compute_shape_binary_cross_entropy()
208 return {Shape(self.scalar_type(), self.sizes().vec())}; in compute_shape_binary_cross_entropy_backward()
221 auto input_sizes = self.sizes(); in compute_shape_constant_pad_nd()
274 Shape(input.scalar_type(), input.sizes().vec()), in compute_shape_convolution_backward()
275 Shape(weight.scalar_type(), weight.sizes().vec()), in compute_shape_convolution_backward()
281 Shape(input.scalar_type(), input.sizes().vec()), in compute_shape_convolution_backward()
282 Shape(weight.scalar_type(), weight.sizes().vec())}; in compute_shape_convolution_backward()
[all …]
/aosp_15_r20/external/executorch/kernels/test/
op_gather_test.cpp
42 const std::vector<int32_t> sizes = {2, 3}; in test_gather_out() local
45 /*sizes=*/{2, 5}, in test_gather_out()
51 Tensor out = tf_data.zeros(sizes); in test_gather_out()
54 Tensor index = tf_index.make(sizes, in test_gather_out()
66 sizes, in test_gather_out()
77 out, tf_data.make(sizes, in test_gather_out()
84 /*sizes=*/{2, 3, 3}, in test_gather_out()
97 /*sizes=*/{1, 3, 2}, in test_gather_out()
104 out = tf_data.zeros(/*sizes=*/{1, 3, 2}); in test_gather_out()
111 /*sizes=*/{1, 3, 2}, in test_gather_out()
[all …]
op_scatter_add_test.cpp
42 const std::vector<int32_t> sizes = {3, 5}; in test_scatter_add_out() local
45 /*sizes=*/{2, 5}, in test_scatter_add_out()
51 Tensor self = tf_data.zeros(sizes); in test_scatter_add_out()
52 Tensor out = tf_data.zeros(sizes); in test_scatter_add_out()
55 /*sizes=*/{2, 3}, in test_scatter_add_out()
67 sizes, in test_scatter_add_out()
79 out, tf_data.make(sizes, in test_scatter_add_out()
87 /*sizes=*/{2, 3, 3}, in test_scatter_add_out()
100 self = tf_data.ones(/*sizes=*/{2, 3, 3}); in test_scatter_add_out()
101 out = tf_data.zeros(/*sizes=*/{2, 3, 3}); in test_scatter_add_out()
[all …]
op_add_test.cpp
44 const std::vector<int32_t> sizes = {2, 2}; in test_add() local
47 Tensor out = tf_out.zeros(sizes); in test_add()
51 tf_a.make(sizes, /*data=*/{1, 2, 4, 8}), in test_add()
52 tf_b.ones(sizes), in test_add()
57 EXPECT_TENSOR_EQ(out, tf_out.make(sizes, /*data=*/{2, 3, 5, 9})); in test_add()
98 const std::vector<int32_t> sizes = {2, 2}; in test_floating_point_add_out() local
101 Tensor out = tf.zeros(sizes); in test_floating_point_add_out()
105 tf.make(sizes, /*data=*/{1.25, 2.25, 4.5, 8.875}), in test_floating_point_add_out()
106 tf.ones(sizes), in test_floating_point_add_out()
113 EXPECT_TENSOR_CLOSE(out, tf.make(sizes, /*data=*/{2.5, 3.5, 5.75, 10.125})); in test_floating_point_add_out()
[all …]
op_scatter_test.cpp
42 const std::vector<int32_t> sizes = {3, 5}; in test_scatter_src_out() local
45 /*sizes=*/{2, 5}, in test_scatter_src_out()
51 Tensor in = tf_data.zeros(sizes); in test_scatter_src_out()
52 Tensor out = tf_data.zeros(sizes); in test_scatter_src_out()
55 /*sizes=*/{2, 3}, in test_scatter_src_out()
67 sizes, in test_scatter_src_out()
79 out, tf_data.make(sizes, in test_scatter_src_out()
87 /*sizes=*/{2, 3, 3}, in test_scatter_src_out()
100 in = tf_data.ones(/*sizes=*/{2, 3, 3}); in test_scatter_src_out()
101 out = tf_data.zeros(/*sizes=*/{2, 3, 3}); in test_scatter_src_out()
[all …]
/aosp_15_r20/external/executorch/docs/source/
extension-tensor.md
5 …main/runtime/core/portable_type/tensor.h) class doesn’t own its metadata (sizes, strides, dim_orde…
7 …e `forward()` method. You would need to declare and maintain at least the sizes array and data sep…
15 SizesType sizes[] = {2, 3};
21 std::size(sizes),
22 sizes,
30 You must ensure `sizes`, `dim_order`, `strides`, and `data` stay valid. This makes code maintenance…
47 {2, 3}, // sizes
53 …nd `dim_order` are computed automatically to default values based on the `sizes` if not specified …
67 You can create a scalar tensor, i.e. a tensor with zero dimensions or with one of the sizes being z…
87 When you provide sizes and data vectors, `TensorPtr` takes ownership of both the data and the sizes.
[all …]
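The extension-tensor.md excerpt contrasts a raw, non-owning tensor implementation, where the caller must keep the sizes array and the data alive, with a TensorPtr that takes ownership of both. The sketch below illustrates that ownership distinction with generic, hypothetical types of my own; it is not the ExecuTorch API.

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Non-owning view, in the spirit of the raw tensor described above:
// the caller must keep `sizes` and `data` alive for as long as the
// view is used. All names here are illustrative.
struct TensorView {
  const int32_t* sizes;  // borrowed
  size_t dim;
  float* data;           // borrowed
};

// Owning wrapper, in the spirit of the TensorPtr described above: the
// sizes vector and the data vector are moved in and live as long as
// the wrapper does, so there is no separate lifetime to manage.
class OwningTensor {
 public:
  OwningTensor(std::vector<int32_t> sizes, std::vector<float> data)
      : sizes_(std::move(sizes)), data_(std::move(data)) {}

  TensorView view() { return {sizes_.data(), sizes_.size(), data_.data()}; }

 private:
  std::vector<int32_t> sizes_;
  std::vector<float> data_;
};

int main() {
  // The temporaries passed here are owned by the tensor object, so the
  // caller does not have to keep {2, 3} or the data buffer alive.
  OwningTensor t({2, 3}, {1, 2, 3, 4, 5, 6});
  TensorView v = t.view();
  (void)v;
  return 0;
}
```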
/aosp_15_r20/external/pytorch/c10/core/
MemoryFormat.h
66 inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) { in get_channels_last_strides_2d() argument
67 std::vector<T> strides(sizes.size()); in get_channels_last_strides_2d()
68 switch (sizes.size()) { in get_channels_last_strides_2d()
71 strides[3] = sizes[1]; in get_channels_last_strides_2d()
72 strides[2] = strides[3] * sizes[3]; in get_channels_last_strides_2d()
73 strides[0] = strides[2] * sizes[2]; in get_channels_last_strides_2d()
77 strides[2] = sizes[0]; in get_channels_last_strides_2d()
78 strides[1] = strides[2] * sizes[2]; in get_channels_last_strides_2d()
82 false, "ChannelsLast2d doesn't support size ", sizes.size()); in get_channels_last_strides_2d()
86 inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) { in get_channels_last_strides_2d() argument
[all …]
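The MemoryFormat.h excerpt builds channels-last (NHWC memory order) strides from NCHW-ordered sizes: the stride of W is C, the stride of H is W * C, the stride of N is H * W * C, and the stride of C is 1 (the line setting it falls outside the excerpt, so treat that as an assumption). A small worked sketch of that rule, with a function name of my own choosing rather than the c10 source:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Channels-last strides for a 4-D tensor whose sizes are given in
// NCHW order, following the rule shown in the excerpt above.
std::vector<int64_t> channels_last_strides_2d(const std::vector<int64_t>& sizes) {
  assert(sizes.size() == 4);
  std::vector<int64_t> strides(4);
  strides[1] = 1;                      // C moves fastest in memory (assumed)
  strides[3] = sizes[1];               // W stride = C
  strides[2] = strides[3] * sizes[3];  // H stride = W * C
  strides[0] = strides[2] * sizes[2];  // N stride = H * W * C
  return strides;
}

int main() {
  // sizes = {N=2, C=3, H=4, W=5}  ->  strides = {60, 1, 15, 3}
  const auto strides = channels_last_strides_2d({2, 3, 4, 5});
  assert(strides[0] == 60 && strides[1] == 1 && strides[2] == 15 && strides[3] == 3);
  return 0;
}
```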
/aosp_15_r20/external/executorch/extension/tensor/
tensor_ptr_maker.h
22 * properties, such as type, sizes, data pointer, dimension order, strides, and
124 std::vector<executorch::aten::SizesType> sizes, in TensorPtrMaker() argument
126 : sizes_(std::move(sizes)), data_(data), type_(type) {} in TensorPtrMaker()
132 std::vector<executorch::aten::SizesType> sizes,
148 * pointer and tensor sizes.
156 * @param sizes A vector specifying the size of each dimension.
162 std::vector<executorch::aten::SizesType> sizes,
164 return TensorPtrMaker(data, std::move(sizes), type);
168 * Creates a TensorPtr from a raw data pointer and tensor sizes, with an
177 * @param sizes A vector specifying the size of each dimension.
[all …]
/aosp_15_r20/packages/apps/Camera2/src/com/android/camera/settings/
ResolutionUtil.java
80 * A resolution bucket holds a list of sizes that are of a given aspect
86 * This is a sorted list of sizes, going from largest to smallest.
88 public List<Size> sizes = new LinkedList<Size>(); field in ResolutionUtil.ResolutionBucket
90 * This is the head of the sizes array.
101 * into the sizes array and update appropriate members.
106 sizes.add(size); in add()
107 Collections.sort(sizes, new Comparator<Size>() { in add()
115 maxPixels = sizes.get(0).width() * sizes.get(0).height(); in add()
120 * Given a list of camera sizes, this uses some heuristics to decide which
121 * options to present to a user. It currently returns up to 3 sizes for each
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/
NestedTensorImpl.cpp
25 TORCH_INTERNAL_ASSERT(nested_sizes.sizes() == nested_strides.sizes()); in validate_nested_tensor_metadata()
74 inline std::vector<int64_t> construct_opt_sizes(const at::Tensor& sizes) { in construct_opt_sizes() argument
77 if (sizes.dim() == 0) { in construct_opt_sizes()
80 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(sizes.dim() == 2); in construct_opt_sizes()
81 std::vector<int64_t> result(1, sizes.sizes()[0]); in construct_opt_sizes()
82 if (sizes.dim() > 0) { in construct_opt_sizes()
84 const int64_t* sizes_ptr = sizes.const_data_ptr<int64_t>(); in construct_opt_sizes()
85 result.resize(nested_dim + sizes.sizes()[1]); in construct_opt_sizes()
86 int64_t sizes_size_0 = sizes.sizes()[0]; in construct_opt_sizes()
87 int64_t sizes_size_1 = sizes.sizes()[1]; in construct_opt_sizes()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/api/
Tensor.cpp
15 const std::vector<int64_t>& sizes) { in calc_contiguous_strides() argument
16 int64_t ndim = static_cast<int64_t>(sizes.size()); in calc_contiguous_strides()
22 for (int i = static_cast<int>(sizes.size()) - 2; i >= 0; --i) { in calc_contiguous_strides()
23 running_product *= sizes.at(i + 1); in calc_contiguous_strides()
32 const std::vector<int64_t>& sizes) { in calc_channels_last_strides() argument
33 std::vector<int64_t> strides(sizes.size()); in calc_channels_last_strides()
35 switch (sizes.size()) { in calc_channels_last_strides()
38 strides.at(3) = sizes.at(1); in calc_channels_last_strides()
39 strides.at(2) = strides.at(3) * sizes.at(3); in calc_channels_last_strides()
40 strides.at(0) = strides.at(2) * sizes.at(2); in calc_channels_last_strides()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
cuda_apply_test.cpp
22 int sizes[] = {4, 4}; in TEST() local
24 ::at::cuda::detail::TensorInfo<void, int> ti{nullptr, 2, sizes, strides}; in TEST()
27 ASSERT_EQ_CUDA(ti.sizes[0], (4 * 4)); in TEST()
34 int sizes[] = {6, 3, 7}; in TEST() local
36 ::at::cuda::detail::TensorInfo<void, int> ti{nullptr, 3, sizes, strides}; in TEST()
39 ASSERT_EQ_CUDA(ti.sizes[0], (6 * 3 * 7)); in TEST()
45 int sizes[] = {4, 3, 2}; in TEST() local
47 ::at::cuda::detail::TensorInfo<void, int> ti{nullptr, 3, sizes, strides}; in TEST()
50 ASSERT_EQ_CUDA(ti.sizes[0], (4 * 3)); in TEST()
51 ASSERT_EQ_CUDA(ti.sizes[1], 2); in TEST()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/proto/
proto_op_test_base.py
55 test_case.sizes.append(0)
60 test_case.sizes.append(0)
65 test_case.sizes.append(0)
70 test_case.sizes.append(0)
75 test_case.sizes.append(0)
80 test_case.sizes.append(0)
85 test_case.sizes.append(0)
90 test_case.sizes.append(0)
95 test_case.sizes.append(0)
100 test_case.sizes.append(0)
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/core/
tensor_type.cpp
14 bool possible_cross_dimension_overlap(c10::IntArrayRef sizes, c10::IntArrayRef strides) { in possible_cross_dimension_overlap() argument
15 int n_dim = static_cast<int>(sizes.size()); in possible_cross_dimension_overlap()
33 …if (sizes[stride_indices[i]] != 1 && strides[stride_indices[i]] < sizes[stride_indices[i-1]] * str… in possible_cross_dimension_overlap()
106 auto sizes = ss.sizes().value(); in operator <<() local
113 if(sizes[i].is_static()) { in operator <<()
114 os << sizes[i]; in operator <<()
151 at::IntArrayRef sizes, in computeStrideProps() argument
154 int n_dim = static_cast<int>(sizes.size()); in computeStrideProps()
157 // 1. input sizes/strides fails format check; in computeStrideProps()
165 // sizes: [8, 1, 10, 16] in computeStrideProps()
[all …]
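The tensor_type.cpp excerpt checks for possible cross-dimension memory overlap: dimensions are ordered by ascending stride, and each dimension of size greater than 1 must have a stride no smaller than the previous dimension's size times its stride. The sketch below is modeled on that visible condition only and is not a copy of the ATen implementation; edge cases the real code may handle (zero strides, empty tensors) are ignored.

```cpp
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Heuristic overlap check based on the condition visible in the excerpt
// above: after sorting dimensions by stride, each non-size-1 dimension's
// stride must cover everything addressed by the previous dimension.
bool possible_cross_dimension_overlap(
    const std::vector<int64_t>& sizes,
    const std::vector<int64_t>& strides) {
  const int n_dim = static_cast<int>(sizes.size());
  std::vector<int> stride_indices(n_dim);
  std::iota(stride_indices.begin(), stride_indices.end(), 0);
  std::sort(stride_indices.begin(), stride_indices.end(),
            [&](int a, int b) { return strides[a] < strides[b]; });

  for (int i = 1; i < n_dim; ++i) {
    const int cur = stride_indices[i];
    const int prev = stride_indices[i - 1];
    if (sizes[cur] != 1 && strides[cur] < sizes[prev] * strides[prev]) {
      return true;  // Two different indices can map to the same address.
    }
  }
  return false;
}

// Example: sizes {2, 2} with strides {1, 1} are flagged as overlapping
// (indices (0,1) and (1,0) hit the same element); contiguous strides
// {2, 1} are not.
```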
/aosp_15_r20/external/skia/src/core/
SkVertices.cpp
41 struct SkVertices::Sizes { struct in SkVertices
42 Sizes(const Desc& desc) { in Sizes() argument
111 Sizes sizes(desc); in init() local
112 if (!sizes.isValid()) { in init()
117 void* storage = ::operator new (sizes.fTotal); in init()
118 if (sizes.fBuilderTriFanISize) { in init()
119 fIntermediateFanIndices.reset(new uint8_t[sizes.fBuilderTriFanISize]); in init()
134 fVertices->fPositions = (SkPoint*) advance(sizes.fVSize); in init()
135 fVertices->fTexs = (SkPoint*) advance(sizes.fTSize); in init()
136 fVertices->fColors = (SkColor*) advance(sizes.fCSize); in init()
[all …]
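The SkVertices.cpp excerpt computes a Sizes record, makes one allocation of sizes.fTotal bytes, and then carves it into the position, tex-coord, and color arrays with an advancing pointer. Below is a generic C++ sketch of that single-allocation layout pattern; the types and field names are illustrative stand-ins, not Skia's, and copying of the storage object is deliberately not handled.

```cpp
#include <cstddef>
#include <cstdint>
#include <new>

struct Point { float x, y; };
using Color = uint32_t;

// Carve one heap block into consecutive sub-arrays, in the style of the
// SkVertices builder shown above: compute each section's byte size,
// allocate the total once, then hand out regions with an advancing cursor.
struct VertexStorage {
  void* block = nullptr;
  Point* positions = nullptr;
  Point* texs = nullptr;
  Color* colors = nullptr;

  explicit VertexStorage(size_t vertexCount) {
    const size_t vSize = vertexCount * sizeof(Point);
    const size_t tSize = vertexCount * sizeof(Point);
    const size_t cSize = vertexCount * sizeof(Color);
    block = ::operator new(vSize + tSize + cSize);

    char* cursor = static_cast<char*>(block);
    auto advance = [&cursor](size_t bytes) {
      char* p = cursor;
      cursor += bytes;
      return static_cast<void*>(p);
    };
    positions = static_cast<Point*>(advance(vSize));
    texs = static_cast<Point*>(advance(tSize));
    colors = static_cast<Color*>(advance(cSize));
  }

  ~VertexStorage() { ::operator delete(block); }
};
```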
