
Searched full:contiguous (Results 1 – 25 of 5836) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/test/
memory_format_test.cpp
13 for (auto memory_format : {at::MemoryFormat::ChannelsLast, at::MemoryFormat::Contiguous}) { in TEST()
20 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
23 // Ambiguous case where we fallback to Contiguous; in TEST()
25 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
30 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
81 sliceStepTwo(t, 1, MemoryFormat::Contiguous); in TEST()
82 sliceStepTwo(t, 2, MemoryFormat::Contiguous); in TEST()
83 sliceStepTwo(t, 3, MemoryFormat::Contiguous); in TEST()
86 sliceStepTwo(t, 2, MemoryFormat::Contiguous); in TEST()
87 sliceStepTwo(t, 3, MemoryFormat::Contiguous); in TEST()
[all …]
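These tests exercise `suggest_memory_format()`, which decides between channels-last and default (NCHW) contiguous layouts from a tensor's strides, falling back to `Contiguous` in ambiguous cases. A rough Python-level sketch of the same distinction (not the C++ test itself; the shapes are illustrative assumptions):

```python
import torch

# A freshly created 4-D tensor is contiguous in the default (NCHW) sense.
x = torch.randn(2, 3, 4, 5)
assert x.is_contiguous(memory_format=torch.contiguous_format)

# Converting to channels-last only changes the strides, not the values.
y = x.to(memory_format=torch.channels_last)
assert y.is_contiguous(memory_format=torch.channels_last)
assert not y.is_contiguous()  # no longer contiguous in the default sense

# For ambiguous stride patterns (e.g. size-1 dims) PyTorch falls back to the
# default contiguous format, which is what the C++ test above asserts.
```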
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
BucketizationUtils.h
15 // original values given by raw_*. If an original value is not contiguous, will make a contiguous c…
19 // corresponding raw_* version should be used since it was already contiguous of the right type.
32 …TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the p… in searchsorted_maybe_trim_input_tensors()
33 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous inp… in searchsorted_maybe_trim_input_tensors()
35 trimmed_input = raw_input.contiguous(); in searchsorted_maybe_trim_input_tensors()
38 …TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the perf… in searchsorted_maybe_trim_input_tensors()
39 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous bou… in searchsorted_maybe_trim_input_tensors()
41 trimmed_boundaries = raw_boundaries.contiguous(); in searchsorted_maybe_trim_input_tensors()
44 …TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the perfor… in searchsorted_maybe_trim_input_tensors()
45 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sor… in searchsorted_maybe_trim_input_tensors()
[all …]
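The warnings quoted above fire when `torch.searchsorted` receives non-contiguous inputs and must copy them to contiguous buffers internally. A small sketch of how a caller avoids that copy (values and shapes are made up for illustration):

```python
import torch

boundaries = torch.tensor([1.0, 3.0, 5.0, 7.0, 9.0])

# A transposed value tensor is a strided view and is not contiguous, so
# searchsorted would warn and copy it internally.
values = torch.arange(6.0).reshape(2, 3).t()
assert not values.is_contiguous()

# Making the inputs contiguous up front avoids the extra copy and the warning.
idx = torch.searchsorted(boundaries, values.contiguous())
```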
WeightNorm.cpp
32 // I assume tensor.contiguous(), view(), norm(), etc. here will dispatch through VariableType. in norm_except_dim()
38 return v.contiguous().view({v.size(0), -1}).norm(pow, 1).view(output_size); in norm_except_dim()
42 return v.contiguous().view({-1, v.size(v.dim() - 1)}).norm(pow, 0).view(output_size); in norm_except_dim()
54 auto w = at::empty_like(v, at::MemoryFormat::Contiguous); in weight_norm_cpu()
71 TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous"); in weight_norm_backward_cpu()
72 TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous"); in weight_norm_backward_cpu()
73 TORCH_CHECK(saved_norm.is_contiguous(), "saved_norm must be contiguous"); in weight_norm_backward_cpu()
75 auto grad_v = at::empty_like(saved_v, at::MemoryFormat::Contiguous); in weight_norm_backward_cpu()
76 auto grad_g = at::empty_like(saved_g, at::MemoryFormat::Contiguous); in weight_norm_backward_cpu()
93 auto v = v_in.contiguous(); in _weight_norm()
[all …]
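`norm_except_dim` reduces the norm of `v` over every dimension except one, which is why the quoted lines call `.contiguous()` before `.view()` (that reshape requires a contiguous layout). A hedged Python equivalent of the dim == 0 case shown above (helper name and shapes are assumptions):

```python
import torch

def norm_except_dim0(v: torch.Tensor, p: float = 2.0) -> torch.Tensor:
    # Flatten everything except dim 0; .view() needs contiguous memory here,
    # mirroring v.contiguous().view({v.size(0), -1}) in the C++ snippet.
    out_size = [v.size(0)] + [1] * (v.dim() - 1)
    return v.contiguous().view(v.size(0), -1).norm(p, dim=1).view(out_size)

v = torch.randn(4, 3, 5)
g_norm = norm_except_dim0(v)  # shape (4, 1, 1)
```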
NaiveDilatedConvolution.cpp
536 … memory_format = use_channels_last ? at::MemoryFormat::ChannelsLast : at::MemoryFormat::Contiguous; in slow_conv_dilated2d_cpu()
556 (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cpu()
557 const Tensor weight_ = weight.contiguous(memory_format); in slow_conv_dilated2d_cpu()
558 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated2d_cpu()
608 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cpu()
609 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated3d_cpu()
610 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated3d_cpu()
640 … memory_format = use_channels_last ? at::MemoryFormat::ChannelsLast : at::MemoryFormat::Contiguous; in slow_conv_dilated2d_backward_cpu()
657 (is_batch ? grad_output.contiguous(memory_format) in slow_conv_dilated2d_backward_cpu()
658 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cpu()
[all …]
Convolution.cpp
444 if (cudnn_conv_suggest_memory_format(input, weight) == at::MemoryFormat::Contiguous) { in use_cudnn()
462 …if (cudnn_conv_suggest_memory_format(input, weight) != at::MemoryFormat::Contiguous && use_cudnn(i… in use_cudnn_depthwise()
473 …input.ndimension() == 4 && // TODO: 5-D contiguous depthwise is not supported yet, need benchmar… in use_cudnn_depthwise()
487 …input.ndimension() == 4 && // TODO: 5-D contiguous depthwise is not supported yet, need benchmar… in use_cudnn_depthwise()
832 return tensor.narrow(dim, n * g, n).contiguous(memory_format); in subtensor()
1341 input = input.contiguous(); in select_conv_backend()
1416 at::MemoryFormat backend_memory_format = at::MemoryFormat::Contiguous; in determine_backend_memory_format()
1433 …backend_memory_format = (k == 5) ? at::MemoryFormat::Contiguous /*at::MemoryFormat::ChannelsLast3d… in determine_backend_memory_format()
1455 backend_memory_format = at::MemoryFormat::Contiguous; in determine_backend_memory_format()
1505 input = input.contiguous(); in _convolution()
[all …]
SegmentReduce.cpp
136 // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel in _segment_reduce_lengths_cpu_kernel()
137 TORCH_CHECK(data.is_contiguous(), "Expected data to be contiguous."); in _segment_reduce_lengths_cpu_kernel()
138 TORCH_CHECK(lengths.is_contiguous(), "Expected lengths to be contiguous."); in _segment_reduce_lengths_cpu_kernel()
162 // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel in _segment_reduce_offsets_cpu_kernel()
163 TORCH_CHECK(data.is_contiguous(), "Expected data to be contiguous."); in _segment_reduce_offsets_cpu_kernel()
164 TORCH_CHECK(offsets.is_contiguous(), "Expected offsets to be contiguous."); in _segment_reduce_offsets_cpu_kernel()
409 const auto data_contig = data.contiguous(); in segment_reduce_kernel()
422 const auto offsets_contig = offsets_value.contiguous(); in segment_reduce_kernel()
449 const auto lengths_contig = lengths_value.contiguous(); in segment_reduce_kernel()
501 const auto grad_contig = grad.contiguous(); in _segment_reduce_backward_kernel()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
AdaptiveMaxPoolKernel.cpp
23 auto input = input_.contiguous(); in cpu_adaptive_max_pool2d()
24 auto output = output_.contiguous(); in cpu_adaptive_max_pool2d()
25 auto indices = indices_.contiguous(); in cpu_adaptive_max_pool2d()
94 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
95 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
96 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
211 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
212 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
213 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
346 auto grad_output = grad_output_.contiguous(); in cpu_adaptive_max_pool2d_backward()
[all …]
PaddingKernel.cpp
136 auto input = input_.contiguous(); in cpu_padding()
137 auto output = output_.contiguous(); in cpu_padding()
243 auto input = input_.contiguous(memory_format); in cpu_padding_channels_last()
244 auto output = output_.contiguous(memory_format); in cpu_padding_channels_last()
317 auto grad_output = grad_output_.contiguous(); in cpu_padding_backward()
318 auto grad_input = grad_input_.contiguous(); in cpu_padding_backward()
405 auto grad_input = grad_input_.contiguous(memory_format); in cpu_padding_backward_channels_last()
406 auto grad_output = grad_output_.contiguous(memory_format); in cpu_padding_backward_channels_last()
476 // non-batch mode 4d input will be considered as Contiguous in format of CDHW
478 return input.dim() == 4 ? at::MemoryFormat::Contiguous : input.suggest_memory_format(); in padding_memory_format_3d()
[all …]
AdaptiveAvgPoolKernel.cpp
22 auto input = input_.contiguous(); in cpu_adaptive_avg_pool2d()
23 auto output = output_.contiguous(); in cpu_adaptive_avg_pool2d()
77 auto input = input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
78 auto output = output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
164 auto input = input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
165 auto output = output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
260 auto grad_output = grad_output_.contiguous(); in cpu_adaptive_avg_pool2d_backward()
261 auto grad_input = grad_input_.contiguous(); in cpu_adaptive_avg_pool2d_backward()
311 auto grad_input = grad_input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_backward_channels_last()
312 auto grad_output = grad_output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_backward_channels_last()
[all …]
MaxPoolKernel.cpp
261 auto input = input_.contiguous(); in cpu_max_pool()
262 auto output = output_.contiguous(); in cpu_max_pool()
263 auto indices = indices_.contiguous(); in cpu_max_pool()
391 auto input = input_.contiguous(memory_format); in cpu_max_pool_channels_last()
392 auto output = output_.contiguous(memory_format); in cpu_max_pool_channels_last()
393 auto indices = indices_.contiguous(memory_format); in cpu_max_pool_channels_last()
477 auto grad_output = grad_output_.contiguous(); in cpu_max_pool_backward()
478 auto indices = indices_.contiguous(); in cpu_max_pool_backward()
479 auto grad_input = grad_input_.contiguous(); in cpu_max_pool_backward()
550 auto grad_input = grad_input_.contiguous(memory_format); in cpu_max_pool_backward_channels_last()
[all …]
AvgPoolKernel.cpp
27 auto input = input_.contiguous(); in cpu_avg_pool2d()
28 auto output = output_.contiguous(); in cpu_avg_pool2d()
115 auto input = input_.contiguous(memory_format); in cpu_avg_pool2d_channels_last()
116 auto output = output_.contiguous(memory_format); in cpu_avg_pool2d_channels_last()
229 auto input = input_.contiguous(memory_format); in cpu_avg_pool2d_channels_last()
230 auto output = output_.contiguous(memory_format); in cpu_avg_pool2d_channels_last()
357 auto grad_output = grad_output_.contiguous(); in cpu_avg_pool2d_backward()
358 auto grad_input = grad_input_.contiguous(); in cpu_avg_pool2d_backward()
426 auto grad_input = grad_input_.contiguous(memory_format); in cpu_avg_pool2d_backward_channels_last()
427 auto grad_output = grad_output_.contiguous(memory_format); in cpu_avg_pool2d_backward_channels_last()
[all …]
/aosp_15_r20/external/executorch/backends/example/example_backend_delegate_passes/
permute_memory_formats_pass.py
22 after pass: x -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
25 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
28 …-> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> linear -> to_dim…
53 … the pattern is conv, x -> conv -> out will become x -> conv -> to_dim(contiguous) -> out when per…
54 …conv -> conv -> out, it will become x -> conv -> to_dim(contiguous) -> conv -> to_dim(contiguous) …
59 … # like, x -> conv -> out will become x -> conv -> to_dim(contiguous) -> out
77 … # like, x -> conv -> conv -> out will become x -> conv -> to_dim(contiguous) -> conv -> out
103 …tern is conv, x -> conv -> to_dim(contiguous) -> out will become x -> to_dim(channel_last) -> conv…
104 …contiguous) -> conv -> to_dim(contiguous) -> out, it will become x -> to_dim(channel_last) -> conv…
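The pass docstrings above describe inserting explicit memory-format conversions around delegated ops: channels-last going into conv, back to contiguous afterwards. At the eager level the same dance looks roughly like this (a conceptual sketch, not the ExecuTorch pass itself; the module and shapes are assumptions):

```python
import torch

conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
x = torch.randn(1, 3, 16, 16)

# "to_dim(channel_last)": convert the activation before the conv.
x_cl = x.to(memory_format=torch.channels_last)
y_cl = conv(x_cl)

# "to_dim_(contiguous)": convert the result back to the default layout so
# downstream ops that expect NCHW-contiguous data still work.
y = y_cl.contiguous()
assert y.is_contiguous()
```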
merge_to_dim_pass.py
19 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
20 after pass: x -> to_dim(channel_last) -> conv -> conv -> to_dim_(contiguous) -> out
23 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
24 … |-------------> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
25 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
26 … |--------------> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
29 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
30 y -> to_dim(channel_last) -> conv -> to_dim_(contiguous) ---------|
31 after pass: x -> to_dim(channel_last) -> conv -> conv -> to_dim_(contiguous) -> out
/aosp_15_r20/external/cronet/third_party/rust/chromium_crates_io/vendor/bytemuck-1.15.0/src/
contiguous.rs
20 /// # use bytemuck::Contiguous;
30 /// unsafe impl Contiguous for Foo {
48 /// Precisely, the guarantees you must uphold when implementing `Contiguous` for
65 /// gets a `C` that implements `Contiguous`, it is in the appropriate range.
68 /// `Contiguous::from_integer` and `Contiguous::into_integer`.
78 pub unsafe trait Contiguous: Copy + 'static { trait
82 /// Contiguous is broadly intended for use with fieldless enums, and for
85 /// *unsound* to implement `Contiguous`!).
109 /// `Contiguous` on your type you **must not** override this method.
113 /// We will not panic for any correct implementation of `Contiguous`, but
[all …]
/aosp_15_r20/external/rust/android-crates-io/crates/bytemuck/src/
contiguous.rs
22 /// # use bytemuck::Contiguous;
32 /// unsafe impl Contiguous for Foo {
50 /// Precisely, the guarantees you must uphold when implementing `Contiguous` for
67 /// gets a `C` that implements `Contiguous`, it is in the appropriate range.
70 /// `Contiguous::from_integer` and `Contiguous::into_integer`.
80 pub unsafe trait Contiguous: Copy + 'static { trait
84 /// Contiguous is broadly intended for use with fieldless enums, and for
87 /// *unsound* to implement `Contiguous`!).
111 /// `Contiguous` on your type you **must not** override this method.
115 /// We will not panic for any correct implementation of `Contiguous`, but
[all …]
/aosp_15_r20/external/python/cpython3/Modules/clinic/
audioop.c.h
33 _PyArg_BadArgument("getsample", "argument 1", "contiguous buffer", args[0]); in audioop_getsample()
89 _PyArg_BadArgument("max", "argument 1", "contiguous buffer", args[0]); in audioop_max()
133 _PyArg_BadArgument("minmax", "argument 1", "contiguous buffer", args[0]); in audioop_minmax()
177 _PyArg_BadArgument("avg", "argument 1", "contiguous buffer", args[0]); in audioop_avg()
221 _PyArg_BadArgument("rms", "argument 1", "contiguous buffer", args[0]); in audioop_rms()
266 _PyArg_BadArgument("findfit", "argument 1", "contiguous buffer", args[0]); in audioop_findfit()
273 _PyArg_BadArgument("findfit", "argument 2", "contiguous buffer", args[1]); in audioop_findfit()
318 _PyArg_BadArgument("findfactor", "argument 1", "contiguous buffer", args[0]); in audioop_findfactor()
325 _PyArg_BadArgument("findfactor", "argument 2", "contiguous buffer", args[1]); in audioop_findfactor()
370 _PyArg_BadArgument("findmax", "argument 1", "contiguous buffer", args[0]); in audioop_findmax()
[all …]
/aosp_15_r20/external/executorch/runtime/core/exec_aten/testing_util/
tensor_factory.h
30 * sizes, assuming contiguous data.
70 // contiguous style, in where the strides should be sorted from high to low. in check_strides()
247 * or not specificed, the function will return a contiguous tensor based
295 * out. If empty or not specificed, the function will use a contiguous dim
348 * Given data in contiguous memory format, returns a new Tensor with the
352 * @param[in] data The data in contiguous memory format that the Tensor should
378 "Input tensor is not contiguous");
414 * Returns a new Tensor with the specified shape, containing contiguous
432 * contiguous data will all elements set to `value`.
449 * Returns a new Tensor with the specified shape, containing contiguous data
[all …]
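The factory's "assuming contiguous data" comment refers to deriving row-major strides from sizes when no strides are supplied. A minimal sketch of that stride computation (plain Python with an assumed helper name; not the ExecuTorch code itself):

```python
def contiguous_strides(sizes):
    """Row-major (contiguous) strides: each stride is the product of all
    later sizes, so the last dimension varies fastest."""
    strides = [1] * len(sizes)
    for i in range(len(sizes) - 2, -1, -1):
        strides[i] = strides[i + 1] * sizes[i + 1]
    return strides

assert contiguous_strides([2, 3, 4]) == [12, 4, 1]
```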
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
NaiveDilatedConvolution.cu
428 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cuda()
429 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated2d_cuda()
430 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated2d_cuda()
474 (is_batch ? grad_output.contiguous() in slow_conv_dilated2d_backward_cuda()
475 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
477 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
478 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated2d_backward_cuda()
534 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cuda()
535 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated3d_cuda()
536 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated3d_cuda()
[all …]
CUDAJitLoops.cuh
86 bool contiguous, in launch_jitted_unrolled_kernel() argument
101 desc, contiguous, dynamic_casting, scalar_pos); in launch_jitted_unrolled_kernel()
144 desc, /*contiguous=*/true, /*dynamic_casting=*/false, in launch_jitted_vectorized_kernel()
196 bool contiguous = iter.is_contiguous(); in jitted_gpu_kernel_generic() local
200 // - Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_generic()
202 // - Case 3: dynamic casting and contiguous in jitted_gpu_kernel_generic()
207 if (contiguous) { in jitted_gpu_kernel_generic()
208 // Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_generic()
223 storer, contiguous, scalar_pos, scalar_val, extra_args); in jitted_gpu_kernel_generic()
236 if (contiguous) { in jitted_gpu_kernel_generic()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/cuda/
NestedTensorTransformerUtils.cpp
77 * needing to call contiguous on the nested tensor input.
83 * @return A boolean indicating of contiguous needs to be called for input
161 * (1) get the storage of the contiguous nested tensor
306 q_t = q_t.contiguous(); in sdpa_nested_preprocessing_with_broadcast()
326 // to call contiguous in sdpa_nested_preprocessing_with_broadcast()
329 k_t = k_t.contiguous(); in sdpa_nested_preprocessing_with_broadcast()
332 v_t = v_t.contiguous(); in sdpa_nested_preprocessing_with_broadcast()
426 // to call contiguous in sdpa_nested_preprocessing()
428 q_t = q_t.contiguous(); in sdpa_nested_preprocessing()
431 k_t = k_t.contiguous(); in sdpa_nested_preprocessing()
[all …]
/aosp_15_r20/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/AArch64/
AArch64ExpandImm.cpp
104 /// starts a contiguous sequence of ones if we look at the bits from the LSB
114 /// ends a contiguous sequence of ones if we look at the bits from the LSB
137 /// Check whether the constant contains a sequence of contiguous ones,
139 /// sequence of contiguous ones with an ORR instruction.
148 /// We are also looking for constants like |S|A|B|E| where the contiguous
157 // Try to find the chunks which start/end a contiguous sequence of ones. in trySequenceOfOnes()
173 // Outside of the contiguous sequence of ones everything needs to be zero. in trySequenceOfOnes()
178 // If our contiguous sequence of ones wraps around from the MSB into the LSB, in trySequenceOfOnes()
179 // just swap indices and pretend we are materializing a contiguous sequence in trySequenceOfOnes()
180 // of zeros surrounded by a contiguous sequence of ones. in trySequenceOfOnes()
[all …]
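The LLVM code above looks for constants whose set bits form one contiguous run (possibly wrapping from MSB to LSB), since such values can be materialized with a single ORR of a logical immediate. A quick sketch of the non-wrapping check as a bit trick (an illustration, not the chunk-based LLVM logic):

```python
def is_contiguous_ones(x: int, width: int = 64) -> bool:
    """True if the set bits of x form one unbroken run, e.g. 0b000111000.
    Does not handle the wrap-around case (run spanning MSB to LSB) that the
    LLVM code also recognizes."""
    x &= (1 << width) - 1
    if x == 0:
        return False
    # (x | (x - 1)) fills the trailing zeros; adding 1 then clears the run.
    # If any bit of x survives, there was a second, separate run of ones.
    return ((x | (x - 1)) + 1) & x == 0

assert is_contiguous_ones(0b0001111000)
assert not is_contiguous_ones(0b0101)
```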
/aosp_15_r20/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64ExpandImm.cpp
106 /// starts a contiguous sequence of ones if we look at the bits from the LSB
116 /// ends a contiguous sequence of ones if we look at the bits from the LSB
139 /// Check whether the constant contains a sequence of contiguous ones,
141 /// sequence of contiguous ones with an ORR instruction.
150 /// We are also looking for constants like |S|A|B|E| where the contiguous
159 // Try to find the chunks which start/end a contiguous sequence of ones. in trySequenceOfOnes()
175 // Outside of the contiguous sequence of ones everything needs to be zero. in trySequenceOfOnes()
180 // If our contiguous sequence of ones wraps around from the MSB into the LSB, in trySequenceOfOnes()
181 // just swap indices and pretend we are materializing a contiguous sequence in trySequenceOfOnes()
182 // of zeros surrounded by a contiguous sequence of ones. in trySequenceOfOnes()
[all …]
/aosp_15_r20/external/pytorch/test/nn/
test_pooling.py
223 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
227 ref_input = input.detach().clone().contiguous().requires_grad_(True)
228 ref_grad = grad.detach().clone().contiguous()
248 input = input.contiguous(memory_format=torch.channels_last)
254 ref_input = input.detach().clone().contiguous().requires_grad_(True)
255 ref_grad = grad.detach().clone().contiguous()
319 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
324 ref_input = input.detach().clone().contiguous().requires_grad_(True)
325 ref_grad = grad.detach().clone().contiguous()
344 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/tensor/
placement_types.py
75 contiguous: bool = True,
97 if contiguous:
98 tensor_list = [t.contiguous() for t in tensor_list]
124 shard = shard.contiguous() if contiguous else shard
171 tensor, num_chunks, with_padding=True, contiguous=True
204 tensor, num_chunks, with_padding=True, contiguous=True
208 tensor = tensor.contiguous()
242 local_tensor = local_tensor.contiguous()
270 contiguous=False,
314 local_tensor = local_tensor.contiguous()
[all …]
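In the DTensor placement code, the `contiguous=True` flag matters because chunking a tensor along a non-leading dimension produces strided views, and collectives generally want dense buffers. A minimal illustration of why the per-chunk `.contiguous()` call exists (shapes and the dim choice are assumptions):

```python
import torch

t = torch.arange(24).reshape(4, 6)

# Chunking along dim=1 yields views that are not contiguous in memory.
chunks = torch.chunk(t, chunks=3, dim=1)
assert not chunks[0].is_contiguous()

# The placement code copies each shard into its own dense buffer, which is
# what `t.contiguous() for t in tensor_list` does in the snippet above.
dense_chunks = [c.contiguous() for c in chunks]
assert all(c.is_contiguous() for c in dense_chunks)
```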
/aosp_15_r20/external/pytorch/aten/src/ATen/cuda/
jiterator.cu
60 /*contiguous=*/true, /*dynamic_casting=*/false, in launch_jitted_vectorized_kernel_dynamic()
121 void* ic_ptr, void* oc_ptr, void* l_ptr, void* s_ptr, bool contiguous, bool dynamic_casting, in launch_jitted_unrolled_kernel_dynamic() argument
140 ss << contiguous << dynamic_casting; in launch_jitted_unrolled_kernel_dynamic()
155 contiguous, dynamic_casting, in launch_jitted_unrolled_kernel_dynamic()
200 bool contiguous = iter.is_contiguous(); in jitted_gpu_kernel_dynamic_impl() local
204 // - Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_dynamic_impl()
206 // - Case 3: dynamic casting and contiguous in jitted_gpu_kernel_dynamic_impl()
211 if (contiguous) { in jitted_gpu_kernel_dynamic_impl()
212 // Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_dynamic_impl()
231 ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref); in jitted_gpu_kernel_dynamic_impl()
[all …]
