/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
    PixelShuffleKernel.cpp
        line 34:  int64_t stride_c = S * S * height * width;  (local, in cpu_pixel_shuffle())
        line 132: int64_t stride_c = height * S * width * S;  (local, in cpu_pixel_unshuffle())
        line 178: int64_t stride_c = 1;                       (local, in cpu_pixel_unshuffle_channels_last())

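The PixelShuffleKernel.cpp hits above compute the distance between consecutive output channels when the input is viewed as (C, S, S, H, W); the channels-last variant at line 178 has stride_c = 1 because channels are innermost in that layout. Below is a minimal, self-contained sketch of how such a stride drives the index arithmetic; the loop structure and the pixel_shuffle name are illustrative assumptions, not the PyTorch kernel itself.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Standalone pixel shuffle sketch: input is a contiguous (C*S*S, H, W)
    // tensor, output is (C, H*S, W*S).  stride_c mirrors the
    // "S * S * height * width" expression from the search hits: it is the
    // element distance between consecutive output channels when the input is
    // viewed as (C, S, S, H, W).
    void pixel_shuffle(const std::vector<float>& in, std::vector<float>& out,
                       int64_t C, int64_t S, int64_t H, int64_t W) {
      const int64_t stride_c = S * S * H * W;
      const int64_t out_h = H * S, out_w = W * S;
      for (int64_t c = 0; c < C; ++c)
        for (int64_t h = 0; h < H; ++h)
          for (int64_t s1 = 0; s1 < S; ++s1)
            for (int64_t w = 0; w < W; ++w)
              for (int64_t s2 = 0; s2 < S; ++s2) {
                // source index in the (C, S, S, H, W) view of the input
                const int64_t src = c * stride_c + ((s1 * S + s2) * H + h) * W + w;
                // destination index in the (C, H*S, W*S) output
                const int64_t dst = (c * out_h + h * S + s1) * out_w + w * S + s2;
                out[dst] = in[src];
              }
    }

    int main() {
      const int64_t C = 1, S = 2, H = 2, W = 2;
      std::vector<float> in(C * S * S * H * W);
      for (int64_t i = 0; i < static_cast<int64_t>(in.size()); ++i)
        in[i] = static_cast<float>(i);
      std::vector<float> out(in.size());
      pixel_shuffle(in, out, C, S, H, W);
      for (float v : out) std::cout << v << ' ';
      std::cout << '\n';
    }

The executorch op_pixel_shuffle.cpp hit below uses the same stride expression; the unshuffle variants invert the mapping.
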
/aosp_15_r20/external/executorch/kernels/portable/cpu/
    op_pixel_unshuffle.cpp
        line 36: const auto stride_c = S * S * height * width;  (local, in pixel_unshuffle_impl())
    op_pixel_shuffle.cpp
        line 33: const auto stride_c = S * S * height * width;  (local, in pixel_shuffle_impl())

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/stream_executor/
    stream.h
        line 923:  int64_t stride_c, int batch_count, blas::ComputationType computation_type,  (in ThenBlasGemmStridedBatchedWithAlgorithm())
        line 1038: int64_t stride_c, int batch_count) {  (in ThenBlasGemmStridedBatched())

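In the ThenBlasGemmStridedBatched* declarations above, stride_c is the element offset between consecutive C matrices of a batch. The reference loop below is a plain C++ illustration of what a strided batched GEMM computes and how stride_c is applied; it is not the StreamExecutor or BLAS implementation, and all names in it are assumptions.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Reference (non-optimized) strided batched GEMM, column-major like BLAS:
    //   C[batch] = alpha * A[batch] * B[batch] + beta * C[batch]
    // stride_a/b/c give the element distance between consecutive matrices of a
    // batch, so matrix `batch` of C starts at c + batch * stride_c.
    void gemm_strided_batched(int64_t m, int64_t n, int64_t k, float alpha,
                              const float* a, int64_t lda, int64_t stride_a,
                              const float* b, int64_t ldb, int64_t stride_b,
                              float beta, float* c, int64_t ldc, int64_t stride_c,
                              int batch_count) {
      for (int batch = 0; batch < batch_count; ++batch) {
        const float* A = a + batch * stride_a;
        const float* B = b + batch * stride_b;
        float* C = c + batch * stride_c;
        for (int64_t j = 0; j < n; ++j)
          for (int64_t i = 0; i < m; ++i) {
            float acc = 0.f;
            for (int64_t p = 0; p < k; ++p) acc += A[i + p * lda] * B[p + j * ldb];
            C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
          }
      }
    }

    int main() {
      const int64_t m = 2, n = 2, k = 2;
      const int batch_count = 2;
      // Densely packed batches: the stride of each matrix is its element count.
      std::vector<float> a(m * k * batch_count, 1.f), b(k * n * batch_count, 2.f),
          c(m * n * batch_count, 0.f);
      gemm_strided_batched(m, n, k, 1.f, a.data(), m, m * k, b.data(), k, k * n,
                           0.f, c.data(), m, m * n, batch_count);
      for (float v : c) std::cout << v << ' ';  // each entry is 1*2 + 1*2 = 4
      std::cout << '\n';
    }

For densely packed column-major batches the natural choice is stride_c = ldc * n, which is what main() passes.
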
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
    depthwise_conv_grad_op.cc
        line 571:  const int64_t stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in DepthwiseConv2dNativeBackpropInputOp())
        line 1074: const int64_t stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in DepthwiseConv2dNativeBackpropFilterOp())
    depthwise_conv_op.cc
        line 289: const int64_t stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in DepthwiseConv2dNativeOp())
    conv_ops_using_gemm.cc
        line 447: const int64_t stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in Conv2DUsingGemmOp())
    pooling_ops_3d.cc
        line 696: const int32_t stride_c = GetTensorDim(stride_, data_format_, 'C');  (local, in MaxPooling3dGradGradOp())
    conv_grad_filter_ops.cc
        line 275: int stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in Conv2DBackpropFilterOp())
    conv_ops_fused_image_transform.cc
        line 639: const int64_t stride_c = GetTensorDim(strides_, FORMAT_NHWC, 'C');  (local, in FusedResizeConv2DUsingGemmOp())
    conv_ops.cc
        line 517: const int64_t stride_c = GetTensorDim(strides, data_format, 'C');  (local, in InitConv2DParameters())

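Every tensorflow/core/kernels hit above reads the channel-dimension stride out of the op's strides attribute with GetTensorDim(..., 'C'). The snippet below is a simplified stand-in for that lookup, not the TensorFlow helper itself; the channel_stride name and the error handling are assumptions. It also shows the typical follow-up check: Conv2D-style kernels generally support only spatial strides, so the channel stride is required to be 1.

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Simplified stand-in for GetTensorDim(strides, data_format, 'C'): given the
    // per-dimension strides attribute of a conv/pool op and its data format,
    // return the stride along the channel dimension.
    int64_t channel_stride(const std::vector<int64_t>& strides,
                           const std::string& data_format) {
      if (strides.size() != 4) throw std::invalid_argument("expected 4 strides");
      // 'C' is the last dimension in NHWC and the second dimension in NCHW.
      const int c_index = (data_format == "NHWC") ? 3 : 1;
      return strides[c_index];
    }

    int main() {
      const std::vector<int64_t> strides = {1, 2, 2, 1};  // N, H, W, C for NHWC
      const int64_t stride_c = channel_stride(strides, "NHWC");
      // Striding over channels is generally unsupported in these kernels.
      if (stride_c != 1) throw std::invalid_argument("channel stride must be 1");
      return 0;
    }
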
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/mkl/
    mkl_conv_ops.h
        line 625: int stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in MklConvBackpropCommonOp())
    mkl_conv_ops.cc
        line 576: const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');  (local, in MklConvOp())

/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/
    eval_const_tensor.cc
        line 139: InferenceContext* stride_c = refiner.GetContext(stride_node);  (local, in TryToInferTensorOutputFromStridedSliceNode())

/aosp_15_r20/external/pytorch/aten/src/ATen/cuda/tunable/
    GemmHipblaslt.h
        line 436: int64_t stride_c = GetStrideCFromParams<CT>(params);  (local, in Call())
    GemmCommon.h
        line 237: int64_t stride_c;  (member)

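GemmCommon.h declares stride_c as a member of a strided-batched GEMM parameter block, and the GemmHipblaslt.h hit reads it back through GetStrideCFromParams. The sketch below only illustrates the role of such a field for densely packed output matrices; apart from stride_c itself, every field name and the packed_stride_c helper are assumptions rather than the PyTorch definitions.

    #include <cstdint>

    // Rough sketch of a strided-batched GEMM parameter block.  Only stride_c is
    // taken from the listing above; the other fields are illustrative.
    struct GemmStridedBatchedParamsSketch {
      int64_t m, n, k;
      int64_t lda, stride_a;
      int64_t ldb, stride_b;
      int64_t ldc, stride_c;  // offset between consecutive C matrices in the batch
      int64_t batch;
    };

    // For densely packed column-major output matrices, the natural choice is
    // stride_c = ldc * n (one full C matrix per batch entry).
    int64_t packed_stride_c(const GemmStridedBatchedParamsSketch& p) {
      return p.ldc * p.n;
    }

    int main() {
      GemmStridedBatchedParamsSketch p{};
      p.m = 4; p.n = 3; p.k = 8; p.ldc = 4; p.batch = 16;
      p.stride_c = packed_stride_c(p);  // 12 elements between C matrices
      return p.stride_c == 12 ? 0 : 1;
    }
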
/aosp_15_r20/external/tensorflow/tensorflow/stream_executor/rocm/
    rocm_blas.cc
        line 645:  DeviceMemoryBase *c, blas::DataType type_c, int ldc, int64_t stride_c,  (in DoBlasGemmStridedBatchedWithAlgorithm())
        line 1074: DeviceMemoryBase *c, int ldc, int64_t stride_c, int batch_count) {  (in DoBlasGemmStridedBatched())

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/stream_executor/cuda/
    cuda_blas.cc
        line 1049: DeviceMemoryBase *c, blas::DataType type_c, int ldc, int64_t stride_c,  (in DoBlasInternalImpl())
        line 1412: DeviceMemoryBase *c, int ldc, int64_t stride_c, int batch_count) {  (in DoBlasInternalImpl())

/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
    legacy_optimized_ops.h
        line 2490: int stride_c = n;  (local, in Conv())
    optimized_ops.h
        line 970: int stride_c = n;  (local, in Conv())

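In both TFLite Conv() hits, stride_c is simply n, presumably because the packed output matrix of the convolution-as-GEMM stores its n-sized dimension contiguously, so stepping along the other dimension skips exactly n elements. The snippet below only illustrates that indexing rule and is not the TFLite code.

    #include <cstdint>
    #include <vector>

    // Why a packed output matrix gets stride_c = n: when an (m x n) matrix keeps
    // its n-sized dimension contiguous, its leading stride equals n.
    int main() {
      const int m = 3, n = 5;
      const int stride_c = n;  // leading stride of the packed output
      std::vector<float> c(m * n, 0.f);
      for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
          c[i * stride_c + j] = static_cast<float>(i * 10 + j);
      return c[2 * stride_c + 4] == 24.f ? 0 : 1;
    }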