Occurrences of gradInput

/aosp_15_r20/external/pytorch/aten/src/ATen/native/

ReplicationPadding.cpp
    181  Tensor& gradInput,  in replication_pad2d_backward_out_cpu_template()
    220  Tensor& gradInput,  in replication_pad3d_backward_out_cpu_template()
    300  Tensor& gradInput)  in replication_pad2d_backward_out_cpu()
    314  auto gradInput = at::zeros_like(input, input.suggest_memory_format());  in replication_pad2d_backward_cpu() [local]
    332  Tensor& gradInput)  in replication_pad3d_backward_out_cpu()
    346  auto gradInput = at::zeros_like(input, input.suggest_memory_format());  in replication_pad3d_backward_cpu() [local]

FractionalMaxPool3d.cpp
    263  scalar_t* gradInput,  in fractional_max_pool3d_backward_out_single_batch_frame()
    295  scalar_t* gradInput,  in fractional_max_pool3d_backward_out_frame()
    329  Tensor& gradInput,  in fractional_max_pool3d_backward_out_cpu_template()
    400  at::Tensor& gradInput) {  in fractional_max_pool3d_backward_out_cpu()
    417  Tensor gradInput = at::empty({0}, input.options());  in fractional_max_pool3d_backward_cpu() [local]

DilatedMaxPool3d.cpp
    113  Tensor& gradInput,  in max_pool3d_with_indices_backward_out_cpu_template()
    258  Tensor& gradInput)  in max_pool3d_with_indices_backward_out_cpu()
    283  auto gradInput = at::empty({0}, input.options());  in max_pool3d_with_indices_backward_cpu() [local]

AdaptiveAveragePooling3d.cpp
    231  Tensor& gradInput,  in adaptive_avg_pool3d_backward_out_cpu_template()
    334  Tensor& gradInput) {  in adaptive_avg_pool3d_backward_out_cpu()
    342  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in adaptive_avg_pool3d_backward_cpu() [local]

FractionalMaxPool2d.cpp
    224  scalar_t* gradInput,  in fractional_max_pool2d_backward_out_single_batch_frame()
    251  scalar_t* gradInput,  in fractional_max_pool2d_backward_out_frame()

Activation.cpp
    101  const Tensor& gradInput = maybe_get_output();  in TORCH_META_FUNC() [local]
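A pattern runs through all of the .cpp entries above: each op pairs a *_backward_out_*_template that fills a caller-provided gradInput with a functional *_backward_cpu wrapper that allocates gradInput locally (zeros_like where the kernel accumulates, empty or empty_like where it overwrites every element) and then delegates. Below is a minimal sketch of that two-tier shape, not the actual ATen code; the my_op_* names are hypothetical placeholders.

    // Sketch only; my_op_* is a placeholder, not a real ATen symbol.
    #include <ATen/ATen.h>

    // Template tier: writes into a pre-allocated gradInput.
    static void my_op_backward_out_cpu_template(
        at::Tensor& gradInput,
        const at::Tensor& gradOutput,
        const at::Tensor& input) {
      // ... route each gradOutput element back into gradInput here ...
    }

    // Functional tier: allocates gradInput locally, then delegates.
    at::Tensor my_op_backward_cpu(
        const at::Tensor& gradOutput,
        const at::Tensor& input) {
      // zeros_like because backward kernels typically accumulate with +=;
      // suggest_memory_format() preserves channels-last input layouts.
      auto gradInput = at::zeros_like(input, input.suggest_memory_format());
      my_op_backward_out_cpu_template(gradInput, gradOutput, input);
      return gradInput;
    }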
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

ReplicationPadding.cu
    66   PackedTensorAccessor64<scalar_t, 3> gradInput,  in replication_pad_backward_kernel()
    119  PackedTensorAccessor64<scalar_t, 4> gradInput,  in replication_pad_backward_kernel()
    187  PackedTensorAccessor64<scalar_t, 5> gradInput,  in replication_pad_backward_kernel()
    229  Tensor& gradInput,  in replication_pad2d_backward_out_cuda_template()
    362  Tensor& gradInput,  in replication_pad3d_backward_out_cuda_template()
    577  Tensor& gradInput)  in replication_pad2d_backward_out_cuda()
    595  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in replication_pad2d_backward_cuda() [local]
    674  Tensor& gradInput)  in replication_pad3d_backward_out_cuda()
    692  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in replication_pad3d_backward_cuda() [local]

FractionalMaxPool3d.cu
    122  PackedTensorAccessor64<scalar_t, 5> gradInput,  in fractional_max_pool3d_backward_out_frame()
    156  Tensor& gradInput,  in fractional_max_pool3d_backward_out_cuda_template()
    314  at::Tensor& gradInput) {  in fractional_max_pool3d_backward_out_cuda()
    337  Tensor gradInput = at::empty({0}, input.options());  in fractional_max_pool3d_backward_cuda() [local]

AdaptiveAveragePooling3d.cu
    154  scalar_t *gradInput, const scalar_t *gradOutput,  in adaptiveaveragegradinput()
    252  scalar_t *gradInput, const scalar_t *gradOutput,  in atomicadaptiveaveragegradinput()
    423  Tensor& gradInput,  in adaptive_avg_pool3d_backward_out_cuda_template()
    526  Tensor& gradInput) {  in adaptive_avg_pool3d_backward_out_cuda()
    540  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in adaptive_avg_pool3d_backward_cuda() [local]

AdaptiveAveragePooling.cu
    114  T *gradInput, const T *gradOutput,  in adaptive_average_gradinput()
    170  T *gradInput, const T *gradOutput,  in atomic_adaptive_average_gradinput()
    602  Tensor& gradInput,  in adaptive_avg_pool2d_backward_out_cuda_template()
    787  Tensor& gradInput,  in adaptive_avg_pool2d_backward_out_cuda()
    809  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);  in adaptive_avg_pool2d_backward_cuda() [local]

AveragePool3d.cu
    173  PackedTensorAccessor64<scalar_t, 4> gradInput,  in avg_pool3d_single_backward_out_frame_stride1()
    218  PackedTensorAccessor64<scalar_t, 4> gradInput,  in avg_pool3d_cuda_update_grad_input_atomic()
    277  PackedTensorAccessor64<scalar_t, 4> gradInput,  in avg_pool3d_cuda_update_grad_input()

DilatedMaxPool3d.cu
    412  Tensor& gradInput,  in max_pool3d_with_indices_backward_out_cuda_template()
    607  Tensor& gradInput)  in max_pool3d_with_indices_backward_out_cuda()
    638  auto gradInput = at::empty(input.sizes(), input.options());  in max_pool3d_with_indices_backward_cuda() [local]

SoftMax.cu
    314  scalar_t *gradInput, const outscalar_t *output, const outscalar_t *gradOutput,  in cunn_SpatialSoftMaxBackward()
    557  scalar_t *gradInput,  in WriteBpropResultsVectorized()
    630  scalar_t *gradInput,  in WriteBpropResults()
    775  cunn_SoftMaxBackward(scalar_t *gradInput, const outscalar_t *output, const outscalar_t *gradOutput,…  in cunn_SoftMaxBackward()

AdaptiveMaxPooling2d.cu
    112  __global__ void adaptivemaxgradinput(T *gradInput, const T *gradOutput, const int64_t *indices,  in adaptivemaxgradinput()
    160  T *gradInput, const T *gradOutput, const int64_t *indices,  in atomicadaptivemaxgradinput()

AdaptiveMaxPooling3d.cu
    161  T *gradInput, const T *gradOutput, const int64_t *indices,  in adaptivemaxgradinput()
    237  T *gradInput, const T *gradOutput, const int64_t *indices,  in atomicadaptivemaxgradinput()

FractionalMaxPool2d.cu
    105  PackedTensorAccessor<scalar_t, 4> gradInput,  in fractional_max_pool2d_backward_out_cuda_frame()

PersistentSoftmax.cuh
    215  __global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *outp…  in softmax_warp_backward()

MultiMarginLoss.cu
    70   scalar_t *gradInput, const scalar_t *gradOutput, const scalar_t *input, const int64_t *target,  in MultiMarginLoss_backward_kernel()

MaxUnpooling.cu
    101  PackedTensorAccessor64<T, 4> gradInput,  in max_unpooling3d_backward_kernel()
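The .cu entries mirror that split on the device side: a __global__ kernel receives gradInput as a PackedTensorAccessor64 whose rank (3, 4, or 5) matches the op, while a host wrapper allocates gradInput, dispatches on dtype, and launches. Below is a rough sketch under the same hypothetical my_op_* naming, not actual ATen code; for brevity the kernel covers only the degenerate case where gradOutput already has gradInput's shape, whereas the real kernels map each output cell to its input window and use atomicAdd when windows overlap.

    // Sketch only; my_op_* is a placeholder, not a real ATen symbol.
    #include <ATen/ATen.h>
    #include <ATen/Dispatch.h>
    #include <ATen/cuda/CUDAContext.h>
    #include <c10/cuda/CUDAException.h>

    template <typename scalar_t>
    __global__ void my_op_backward_kernel(
        at::PackedTensorAccessor64<scalar_t, 4> gradInput,        // N, C, H, W
        const at::PackedTensorAccessor64<scalar_t, 4> gradOutput) {
      const int64_t n = blockIdx.z, c = blockIdx.y;
      const int64_t hw = blockIdx.x * blockDim.x + threadIdx.x;
      const int64_t W = gradInput.size(3);
      if (hw < gradInput.size(2) * W) {
        // Same-shape case: each thread routes one gradient element.
        gradInput[n][c][hw / W][hw % W] += gradOutput[n][c][hw / W][hw % W];
      }
    }

    at::Tensor my_op_backward_cuda(
        const at::Tensor& gradOutput, const at::Tensor& input) {
      auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      const int threads = 256;
      const dim3 blocks(
          (input.size(2) * input.size(3) + threads - 1) / threads,
          input.size(1), input.size(0));
      AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "my_op_backward_cuda", [&] {
        my_op_backward_kernel<scalar_t><<<blocks, threads, 0,
            at::cuda::getCurrentCUDAStream()>>>(
            gradInput.packed_accessor64<scalar_t, 4>(),
            gradOutput.packed_accessor64<scalar_t, 4>());
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
      return gradInput;
    }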