// NOTE: This is a copy of cuda_extension_kernel.cu. It's kept here to test
// collision handling when a C++ file and CUDA file share the same filename.
// Setuptools can't deal with this at all, so the setup.py-based test uses
// cuda_extension_kernel.cu and the JIT test uses this file. Symlinks don't
// work well on Windows, so this is the most thorough solution right now.

#include <cuda.h>
#include <cuda_runtime.h>
#include <c10/cuda/CUDAException.h>

#include <ATen/ATen.h>

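// Elementwise kernel: each thread computes sigmoid(x[i]) + sigmoid(y[i]) for
// one index i, using the fast-math __expf intrinsic for the exponentials.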
__global__ void sigmoid_add_kernel(
    const float* __restrict__ x,
    const float* __restrict__ y,
    float* __restrict__ output,
    const int size) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    const float sigmoid_x = 1.0f / (1.0f + __expf(-x[index]));
    const float sigmoid_y = 1.0f / (1.0f + __expf(-y[index]));
    output[index] = sigmoid_x + sigmoid_y;
  }
}

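// Host-side launcher: dispatches sigmoid_add_kernel with 1024 threads per block
// and enough blocks to cover all `size` elements. x, y, and output must point
// to device memory.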
void sigmoid_add_cuda(const float* x, const float* y, float* output, int size) {
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;
  sigmoid_add_kernel<<<blocks, threads>>>(x, y, output, size);
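  // The launch above is asynchronous; this check surfaces any launch error
  // (e.g. an invalid configuration) as a c10::Error.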
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
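
// A minimal, illustrative caller sketch (not part of this file): a separate C++
// binding translation unit would typically wrap sigmoid_add_cuda behind a
// tensor API along these lines, assuming contiguous float CUDA tensors of equal
// size. The wrapper name and checks below are hypothetical, not this test's API:
//
//   at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
//     TORCH_CHECK(x.is_cuda() && y.is_cuda(), "expected CUDA tensors");
//     auto output = at::zeros_like(x);
//     sigmoid_add_cuda(
//         x.data_ptr<float>(),
//         y.data_ptr<float>(),
//         output.data_ptr<float>(),
//         output.numel());
//     return output;
//   }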