#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/OpMathType.h>

#include <type_traits>

namespace at::native {

void logical_not_kernel_cuda(TensorIteratorBase& iter) {
  // error check -- this just ensures we don't dispatch on types that aren't in
  // ALL_TYPES_AND_COMPLEX_AND3(...), so we don't have to maintain a separate
  // list or do a true double dispatch.
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(0), "logical_not_cuda", [&]() {});

  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(1), "logical_not_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return !a; });
  });
}
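
// A minimal host-side sketch of what reaches this kernel (hypothetical call,
// assuming a CUDA build of ATen):
//
//   at::Tensor t = at::zeros({4}, at::kCUDA);
//   at::Tensor r = at::logical_not(t);  // kBool tensor of all-true
//
// iter.dtype(0) is the output dtype (kBool unless an `out=` tensor of another
// dtype was supplied) and iter.dtype(1) is the input dtype: the first dispatch
// above is purely the dtype check, the second actually launches the kernel.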

// NB: Ignores the negative bit on tensors
CONSTEXPR_EXCEPT_WIN_CUDA char neg_name[] = "neg_kernel";
void neg_kernel_cuda(TensorIteratorBase& iter) {
  auto dtype = iter.dtype();
  if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
    static const auto neg_string = jiterator_stringify(
        template <typename T>
        T neg_kernel(T a) {
          return -a;
        }
    ); // neg_string
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
      jitted_gpu_kernel<
          /*name=*/ neg_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, neg_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        return -a;
      });
    });
#endif
  } else {
    AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, dtype, "neg_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        return -a;
      });
    });
  }
}
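
// Sketch of the dispatch above (hypothetical host-side call, assuming CUDA):
//
//   auto z = at::full({2}, c10::complex<float>(1, -2),
//                     at::device(at::kCUDA).dtype(at::kComplexFloat));
//   auto nz = at::neg(z);  // each element becomes (-1, 2)
//
// On jiterator-enabled builds the complex path JIT-compiles `neg_string` at
// first use rather than shipping precompiled kernels for every complex dtype;
// all other dtypes go through the ordinary precompiled gpu_kernel lambda.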

void sign_kernel_cuda(TensorIteratorBase& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    gpu_kernel(iter, []GPU_LAMBDA(bool a){
      return a;
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "sign_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        return c10::signum(a);
      });
    });
  }
}
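
// c10::signum maps each element to -1, 0, or +1 in the same dtype; e.g. a
// hypothetical float input {-3.5f, 0.f, 2.f} yields {-1.f, 0.f, 1.f}. Bool
// gets its own path because the AT_DISPATCH_ALL_TYPES_AND2 list above does not
// include kBool, and the identity map already gives the right result
// (sign(false) = false, sign(true) = true).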

void signbit_kernel_cuda(TensorIteratorBase& iter) {
  // NOTE: signbit does not always support integral arguments.
  if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
    AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return is_negative(a); });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cuda", [&]() {
      using opmath_t = at::opmath_type<scalar_t>;
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return signbit(opmath_t{a}); });
    });
  }
}
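
// Hypothetical examples of the split above: for a kLong input, is_negative(-3)
// is true and is_negative(0) is false; for floating inputs, signbit inspects
// the sign bit itself, so signbit(-0.0f) is true even though -0.0f == 0.0f.
// c10::is_negative is used on the integral path because, per the note above,
// ::signbit is not always available for integral arguments (and sign is
// trivially false for unsigned types); reduced-precision floats are widened to
// opmath_t before the signbit call.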

template<typename T>
C10_HOST_DEVICE static inline c10::complex<T> sgn_wrapper(c10::complex<T> z) {
  if (z == c10::complex<T>(0, 0)) {
    return c10::complex<T>(0, 0);
  } else {
    return z / std::abs(z);
  }
}
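
// E.g. sgn_wrapper(c10::complex<float>(3, 4)) == c10::complex<float>(0.6f, 0.8f),
// since |3 + 4i| = 5; away from the origin the result always has unit modulus.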

CONSTEXPR_EXCEPT_WIN_CUDA char sgn_name[] = "sgn_kernel";
void sgn_kernel_cuda(TensorIteratorBase& iter) {
  auto dtype = iter.dtype();
#if AT_USE_JITERATOR()
  static const auto sgn_string = jiterator_stringify(
      template <typename T>
      T sgn_kernel(T z) {
        const T zero = T(0);
        if (z == zero) {
          return zero;
        } else {
          return z / std::abs(z);
        }
      }
  ); // sgn_string
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
    jitted_gpu_kernel<
        /*name=*/ sgn_name,
        /*return_dtype=*/ scalar_t,
        /*common_dtype=*/ scalar_t,
        /*arity=*/ 1>(iter, sgn_string);
  });
#else
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
    using opmath_t = at::opmath_type<scalar_t>;
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return sgn_wrapper(opmath_t{a});
    });
  });
#endif
}
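
// Sketch (hypothetical host-side call, assuming CUDA):
//
//   auto z = at::full({1}, c10::complex<double>(0, -2),
//                     at::device(at::kCUDA).dtype(at::kComplexDouble));
//   auto s = at::sgn(z);  // (0, -1), i.e. z / |z|
//
// As in neg_kernel_cuda, the jiterator path JIT-compiles `sgn_string` on first
// use; the fallback widens kComplexHalf values to opmath_t
// (c10::complex<float>) before calling sgn_wrapper.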

REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(signbit_stub, &signbit_kernel_cuda);
REGISTER_DISPATCH(sgn_stub, &sgn_kernel_cuda);

} // namespace at::native