#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_softmax_native.h>         // for mkldnn_softmax
#endif

#if !AT_MKLDNN_ENABLED()

namespace at {
namespace native {

Tensor mkldnn_softmax(
    const Tensor& self,
    const int64_t dim,
    const bool half_to_float) {
  TORCH_CHECK(false, "mkldnn_softmax: ATen not compiled with MKLDNN support");
}

} // namespace native
} // namespace at

#else // AT_MKLDNN_ENABLED

#include <ATen/native/mkldnn/MKLDNNCommon.h>

namespace at {
namespace native {

Tensor mkldnn_softmax(
    const Tensor& self,
    const int64_t dim,
    const bool half_to_float) {
  TORCH_CHECK(
      !half_to_float,
      "softmax with half to float conversion is not supported on Mkldnn");
  // Normalize a possibly negative dim into [0, self.dim()).
  const int64_t wrapped_dim = maybe_wrap_dim(dim, self.dim());
  // Unwrap the opaque MKLDNN tensor and run oneDNN's softmax primitive.
  ideep::tensor& x = itensor_from_mkldnn(self);
  ideep::tensor y;
  ideep::softmax_forward::compute(x, y, wrapped_dim);
  // Re-wrap the ideep result as an opaque MKLDNN tensor, preserving the
  // input's dtype and device.
  return new_with_itensor_mkldnn(std::move(y), optTypeMetaToScalarType(self.options().dtype_opt()),
                                 self.options().device_opt());
}
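
// Minimal usage sketch, assuming ATen is built with MKLDNN support: an
// MKLDNN-layout tensor routes at::_softmax to the kernel above.
//
//   at::Tensor dense = at::randn({2, 3});
//   at::Tensor mkl = dense.to_mkldnn();      // strided -> opaque MKLDNN layout
//   at::Tensor out = at::_softmax(mkl, /*dim=*/1, /*half_to_float=*/false);
//   at::Tensor result = out.to_dense();      // back to a strided CPU tensor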

} // namespace native
} // namespace at

#endif // AT_MKLDNN_ENABLED