1 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2 #include <ATen/native/ReduceAllOps.h>
3 #include <ATen/native/Resize.h>
4
5 #include <ATen/core/Tensor.h>
6
7 #ifndef AT_PER_OPERATOR_HEADERS
8 #include <ATen/Functions.h>
9 #include <ATen/NativeFunctions.h>
10 #else
11 #include <ATen/ops/_aminmax_native.h>
12 #include <ATen/ops/aminmax.h>
13 #include <ATen/ops/empty.h>
14 #include <ATen/ops/max.h>
15 #include <ATen/ops/max_native.h>
16 #include <ATen/ops/min.h>
17 #include <ATen/ops/min_native.h>
18 #endif
19
20 namespace at::native {
21
// Dispatch-stub declarations for the full-tensor (no 'dim' argument) min/max
// reductions. The per-backend kernel implementations are registered elsewhere
// (presumably in the CPU/CUDA reduce-all kernel files — not visible here).
DEFINE_DISPATCH(min_all_stub);
DEFINE_DISPATCH(max_all_stub);
24
// Reduces `self` over all elements and returns the minimum as a 0-dim tensor.
// A non-empty input is required: an all-elements reduction of an empty tensor
// has no identity value here, so callers must use the 'dim' overload instead.
Tensor min(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
    "min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  // Kernels assume dense memory, so reduce over a contiguous copy/view.
  const Tensor contig = self.contiguous();
  Tensor out = at::empty({}, self.options());
  min_all_stub(self.device().type(), out, contig);
  return out;
}
32
// Out-variant of the all-elements min reduction: writes the minimum of `self`
// into `out` (resized to 0-dim) and returns `out`.
//
// Fixes vs. previous revision:
//  - Both TORCH_CHECKs now carry descriptive messages instead of failing with
//    a bare, context-free assertion text.
//  - Rejects empty inputs, matching the functional at::native::min() overload,
//    instead of handing a zero-element tensor to the reduction stub.
Tensor& min_unary_out(const Tensor &self, Tensor& out) {
  TORCH_CHECK(self.numel() > 0,
    "min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");

  // First check if the devices match (CPU vs GPU): the stub is dispatched on
  // the input's device and writes directly into `out`.
  TORCH_CHECK(self.device() == out.device(),
      "min(): expected input and out tensor to be on the same device, but got input on ",
      self.device(), " and out on ", out.device());

  // The result has the input's dtype; it must be safely castable to out's.
  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())),
      "min(): input dtype ", self.dtype(),
      " can't be cast to the desired output dtype ", out.dtype());

  // All-elements reduction produces a scalar (0-dim) result.
  at::native::resize_output(out, {});

  min_all_stub(self.device().type(), out, self.contiguous());
  return out;
}
46
// Reduces `self` over all elements and returns the maximum as a 0-dim tensor.
// A non-empty input is required: an all-elements reduction of an empty tensor
// has no identity value here, so callers must use the 'dim' overload instead.
Tensor max(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
    "max(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  // Kernels assume dense memory, so reduce over a contiguous copy/view.
  const Tensor contig = self.contiguous();
  Tensor out = at::empty({}, self.options());
  max_all_stub(self.device().type(), out, contig);
  return out;
}
54
// Out-variant of the all-elements max reduction: writes the maximum of `self`
// into `out` (resized to 0-dim) and returns `out`.
//
// Fixes vs. previous revision:
//  - Both TORCH_CHECKs now carry descriptive messages instead of failing with
//    a bare, context-free assertion text.
//  - Rejects empty inputs, matching the functional at::native::max() overload,
//    instead of handing a zero-element tensor to the reduction stub.
Tensor& max_unary_out(const Tensor &self, Tensor& out) {
  TORCH_CHECK(self.numel() > 0,
    "max(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");

  // First check if the devices match (CPU vs GPU): the stub is dispatched on
  // the input's device and writes directly into `out`.
  TORCH_CHECK(self.device() == out.device(),
      "max(): expected input and out tensor to be on the same device, but got input on ",
      self.device(), " and out on ", out.device());

  // The result has the input's dtype; it must be safely castable to out's.
  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())),
      "max(): input dtype ", self.dtype(),
      " can't be cast to the desired output dtype ", out.dtype());

  // All-elements reduction produces a scalar (0-dim) result.
  at::native::resize_output(out, {});

  max_all_stub(self.device().type(), out, self.contiguous());
  return out;
}
68
// DEPRECATED: Use at::aminmax instead
// Backward-compatibility shim: warns once per process, then forwards the
// whole-tensor reduction to the replacement op at::aminmax.
std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
      " This warning will only appear once per process.");
  return at::aminmax(self);
}
75
76 } // namespace at::native
77