xref: /aosp_15_r20/external/pytorch/aten/src/ATen/native/MathBitsFallback.h (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
#include <ATen/core/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/op_registration/op_registration.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/Resize.h>
#include <c10/util/irange.h>
#include <torch/library.h>

#include <string>
#include <utility>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/clone.h>
#endif
16 
17 namespace at::native {
18 // This fallback should only be used for operations that are self inverse and have a corresponding tensor
19 // bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit.
20 // Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit.
21 // Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called.
22 
23 // NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit.
24 struct MathOpFallback {
MathOpFallbackMathOpFallback25   MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {}
26   virtual bool is_bit_set(const Tensor&) = 0;
fallback_implMathOpFallback27   void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) {
28     /*
29       Situations to handle:
30         1. Out-of-place operation.  Easy: materialize all inputs and
31           call it a day.
32         2. Inplace operation.  Desugar x.add_(2) into x.conj_().add_(2).conj_().
33           Materialize other inputs as in (1).
34         3. out= operation.  Desugar add(x, 2, out=y) into y.copy_(add(x, 2))
35         Materialize other inputs as in (1).
36 
37         It is important to be able to tell if we READ from an argument and if we
38         WRITE to an argument.  Conservative approach is to assume that we always
39         READ from an argument, but in out= operations you can skip
40         conjugating inputs on entry that never get used. In the current schema we
41         can't easily tell if the operation is in in-place or out= operation.
42 
43         Note:
44         1. Mutable tensorlists containing tensors whose math bit set to true are disallowed.
45         2. Mutable tensors with math bit set to true are unconditionally cloned to ensure
46            correct behavior in the case when the mutable tensor shares memory with non mutable arguments.
47 
48            If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory
49            with these mutable inputs would read into wrong values in the following cases:
50            1. Non mutable inputs have their math bit set to false.
51            2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory
52               with one or more mutable arg(s)) are cloned.
53            At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs.
54     */
55     const auto& arguments = op.schema().arguments();
56     const auto num_arguments = arguments.size();
57     const auto stack_start = stack->size() - num_arguments;
58 
59     std::optional<bool> is_write;
60     for (const auto i : c10::irange(num_arguments)) {
61       // Three possible states:
62       // 1. alias_info has no value --> out-of-place operation
63       // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation
64       // 3. alias_info does have a value, alias_info->is_write=False --> view operation
65       const AliasInfo* alias_info = arguments[i].alias_info();
66       if (alias_info != nullptr) {
67         if (is_write.has_value()) {
68           TORCH_CHECK(*is_write == alias_info->isWrite(),
69             "Unsupported operator for ", op_name, " fallback: ", op.schema().name(),
70             op_name, " fallback doesn't work for operators with a mix "
71             "mutable and non-mutable inputs that alias with outputs, "
72             "this must be implemented manually.  "
73             "If you got this error on a core op, please report a bug to PyTorch.");
74         } else {
75           is_write = alias_info->isWrite();
76         }
77       }
78     }
79 
80     if (is_write.has_value() && !*is_write) {
81       // We assume that view operators automatically handle the math bit
82       // correctly by propagating the dispatch key in key_set.
83       // This is not necessarily always right, so you should test these cases.
84       op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
85       return;
86     }
87 
88     // Mutable inputs with math bit set to True and their clones
89     std::vector<std::pair<Tensor, Tensor>> mutable_inputs_with_their_clones;
90     for (const auto i : c10::irange(num_arguments)) {
91       auto& ivalue = (*stack)[stack_start + i];
92       if (!(ivalue.isTensor() || ivalue.isTensorList())) {
93         continue;
94       }
95       const auto& argument = arguments[i];
96       bool mut_arg = false;
97       if (argument.alias_info()) {
98         // Was already tested by is_write loop above
99         TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite());
100         mut_arg = true;
101       }
102       if (ivalue.isTensor()) {
103         if (!is_bit_set(ivalue.toTensor())) {
104           continue;
105         }
106         auto tensor = std::move(ivalue).toTensor();
107         auto resolved_tensor = at::clone(tensor);
108         if (mut_arg) {
109           TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ",
110             op_name, "bit set to true.");
111           mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor);
112         }
113         (*stack)[stack_start + i] = std::move(resolved_tensor);
114       } else if (ivalue.isTensorList()) {
115         auto tensors = std::move(ivalue).toTensorList();
116         for(const auto j : c10::irange(tensors.size())) {
117           const auto& tensor = tensors[j];
118           if (!is_bit_set(tensor)) {
119             continue;
120           }
121           TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ",
122               op_name, " inputs. Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ",
123               op.schema().name());
124           tensors[j] = at::clone(tensor);
125         }
126         (*stack)[stack_start + i] = std::move(tensors);
127       }
128     }
129 
130     op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
131 
132     TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1);
133 
134     for (std::pair<Tensor, Tensor> mut_tensors: mutable_inputs_with_their_clones) {
135       auto& mutable_input =  mut_tensors.first;
136       auto& cloned_mutable_input =  mut_tensors.second;
137       auto& ivalue = (*stack)[stack_start];
138       auto returned_output = std::move(ivalue).toTensor();
139 
140       // sanity check to ensure that the tensor in stack aliases the cloned_mutable_input
141       TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output));
142 
143       // necessary for out= arg
144       at::native::resize_output(mutable_input, returned_output.sizes());
145 
146       mutable_input.copy_(returned_output);
147       (*stack)[stack_start] = std::move(mutable_input);
148     }
149   }
150 
151   virtual ~MathOpFallback() = default;
152 
153   DispatchKey key;
154   string op_name;
155 };
156 
157 } // namespace at::native
158