xref: /aosp_15_r20/external/pytorch/aten/src/ATen/core/boxing/KernelFunction.cpp (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/dispatch/Dispatcher.h>

#include <sstream>

namespace c10 {

// This is a "fake" kernel which doesn't actually do anything. Instead, it is
// a distinguished kernel that the dispatch table special-cases. Its semantics
// are that it redispatches to the *next* dispatch key that would have been
// processed, skipping the current one.
void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*) {
  TORCH_INTERNAL_ASSERT(0,
    "fallthrough_kernel was executed but it should have been short-circuited by the dispatcher. "
    "This could occur if you registered a fallthrough kernel as an override for a specific operator "
    "(as opposed to a backend fallback); this is NOT currently supported, and we do not intend to "
    "add support for it in the near future. If you do find yourself in need of this, "
    "let us know in the bug tracker.");
}

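// Illustrative sketch (not part of this file): fallthrough kernels are meant
// to be registered as backend *fallbacks* rather than per-operator overrides,
// e.g. via the torch/library.h API; the dispatch key below is an arbitrary
// example:
//
//   TORCH_LIBRARY_IMPL(_, AutogradPrivateUse1, m) {
//     m.fallback(torch::CppFunction::makeFallthrough());
//   }
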
void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
  TORCH_INTERNAL_ASSERT(0,
    op.operator_name(), " has kernels registered to both CompositeImplicitAutograd and a backend mapped to AutogradOther. "
    "This makes the backend kernel unreachable; the dispatcher will always prefer the CompositeImplicitAutograd lowering "
    "(see Note [Ambiguity in AutogradOther kernel]). "
    "If you want to override CompositeImplicitAutograd, please open an issue to request a dedicated "
    "Autograd dispatch key for the backend.\n",
    "If you only want to run inference instead of training, in C++, add `c10::InferenceMode mode;` "
    "before model.forward(); in Python, use `torch.inference_mode()` as a context manager (see "
    "https://pytorch.org/docs/stable/generated/torch.inference_mode.html).",
    "\nCanonical state\n~~~~~~~~~~~\n", op.dumpState(), "\n\n");
}

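// Illustrative sketch (not part of this file) of the conflict described
// above, using a hypothetical operator `myns::my_op`; QuantizedCPU is one of
// the backends whose autograd handling maps to AutogradOther:
//
//   TORCH_LIBRARY_IMPL(myns, CompositeImplicitAutograd, m) {
//     m.impl("my_op", my_composite_kernel);
//   }
//   TORCH_LIBRARY_IMPL(myns, QuantizedCPU, m) {
//     m.impl("my_op", my_quantized_kernel);
//   }
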
void named_not_supported_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
  // DO NOT LOOK AT STACK, YOU HAVE SHORT-CIRCUITED BOXING
  // See Note [named_not_supported_kernel]
  TORCH_CHECK(0,
    op.operator_name(), " is not yet supported with named tensors. Please drop names via "
    "`tensor = tensor.rename(None)`, call the op with an unnamed tensor, "
    "and set names on the result of the operation."
    );
}

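// These error kernels are installed through static factories declared in
// KernelFunction.h (makeFallthrough(), makeAmbiguousAutogradOther(),
// makeNamedNotSupported()); a minimal sketch of constructing one:
//
//   auto k = c10::KernelFunction::makeNamedNotSupported();
//   // Calling through `k` raises the named-tensor error above.
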
// Single-line summary of this kernel's state.
std::string KernelFunction::dumpState() const {
  std::ostringstream oss;
  auto boxed_kernel_fn = boxed_kernel_func_.getFnPtr();
  if (boxed_kernel_fn == fallthrough_kernel) {
    oss << "fallthrough ";
  }
  if (boxed_kernel_fn) {
    oss << "boxed ";
  }
  if (unboxed_kernel_func_) {
    oss << "unboxed ";
  }
  return oss.str();
}

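// Worked examples (derived from the logic above): a kernel constructed via
// KernelFunction::makeFallthrough() dumps "fallthrough boxed ", since the
// fallthrough function is itself a non-null boxed function pointer, while a
// kernel with both entry points populated dumps "boxed unboxed ".
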
bool KernelFunction::_equalsBoxedAndUnboxed(const KernelFunction& other) const {
  return boxed_kernel_func_.getFnPtr() == other.boxed_kernel_func_.getFnPtr() &&
         unboxed_kernel_func_ == other.unboxed_kernel_func_;
}

} // namespace c10