#include <torch/csrc/jit/passes/inliner.h>

#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/api/function_impl.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/frontend/error_report.h>
#include <torch/csrc/jit/jit_log.h>

namespace torch::jit {

namespace prim {
using namespace ::c10::prim;
}

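// Resolve a prim::CallFunction or prim::CallMethod node to the GraphFunction
// it calls. Returns nullptr when the callee cannot be resolved to a graph
// (e.g. a builtin function).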
GraphFunction* tryToGraphFunction(Node* n) {
  if (n->kind() == prim::CallFunction) {
    AT_ASSERT(n->input(0)->node()->kind() == prim::Constant);
    auto function_constant = n->input(0)->node();
    auto fun_type = function_constant->output()->type()->expect<FunctionType>();
    return tryToGraphFunction(*fun_type->function());
  }
  if (n->kind() == prim::CallMethod) {
    const std::string& name = n->s(attr::name);
    if (auto class_type = n->input(0)->type()->cast<ClassType>()) {
      Function& function = class_type->getMethod(name);
      return tryToGraphFunction(function);
    }
  }
  return nullptr;
}

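// Walk `block`, splicing in the bodies of calls that resolve to a
// GraphFunction and recursing into the sub-blocks of all other nodes.
// The iterator is advanced before the current node is handled because
// inlining destroys the call node.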
static void inlineCalls(Block* block) {
  for (auto it = block->nodes().begin(), end = block->nodes().end();
       it != end;) {
    Node* cur = *it++;
    switch (cur->kind()) {
      case prim::CallFunction: {
        if (auto graphFunction = tryToGraphFunction(cur)) {
          auto function_constant = cur->input(0)->node();
          auto fun_type =
              function_constant->output()->type()->expect<FunctionType>();

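          // Drop the function constant (input 0) so the remaining inputs line
          // up with the callee graph's parameters when the call is inlined.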
          cur->removeInput(0);
          GRAPH_UPDATE(
              "Inlining function '",
              fun_type->function()->name(),
              "' to ",
              *cur);

          std::shared_ptr<Graph> g = nullptr;
          // Inline the optimized graph for debugging/testing purposes.
          // Fallback functions are only inserted into JIT-optimized graphs
          // used for execution, not into the graph used for serialization.
          bool fallback =
              function_constant->hasAttribute(Symbol::attr("fallback"));
          if (fallback && graphFunction->get_executor().isOptimized()) {
            auto exec_plans =
                graphFunction->get_executor().getDebugState().execution_plans;
            if (!exec_plans.empty()) {
              g = exec_plans.begin()->second.graph;
              // optimized_graph() already calls Inline, so inlining only needs
              // to be invoked explicitly on this JIT-optimized graph, which
              // may contain recursive fallback function calls.
              Inline(*g);
            }
          }
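          // No fallback execution plan was found: use the function's
          // optimized graph, which optimized_graph() has already inlined.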
          if (g == nullptr) {
            g = graphFunction->optimized_graph();
          }

          GRAPH_UPDATE("Function body: ", g);
          inlineCallTo(cur, graphFunction, g.get());
        }
      } break;
      case prim::CallMethod: {
        if (auto graphFunction = tryToGraphFunction(cur)) {
          GRAPH_UPDATE("Inlining method '", cur->s(attr::name), "' to ", *cur);
          GRAPH_UPDATE("Function body: ", graphFunction->optimized_graph());
          inlineCallTo(cur, graphFunction);
        }
      } break;
      default: {
        for (auto b : cur->blocks()) {
          inlineCalls(b);
        }
      } break;
    }
  }
}

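// Entry point of the pass: recursively inline all function and method calls
// in `graph`. As a rough sketch, a call such as
//   %r = prim::CallFunction(%foo, %x)
// is replaced by splicing the body of `foo` into the caller (the IR shown
// here is illustrative).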
void Inline(Graph& graph) {
  GRAPH_DUMP("Before Inlining: ", &graph);
  inlineCalls(graph.block());
  GRAPH_DUMP("After Inlining: ", &graph);
}

} // namespace torch::jit