# xref: /aosp_15_r20/external/pytorch/build.bzl (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
# Pull in the helper functions that enumerate the ufunc-generated source
# files; they are consumed below when assembling the gen_aten outputs.
load(
    ":ufunc_defs.bzl",
    "aten_ufunc_generated_cpu_kernel_sources",
    "aten_ufunc_generated_cpu_sources",
    "aten_ufunc_generated_cuda_sources",
)

def define_targets(rules):
    """Define the shared Caffe2/ATen code-generation and library targets.

    Args:
        rules: a struct-like bundle of build rules (must provide
            cc_library, cmake_configure_file, genrule, glob and
            is_cpu_static_dispatch_build). Injecting the rules lets the
            same target definitions be instantiated from more than one
            build system (e.g. Bazel and Meta-internal builds).
    """
    rules.cc_library(
        name = "caffe2_core_macros",
        hdrs = [":caffe2_core_macros_h"],
    )

    # Materialize caffe2/core/macros.h from its CMake-style template,
    # substituting the build-configuration switches listed below.
    rules.cmake_configure_file(
        name = "caffe2_core_macros_h",
        src = "caffe2/core/macros.h.in",
        out = "caffe2/core/macros.h",
        definitions = [
            "CAFFE2_BUILD_SHARED_LIBS",
            "CAFFE2_PERF_WITH_AVX",
            "CAFFE2_PERF_WITH_AVX2",
            "CAFFE2_USE_EXCEPTION_PTR",
            "CAFFE2_USE_CUDNN",
            "USE_MKLDNN",
            "CAFFE2_USE_ITT",
            "USE_ROCM_KERNEL_ASSERT",
            "EIGEN_MPL2_ONLY",
        ],
    )

    rules.cc_library(
        name = "caffe2_serialize",
        srcs = [
            "caffe2/serialize/file_adapter.cc",
            "caffe2/serialize/inline_container.cc",
            "caffe2/serialize/istream_adapter.cc",
            "caffe2/serialize/read_adapter_interface.cc",
        ],
        copts = ["-fexceptions"],
        tags = [
            "-fbcode",
            "supermodule:android/default/pytorch",
            "supermodule:ios/default/public.pytorch",
            "xplat",
        ],
        visibility = ["//visibility:public"],
        deps = [
            ":caffe2_headers",
            "//c10",
            "//third_party/miniz-2.1.0:miniz",
            "@com_github_glog//:glog",
        ],
    )

    #
    # ATen generated code
    # You need to keep this in sync with the files written out
    # by gen.py (in the cmake build system, we track generated files
    # via generated_cpp.txt and generated_cpp.txt-cuda)
    #
    # Sure would be nice to use gen.py to create this list dynamically
    # instead of hardcoding, no? Well, we can't, as discussed in this
    # thread:
    # https://fb.facebook.com/groups/askbuck/permalink/1924258337622772/

    gen_aten_srcs = [
        "aten/src/ATen/native/native_functions.yaml",
        "aten/src/ATen/native/tags.yaml",
    ] + rules.glob(["aten/src/ATen/templates/*"])

    gen_aten_cmd = " ".join([
        "$(execpath //torchgen:gen)",
        "--install_dir=$(RULEDIR)",
        "--source-path aten/src/ATen",
        "--aoti_install_dir=$(RULEDIR)/torch/csrc/inductor/aoti_torch/generated",
    ] + (["--static_dispatch_backend CPU"] if rules.is_cpu_static_dispatch_build() else []))

    gen_aten_outs_cuda = (
        GENERATED_H_CUDA + GENERATED_CPP_CUDA + GENERATED_AOTI_CUDA_CPP +
        aten_ufunc_generated_cuda_sources()
    )

    gen_aten_outs = (
        GENERATED_H + GENERATED_H_CORE +
        GENERATED_CPP + GENERATED_CPP_CORE +
        GENERATED_AOTI_CPP +
        aten_ufunc_generated_cpu_sources() +
        aten_ufunc_generated_cpu_kernel_sources() + [
            "Declarations.yaml",
        ] + gen_aten_outs_cuda
    )

    rules.genrule(
        name = "gen_aten",
        srcs = gen_aten_srcs,
        outs = gen_aten_outs,
        cmd = gen_aten_cmd,
        tools = ["//torchgen:gen"],
    )

    # Same generator invoked with --rocm for the HIP build; the "-bazel"
    # tag keeps it out of the plain Bazel build.
    rules.genrule(
        name = "gen_aten_hip",
        srcs = gen_aten_srcs,
        outs = gen_aten_outs_cuda,
        cmd = gen_aten_cmd + " --rocm",
        features = ["-create_bazel_outputs"],
        tags = ["-bazel"],
        tools = ["//torchgen:gen"],
    )

    # Autograd / lazy-tensor code generation driven by
    # tools/setup_helpers:generate_code.
    rules.genrule(
        name = "generate-code",
        srcs = [
            ":DispatchKeyNativeFunctions.cpp",
            ":DispatchKeyNativeFunctions.h",
            ":LazyIr.h",
            ":LazyNonNativeIr.h",
            ":RegisterDispatchDefinitions.ini",
            ":RegisterDispatchKey.cpp",
            ":native_functions.yaml",
            ":shape_inference.h",
            ":tags.yaml",
            ":ts_native_functions.cpp",
            ":ts_native_functions.yaml",
        ],
        outs = GENERATED_AUTOGRAD_CPP + GENERATED_AUTOGRAD_PYTHON + GENERATED_TESTING_PY,
        cmd = "$(execpath //tools/setup_helpers:generate_code) " +
              "--gen-dir=$(RULEDIR) " +
              "--native-functions-path $(location :native_functions.yaml) " +
              "--tags-path=$(location :tags.yaml) " +
              "--gen_lazy_ts_backend",
        tools = ["//tools/setup_helpers:generate_code"],
    )

    rules.cc_library(
        name = "generated-autograd-headers",
        hdrs = [":{}".format(h) for h in _GENERATED_AUTOGRAD_CPP_HEADERS + _GENERATED_AUTOGRAD_PYTHON_HEADERS],
        visibility = ["//visibility:public"],
    )

    # Stamp torch/version.h from the template plus version.txt.
    rules.genrule(
        name = "version_h",
        srcs = [
            ":torch/csrc/api/include/torch/version.h.in",
            ":version.txt",
        ],
        outs = ["torch/csrc/api/include/torch/version.h"],
        cmd = "$(execpath //tools/setup_helpers:gen_version_header) " +
              "--template-path $(location :torch/csrc/api/include/torch/version.h.in) " +
              "--version-path $(location :version.txt) --output-path $@ ",
        tools = ["//tools/setup_helpers:gen_version_header"],
    )

#
# ATen generated code
# You need to keep this in sync with the files written out
# by gen.py (in the cmake build system, we track generated files
# via generated_cpp.txt and generated_cpp.txt-cuda)
#
# Sure would be nice to use gen.py to create this list dynamically
# instead of hardcoding, no? Well, we can't, as discussed in this
# thread:
# https://fb.facebook.com/groups/askbuck/permalink/1924258337622772/

# Headers emitted by torchgen for the CPU/common ATen build.
GENERATED_H = [
    "Functions.h",
    "NativeFunctions.h",
    "NativeMetaFunctions.h",
    "FunctionalInverses.h",
    "RedispatchFunctions.h",
    "RegistrationDeclarations.h",
    "VmapGeneratedPlumbing.h",
]

# Headers belonging to the "core" slice of the generated ATen API.
GENERATED_H_CORE = [
    "Operators.h",
    # CPUFunctions.h (and likely similar headers) need to be part of core because
    # of the static dispatch build: TensorBody.h directly includes CPUFunctions.h.
    # The distinction looks pretty arbitrary though; maybe we can kill core
    # and merge the two?
    "CPUFunctions.h",
    "CPUFunctions_inl.h",
    "CompositeExplicitAutogradFunctions.h",
    "CompositeExplicitAutogradFunctions_inl.h",
    "CompositeExplicitAutogradNonFunctionalFunctions.h",
    "CompositeExplicitAutogradNonFunctionalFunctions_inl.h",
    "CompositeImplicitAutogradFunctions.h",
    "CompositeImplicitAutogradFunctions_inl.h",
    "CompositeImplicitAutogradNestedTensorFunctions.h",
    "CompositeImplicitAutogradNestedTensorFunctions_inl.h",
    "MetaFunctions.h",
    "MetaFunctions_inl.h",
    "core/TensorBody.h",
    "MethodOperators.h",
    "core/aten_interned_strings.h",
    "core/enum_tag.h",
]

# CUDA-only generated headers.
GENERATED_H_CUDA = [
    "CUDAFunctions.h",
    "CUDAFunctions_inl.h",
]

# CUDA-only generated dispatcher-registration sources.
GENERATED_CPP_CUDA = [
    "RegisterCUDA.cpp",
    "RegisterNestedTensorCUDA.cpp",
    "RegisterSparseCUDA.cpp",
    "RegisterSparseCsrCUDA.cpp",
    "RegisterQuantizedCUDA.cpp",
]

# CPU/common generated .cpp sources (dispatcher registrations and
# functionalization kernels).
GENERATED_CPP = [
    "Functions.cpp",
    "RegisterBackendSelect.cpp",
    "RegisterCPU.cpp",
    "RegisterQuantizedCPU.cpp",
    "RegisterNestedTensorCPU.cpp",
    "RegisterSparseCPU.cpp",
    "RegisterSparseCsrCPU.cpp",
    "RegisterMkldnnCPU.cpp",
    "RegisterCompositeImplicitAutograd.cpp",
    "RegisterCompositeImplicitAutogradNestedTensor.cpp",
    "RegisterZeroTensor.cpp",
    "RegisterMeta.cpp",
    "RegisterQuantizedMeta.cpp",
    "RegisterNestedTensorMeta.cpp",
    "RegisterSparseMeta.cpp",
    "RegisterCompositeExplicitAutograd.cpp",
    "RegisterCompositeExplicitAutogradNonFunctional.cpp",
    "CompositeViewCopyKernels.cpp",
    "RegisterSchema.cpp",
    "RegisterFunctionalization_0.cpp",
    "RegisterFunctionalization_1.cpp",
    "RegisterFunctionalization_2.cpp",
    "RegisterFunctionalization_3.cpp",
]

# "Core" generated .cpp sources (sharded Operators plus core helpers).
GENERATED_CPP_CORE = [
    "Operators_0.cpp",
    "Operators_1.cpp",
    "Operators_2.cpp",
    "Operators_3.cpp",
    "Operators_4.cpp",
    "core/ATenOpList.cpp",
    "core/TensorMethods.cpp",
]

# These lists are temporarily living in and exported from the shared
# structure so that an internal build that lives under a different
# root can access them. These could technically live in a separate
# file in the same directory but that would require extra work to
# ensure that file is synced to both Meta internal repositories and
# GitHub. This problem will go away when the targets downstream of
# generate-code that use these lists are moved into the shared
# structure as well.

# Generated autograd Python-binding headers.
_GENERATED_AUTOGRAD_PYTHON_HEADERS = [
    "torch/csrc/autograd/generated/python_functions.h",
    "torch/csrc/autograd/generated/python_return_types.h",
]

# Generated autograd C++ headers.
_GENERATED_AUTOGRAD_CPP_HEADERS = [
    "torch/csrc/autograd/generated/Functions.h",
    "torch/csrc/autograd/generated/VariableType.h",
    "torch/csrc/autograd/generated/ViewFuncs.h",
    "torch/csrc/autograd/generated/variable_factories.h",
]

# Generated Python test-support files.
GENERATED_TESTING_PY = [
    "torch/testing/_internal/generated/annotated_fn_args.py",
]

# Generated lazy-tensor headers.
GENERATED_LAZY_H = [
    "torch/csrc/lazy/generated/LazyIr.h",
    "torch/csrc/lazy/generated/LazyNonNativeIr.h",
    "torch/csrc/lazy/generated/LazyNativeFunctions.h",
]

# Generated autograd Python-binding .cpp sources.
_GENERATED_AUTOGRAD_PYTHON_CPP = [
    "torch/csrc/autograd/generated/python_functions_0.cpp",
    "torch/csrc/autograd/generated/python_functions_1.cpp",
    "torch/csrc/autograd/generated/python_functions_2.cpp",
    "torch/csrc/autograd/generated/python_functions_3.cpp",
    "torch/csrc/autograd/generated/python_functions_4.cpp",
    "torch/csrc/autograd/generated/python_nn_functions.cpp",
    "torch/csrc/autograd/generated/python_nested_functions.cpp",
    "torch/csrc/autograd/generated/python_fft_functions.cpp",
    "torch/csrc/autograd/generated/python_linalg_functions.cpp",
    "torch/csrc/autograd/generated/python_return_types.cpp",
    "torch/csrc/autograd/generated/python_enum_tag.cpp",
    "torch/csrc/autograd/generated/python_sparse_functions.cpp",
    "torch/csrc/autograd/generated/python_special_functions.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_0.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_1.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_2.cpp",
    "torch/csrc/autograd/generated/python_variable_methods.cpp",
]

# All autograd Python-binding outputs (headers + sources).
GENERATED_AUTOGRAD_PYTHON = _GENERATED_AUTOGRAD_PYTHON_HEADERS + _GENERATED_AUTOGRAD_PYTHON_CPP

# All autograd/lazy C++ outputs: sources, then the header lists so the
# genrule declares every file generate_code writes.
GENERATED_AUTOGRAD_CPP = [
    "torch/csrc/autograd/generated/Functions.cpp",
    "torch/csrc/autograd/generated/VariableType_0.cpp",
    "torch/csrc/autograd/generated/VariableType_1.cpp",
    "torch/csrc/autograd/generated/VariableType_2.cpp",
    "torch/csrc/autograd/generated/VariableType_3.cpp",
    "torch/csrc/autograd/generated/VariableType_4.cpp",
    "torch/csrc/autograd/generated/ViewFuncs.cpp",
    "torch/csrc/autograd/generated/TraceType_0.cpp",
    "torch/csrc/autograd/generated/TraceType_1.cpp",
    "torch/csrc/autograd/generated/TraceType_2.cpp",
    "torch/csrc/autograd/generated/TraceType_3.cpp",
    "torch/csrc/autograd/generated/TraceType_4.cpp",
    "torch/csrc/autograd/generated/ADInplaceOrViewType_0.cpp",
    "torch/csrc/autograd/generated/ADInplaceOrViewType_1.cpp",
    "torch/csrc/lazy/generated/LazyNativeFunctions.cpp",
    "torch/csrc/lazy/generated/RegisterAutogradLazy.cpp",
    "torch/csrc/lazy/generated/RegisterLazy.cpp",
] + _GENERATED_AUTOGRAD_CPP_HEADERS + GENERATED_LAZY_H

# AOTInductor shim generated for CPU.
GENERATED_AOTI_CPP = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.cpp",
]

# AOTInductor shim generated for CUDA.
GENERATED_AOTI_CUDA_CPP = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.cpp",
]
328