# Owner(s): ["module: inductor"]
import logging
import unittest

import torch
import torch._logging
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import IS_LINUX
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU


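# Minimal two-layer network: just enough real graph to push through the
# dynamo -> AOTAutograd -> inductor pipeline.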
class MLP(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.l1 = torch.nn.Linear(1, 6)
        self.l2 = torch.nn.Linear(6, 1)

    def forward(self, x=None):
        x = torch.relu(self.l1(x))
        x = torch.relu(self.l2(x))
        return x


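# Plain module-level function; compile target for test_compile_invalid_options.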
def _test_f(x):
    return x * x


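# End-to-end smoke tests: each case compiles and runs a small workload;
# success means the pipeline didn't crash, not that numerics are correct.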
class SmokeTest(TestCase):
    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_mlp(self):
        # Crank dynamo, AOTAutograd, and inductor logging up to DEBUG so the
        # logging paths are exercised along with compilation itself.
        torch._logging.set_logs(
            dynamo=logging.DEBUG, inductor=logging.DEBUG, aot=logging.DEBUG
        )

        mlp = torch.compile(MLP().to(GPU_TYPE))
        # First call compiles; the remaining calls reuse the compiled code.
        for _ in range(3):
            mlp(torch.randn(1, device=GPU_TYPE))

        # Set logging back to defaults so later tests are unaffected.
        torch._logging.set_logs()

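    # torch.compile's two decorator spellings: bare, and called with options
    # ("reduce-overhead" trades compile time for lower per-call Python
    # overhead, via CUDA graphs on CUDA devices).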
    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_compile_decorator(self):
        @torch.compile
        def foo(x):
            return torch.sin(x) + x.min()

        @torch.compile(mode="reduce-overhead")
        def bar(x):
            return x * x

        for _ in range(3):
            foo(torch.full((3, 4), 0.7, device=GPU_TYPE))
            bar(torch.rand((2, 2), device=GPU_TYPE))

    def test_compile_invalid_options(self):
        # An unrecognized mode string raises eagerly, at torch.compile() time.
        with self.assertRaises(RuntimeError):
            torch.compile(_test_f, mode="ha")


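# Gate the run on platforms where Triton is expected to work: Linux with a
# GPU, and for CUDA specifically only devices newer than compute
# capability 5.x.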
if __name__ == "__main__":
    from torch._inductor.test_case import run_tests

    if IS_LINUX and HAS_GPU:
        if (not HAS_CUDA) or torch.cuda.get_device_properties(0).major > 5:
            run_tests()