# xref: /aosp_15_r20/external/pytorch/torch/backends/mkl/__init__.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
# mypy: allow-untyped-defs
import torch


def is_available():
    r"""Return ``True`` if this PyTorch build was compiled with MKL support."""
    # The flag is baked into the C extension at build time.
    built_with_mkl = torch._C.has_mkl
    return built_with_mkl
8
9
# Verbosity levels accepted by ``torch._C._verbose.mkl_set_verbose`` and by
# the ``verbose`` context manager in this module.
VERBOSE_OFF = 0
VERBOSE_ON = 1
12
13
class verbose:
    """
    On-demand oneMKL verbosing functionality.

    To make it easier to debug performance issues, oneMKL can dump verbose
    messages containing execution information like duration while executing
    the kernel. The verbosing functionality can be invoked via an environment
    variable named `MKL_VERBOSE`. However, this methodology dumps messages in
    all steps. Those are a large amount of verbose messages. Moreover, for
    investigating the performance issues, generally taking verbose messages
    for one single iteration is enough. This on-demand verbosing functionality
    makes it possible to control scope for verbose message dumping. In the
    following example, verbose messages will be dumped out for the second
    inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
    """

    def __init__(self, enable):
        # Requested verbosity level; expected to be VERBOSE_OFF or VERBOSE_ON.
        self.enable = enable

    def __enter__(self):
        if self.enable == VERBOSE_OFF:
            # Nothing to enable, but still return self so that
            # ``with verbose(...) as v:`` binds the manager instead of None.
            # (Previously this branch fell through and returned None.)
            return self
        st = torch._C._verbose.mkl_set_verbose(self.enable)
        # NOTE(review): ``assert`` is stripped under ``python -O``; kept as-is
        # because callers may rely on AssertionError being raised here.
        assert (
            st
        ), "Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Unconditionally restore the quiet default when the scope exits;
        # returning False propagates any in-flight exception.
        torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
        return False
58