xref: /aosp_15_r20/external/pytorch/torch/utils/collect_env.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1# mypy: allow-untyped-defs
2
3# Unlike the rest of the PyTorch this file must be python2 compliant.
4# This script outputs relevant system environment info
5# Run it with `python collect_env.py` or `python -m torch.utils.collect_env`
6import datetime
7import locale
8import re
9import subprocess
10import sys
11import os
12from collections import namedtuple
13
14
15try:
16    import torch
17    TORCH_AVAILABLE = True
18except (ImportError, NameError, AttributeError, OSError):
19    TORCH_AVAILABLE = False
20
# System Environment Information
# Immutable record of everything this script collects; populated by
# get_env_info() and rendered into text by pretty_str()/env_info_fmt.
SystemEnv = namedtuple('SystemEnv', [
    'torch_version',
    'is_debug_build',
    'cuda_compiled_version',
    'gcc_version',
    'clang_version',
    'cmake_version',
    'os',
    'libc_version',
    'python_version',
    'python_platform',
    'is_cuda_available',
    'cuda_runtime_version',
    'cuda_module_loading',
    'nvidia_driver_version',
    'nvidia_gpu_models',
    'cudnn_version',
    'pip_version',  # 'pip' or 'pip3'
    'pip_packages',
    'conda_packages',
    'hip_compiled_version',
    'hip_runtime_version',
    'miopen_runtime_version',
    'caching_allocator_config',
    'is_xnnpack_available',
    'cpu_info',
])

# Substrings used to filter `conda list` output down to relevant packages.
DEFAULT_CONDA_PATTERNS = {
    "torch",
    "numpy",
    "cudatoolkit",
    "soumith",
    "mkl",
    "magma",
    "triton",
    "optree",
}

# Substrings used to filter `pip list` output down to relevant packages.
DEFAULT_PIP_PATTERNS = {
    "torch",
    "numpy",
    "mypy",
    "flake8",
    "triton",
    "optree",
    "onnx",
}
70
71
def run(command):
    """Execute *command* and return a ``(return-code, stdout, stderr)`` tuple.

    Args:
        command: Either a string (executed through the shell, so pipelines
            like ``a | b`` work) or a list of arguments (executed directly).

    Returns:
        Tuple of (return code, stdout text, stderr text); both streams are
        decoded and stripped of surrounding whitespace.
    """
    # Idiomatic type test; a bare string implies shell=True.
    shell = isinstance(command, str)
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=shell)
    raw_output, raw_err = p.communicate()
    rc = p.returncode
    # Windows console tools emit the OEM code page rather than the
    # locale's preferred (ANSI) encoding.
    if get_platform() == 'win32':
        enc = 'oem'
    else:
        enc = locale.getpreferredencoding()
    output = raw_output.decode(enc)
    err = raw_err.decode(enc)
    return rc, output.strip(), err.strip()
86
87
def run_and_read_all(run_lambda, command):
    """Execute *command* via *run_lambda*; return its entire stdout, or None on failure."""
    returncode, stdout, _ = run_lambda(command)
    return stdout if returncode == 0 else None
94
95
def run_and_parse_first_match(run_lambda, command, regex):
    """Run *command* via *run_lambda*; return group 1 of the first *regex* match, or None."""
    returncode, stdout, _ = run_lambda(command)
    if returncode == 0:
        found = re.search(regex, stdout)
        if found is not None:
            return found.group(1)
    return None
105
def run_and_return_first_line(run_lambda, command):
    """Run *command* via *run_lambda*; return the first line of stdout, or None on failure."""
    returncode, stdout, _ = run_lambda(command)
    if returncode != 0:
        return None
    first_line, _, _ = stdout.partition('\n')
    return first_line
112
113
def get_conda_packages(run_lambda, patterns=None):
    """Return `conda list` output filtered to lines matching *patterns*, or None on failure."""
    if patterns is None:
        patterns = DEFAULT_CONDA_PATTERNS
    # Honor the active conda installation when CONDA_EXE is exported.
    conda_exe = os.environ.get('CONDA_EXE', 'conda')
    listing = run_and_read_all(run_lambda, "{} list".format(conda_exe))
    if listing is None:
        return listing

    relevant = []
    for line in listing.splitlines():
        # Skip conda's comment header; keep only lines naming a pattern.
        if line.startswith("#"):
            continue
        if any(name in line for name in patterns):
            relevant.append(line)
    return "\n".join(relevant)
128
def get_gcc_version(run_lambda):
    """Return the gcc version string, or None if gcc is unavailable."""
    version_pattern = r'gcc (.*)'
    return run_and_parse_first_match(run_lambda, 'gcc --version', version_pattern)
131
def get_clang_version(run_lambda):
    """Return the clang version string, or None if clang is unavailable."""
    version_pattern = r'clang version (.*)'
    return run_and_parse_first_match(run_lambda, 'clang --version', version_pattern)
134
135
def get_cmake_version(run_lambda):
    """Return the cmake version string, or None if cmake is unavailable."""
    version_pattern = r'cmake (.*)'
    return run_and_parse_first_match(run_lambda, 'cmake --version', version_pattern)
138
139
def get_nvidia_driver_version(run_lambda):
    """Return the NVIDIA driver version (from the CUDA kext on macOS), or None."""
    if get_platform() == 'darwin':
        # macOS has no nvidia-smi; read the version of the loaded CUDA kext.
        return run_and_parse_first_match(
            run_lambda,
            'kextstat | grep -i cuda',
            r'com[.]nvidia[.]CUDA [(](.*?)[)]',
        )
    return run_and_parse_first_match(run_lambda, get_nvidia_smi(),
                                     r'Driver Version: (.*?) ')
147
148
def get_gpu_info(run_lambda):
    """Return GPU model name(s), with UUIDs stripped from nvidia-smi output; None if unknown."""
    is_rocm = (TORCH_AVAILABLE and hasattr(torch.version, 'hip')
               and torch.version.hip is not None)
    if get_platform() == 'darwin' or is_rocm:
        # nvidia-smi is not usable here: fall back to what torch reports.
        if not (TORCH_AVAILABLE and torch.cuda.is_available()):
            return None
        arch_suffix = ""
        if torch.version.hip is not None:
            props = torch.cuda.get_device_properties(0)
            if hasattr(props, "gcnArchName"):
                arch_suffix = " ({})".format(props.gcnArchName)
            else:
                arch_suffix = "NoGCNArchNameOnOldPyTorch"
        return torch.cuda.get_device_name(None) + arch_suffix
    rc, listing, _ = run_lambda(get_nvidia_smi() + ' -L')
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID
    return re.sub(r' \(UUID: .+?\)', '', listing)
169
170
def get_running_cuda_version(run_lambda):
    """Return the CUDA toolkit version reported by `nvcc --version`, or None."""
    version_pattern = r'release .+ V(.*)'
    return run_and_parse_first_match(run_lambda, 'nvcc --version', version_pattern)
173
174
def get_cudnn_version(run_lambda):
    """Return a list of libcudnn.so; it's hard to tell which one is being used."""
    platform = get_platform()
    if platform == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
        where_cmd = os.path.join(system_root, 'System32', 'where')
        cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
    elif platform == 'darwin':
        # CUDA libraries and drivers can be found in /usr/local/cuda/. See
        # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
        # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
        # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
    else:
        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
    rc, out, _ = run_lambda(cudnn_cmd)
    # The lookup command exits with 1 on permission errors or when nothing matched.
    if not out or rc not in (0, 1):
        # Fall back to an explicitly configured library location.
        cudnn_lib = os.environ.get('CUDNN_LIBRARY')
        if cudnn_lib is not None and os.path.isfile(cudnn_lib):
            return os.path.realpath(cudnn_lib)
        return None
    # Resolve symlinks, then keep only real files; dedupe via a set.
    candidates = {os.path.realpath(fn) for fn in out.split('\n')}
    candidates = {fn for fn in candidates if os.path.isfile(fn)}
    if not candidates:
        return None
    # Alphabetize the result because the order is non-deterministic otherwise
    files = sorted(candidates)
    if len(files) == 1:
        return files[0]
    return 'Probably one of the following:\n{}'.format('\n'.join(files))
210
211
def get_nvidia_smi():
    """Return the command used to invoke nvidia-smi (a quoted full path on Windows)."""
    # Note: nvidia-smi is currently available only on Windows and Linux
    smi = 'nvidia-smi'
    if get_platform() != 'win32':
        return smi
    system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
    program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
    # Check the System32 copy first, then the legacy NVSMI install directory.
    candidates = [
        os.path.join(system_root, 'System32', smi),
        os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return '"{}"'.format(candidate)
    return smi
226
227
228# example outputs of CPU infos
229#  * linux
230#    Architecture:            x86_64
231#      CPU op-mode(s):        32-bit, 64-bit
232#      Address sizes:         46 bits physical, 48 bits virtual
233#      Byte Order:            Little Endian
234#    CPU(s):                  128
235#      On-line CPU(s) list:   0-127
236#    Vendor ID:               GenuineIntel
237#      Model name:            Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
238#        CPU family:          6
239#        Model:               106
240#        Thread(s) per core:  2
241#        Core(s) per socket:  32
242#        Socket(s):           2
243#        Stepping:            6
244#        BogoMIPS:            5799.78
245#        Flags:               fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
246#                             sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
247#                             xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
248#                             pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
249#                             hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
250#                             fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
251#                             avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
252#                             xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
253#                             avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
254#    Virtualization features:
255#      Hypervisor vendor:     KVM
256#      Virtualization type:   full
257#    Caches (sum of all):
258#      L1d:                   3 MiB (64 instances)
259#      L1i:                   2 MiB (64 instances)
260#      L2:                    80 MiB (64 instances)
261#      L3:                    108 MiB (2 instances)
262#    NUMA:
263#      NUMA node(s):          2
264#      NUMA node0 CPU(s):     0-31,64-95
265#      NUMA node1 CPU(s):     32-63,96-127
266#    Vulnerabilities:
267#      Itlb multihit:         Not affected
268#      L1tf:                  Not affected
269#      Mds:                   Not affected
270#      Meltdown:              Not affected
271#      Mmio stale data:       Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
272#      Retbleed:              Not affected
273#      Spec store bypass:     Mitigation; Speculative Store Bypass disabled via prctl and seccomp
274#      Spectre v1:            Mitigation; usercopy/swapgs barriers and __user pointer sanitization
275#      Spectre v2:            Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
276#      Srbds:                 Not affected
277#      Tsx async abort:       Not affected
278#  * win32
279#    Architecture=9
280#    CurrentClockSpeed=2900
281#    DeviceID=CPU0
282#    Family=179
283#    L2CacheSize=40960
284#    L2CacheSpeed=
285#    Manufacturer=GenuineIntel
286#    MaxClockSpeed=2900
287#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
288#    ProcessorType=3
289#    Revision=27142
290#
291#    Architecture=9
292#    CurrentClockSpeed=2900
293#    DeviceID=CPU1
294#    Family=179
295#    L2CacheSize=40960
296#    L2CacheSpeed=
297#    Manufacturer=GenuineIntel
298#    MaxClockSpeed=2900
299#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
300#    ProcessorType=3
301#    Revision=27142
302
def get_cpu_info(run_lambda):
    """Return raw CPU details from lscpu/wmic/sysctl; stderr text on failure, '' if OS unknown."""
    platform = get_platform()
    rc, out, err = 0, '', ''
    if platform == 'linux':
        rc, out, err = run_lambda('lscpu')
    elif platform == 'win32':
        rc, out, err = run_lambda('wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \
        CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE')
    elif platform == 'darwin':
        rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
    return out if rc == 0 else err
318
319
def get_platform():
    """Map sys.platform onto one of the canonical names used throughout this script."""
    for known in ('linux', 'win32', 'cygwin', 'darwin'):
        if sys.platform.startswith(known):
            return known
    # Unrecognized platform: report sys.platform verbatim.
    return sys.platform
331
332
def get_mac_version(run_lambda):
    """Return the macOS product version (e.g. '14.2'), or None."""
    whole_line = r'(.*)'
    return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', whole_line)
335
336
def get_windows_version(run_lambda):
    """Return the Windows edition caption via wmic, or None on failure."""
    system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
    wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
    findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
    # findstr /v drops the 'Caption' header line, leaving only the value.
    command = '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd)
    return run_and_read_all(run_lambda, command)
342
343
def get_lsb_version(run_lambda):
    """Return the distro description from `lsb_release -a`, or None."""
    description_pattern = r'Description:\t(.*)'
    return run_and_parse_first_match(run_lambda, 'lsb_release -a', description_pattern)
346
347
def check_release_file(run_lambda):
    """Return PRETTY_NAME from /etc/*-release files, or None."""
    pretty_name_pattern = r'PRETTY_NAME="(.*)"'
    return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
                                     pretty_name_pattern)
351
352
def get_os(run_lambda):
    """Return a human-readable OS description, e.g. 'Ubuntu 22.04 (x86_64)', or None."""
    from platform import machine
    platform = get_platform()

    if platform in ('win32', 'cygwin'):
        return get_windows_version(run_lambda)

    if platform == 'darwin':
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return 'macOS {} ({})'.format(version, machine())

    if platform == 'linux':
        # Prefer lsb_release (Ubuntu/Debian), then /etc/*-release,
        # then fall back to the bare platform name.
        for probe in (get_lsb_version, check_release_file):
            desc = probe(run_lambda)
            if desc is not None:
                return '{} ({})'.format(desc, machine())
        return '{} ({})'.format(platform, machine())

    # Unknown platform
    return platform
381
382
def get_python_platform():
    """Return `platform.platform()` for the running interpreter."""
    import platform as platform_module
    return platform_module.platform()
386
387
def get_libc_version():
    """Return 'lib-version' (e.g. 'glibc-2.35') on Linux; 'N/A' on other platforms."""
    if get_platform() != 'linux':
        return 'N/A'
    import platform
    lib, version = platform.libc_ver()
    return '{}-{}'.format(lib, version)
393
394
def get_pip_packages(run_lambda, patterns=None):
    """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages.

    Args:
        run_lambda: Callable returning (rc, stdout, stderr) for a command.
        patterns: Substrings a line must contain to be kept; defaults to
            DEFAULT_PIP_PATTERNS.

    Returns:
        Tuple of (pip command name 'pip'/'pip3', filtered package listing).
    """
    if patterns is None:
        patterns = DEFAULT_PIP_PATTERNS

    # People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
    def run_with_pip(pip):
        out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"])
        # run_and_read_all returns None when `pip list` fails (e.g. pip is
        # missing); report an empty listing rather than crash on None.
        if out is None:
            return ""
        return "\n".join(
            line
            for line in out.splitlines()
            if any(name in line for name in patterns)
        )

    pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
    out = run_with_pip([sys.executable, '-mpip'])

    return pip_version, out
414
415
def get_cachingallocator_config():
    """Return the PYTORCH_CUDA_ALLOC_CONF environment setting ('' when unset)."""
    return os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
419
420
def get_cuda_module_loading_config():
    """Return the CUDA_MODULE_LOADING env setting after CUDA init; 'N/A' without CUDA."""
    if not (TORCH_AVAILABLE and torch.cuda.is_available()):
        return "N/A"
    # Initialize CUDA before reading the variable, as the original code did.
    torch.cuda.init()
    return os.environ.get('CUDA_MODULE_LOADING', '')
428
429
def is_xnnpack_available():
    """Return str(torch.backends.xnnpack.enabled), or 'N/A' when torch is missing."""
    if not TORCH_AVAILABLE:
        return "N/A"
    import torch.backends.xnnpack
    return str(torch.backends.xnnpack.enabled)  # type: ignore[attr-defined]
436
def get_env_info():
    """
    Collects environment information to aid in debugging.

    The returned environment information contains details on torch version, is debug build
    or not, cuda compiled version, gcc version, clang version, cmake version, operating
    system, libc version, python version, python platform, CUDA availability, CUDA
    runtime version, CUDA module loading config, GPU model and configuration, Nvidia
    driver version, cuDNN version, pip version and versions of relevant pip and
    conda packages, HIP runtime version, MIOpen runtime version,
    Caching allocator config, XNNPACK availability and CPU information.

    Returns:
        SystemEnv (namedtuple): A tuple containing various environment details
            and system information.
    """
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        version_str = torch.__version__
        debug_mode_str = str(torch.version.debug)
        cuda_available_str = str(torch.cuda.is_available())
        cuda_version_str = torch.version.cuda
        if not hasattr(torch.version, 'hip') or torch.version.hip is None:  # cuda version
            # CUDA (or CPU-only) build: no HIP/MIOpen details to report.
            hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
        else:  # HIP version
            def get_version_or_na(cfg, prefix):
                # Config lines look like '<prefix> ...: <version>'; the version
                # is the last whitespace-separated token of a matching line.
                _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s]
                return _lst[0] if _lst else 'N/A'

            # Parse HIP/MIOpen runtime versions out of torch's build config dump.
            cfg = torch._C._show_config().split('\n')
            hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime')
            miopen_runtime_version = get_version_or_na(cfg, 'MIOpen')
            cuda_version_str = 'N/A'
            hip_compiled_version = torch.version.hip
    else:
        # torch could not be imported: every torch-derived field becomes 'N/A'.
        version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'

    # sys.version can span multiple lines; flatten it for single-line display.
    sys_version = sys.version.replace("\n", " ")

    conda_packages = get_conda_packages(run_lambda)

    return SystemEnv(
        torch_version=version_str,
        is_debug_build=debug_mode_str,
        python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
        python_platform=get_python_platform(),
        is_cuda_available=cuda_available_str,
        cuda_compiled_version=cuda_version_str,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        cuda_module_loading=get_cuda_module_loading_config(),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        hip_compiled_version=hip_compiled_version,
        hip_runtime_version=hip_runtime_version,
        miopen_runtime_version=miopen_runtime_version,
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=conda_packages,
        os=get_os(run_lambda),
        libc_version=get_libc_version(),
        gcc_version=get_gcc_version(run_lambda),
        clang_version=get_clang_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
        caching_allocator_config=get_cachingallocator_config(),
        is_xnnpack_available=is_xnnpack_available(),
        cpu_info=get_cpu_info(run_lambda),
    )
508
# str.format template rendered by pretty_str(); each placeholder name is a
# SystemEnv field.
env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
ROCM used to build PyTorch: {hip_compiled_version}

OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}

Python version: {python_version}
Python platform: {python_platform}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
CUDA_MODULE_LOADING set to: {cuda_module_loading}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
HIP runtime version: {hip_runtime_version}
MIOpen runtime version: {miopen_runtime_version}
Is XNNPACK available: {is_xnnpack_available}

CPU:
{cpu_info}

Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()
540
541
def pretty_str(envinfo):
    """Render a SystemEnv namedtuple into the human-readable report string.

    Normalizes missing/boolean values, collapses CUDA fields to 'No CUDA'
    when appropriate, tags the package listings, and substitutes everything
    into ``env_info_fmt``.
    """
    def replace_nones(dct, replacement='Could not collect'):
        # Fields whose collection failed are None; show a friendly marker.
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct

    def replace_bools(dct, true='Yes', false='No'):
        # Render boolean values as Yes/No for readability.
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct

    def prepend(text, tag='[prepend]'):
        # Prefix every line of `text` with `tag` (used for [pip]/[conda] tags).
        lines = text.split('\n')
        updated_lines = [tag + line for line in lines]
        return '\n'.join(updated_lines)

    def replace_if_empty(text, replacement='No relevant packages'):
        # Empty (but not None) package listings get an explanatory message.
        if text is not None and len(text) == 0:
            return replacement
        return text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split('\n')) > 1:
            return '\n{}\n'.format(string)
        return string

    mutable_dict = envinfo._asdict()

    # If nvidia_gpu_models is multiline, start on the next line
    mutable_dict['nvidia_gpu_models'] = \
        maybe_start_on_next_line(envinfo.nvidia_gpu_models)

    # If the machine doesn't have CUDA, report some fields as 'No CUDA'
    dynamic_cuda_fields = [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
    ]
    all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
    all_dynamic_cuda_fields_missing = all(
        mutable_dict[field] is None for field in dynamic_cuda_fields)
    if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
        for field in all_cuda_fields:
            mutable_dict[field] = 'No CUDA'
        if envinfo.cuda_compiled_version is None:
            mutable_dict['cuda_compiled_version'] = 'None'

    # Replace True with Yes, False with No
    mutable_dict = replace_bools(mutable_dict)

    # Replace all None objects with 'Could not collect'
    # (must run after the bool pass so True/False are already strings).
    mutable_dict = replace_nones(mutable_dict)

    # If either of these are '', replace with 'No relevant packages'
    mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
    mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])

    # Tag conda and pip packages with a prefix
    # If they were previously None, they'll show up as ie '[conda] Could not collect'
    if mutable_dict['pip_packages']:
        mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
                                               '[{}] '.format(envinfo.pip_version))
    if mutable_dict['conda_packages']:
        mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
                                                 '[conda] ')
    mutable_dict['cpu_info'] = envinfo.cpu_info
    return env_info_fmt.format(**mutable_dict)
615
616
def get_pretty_env_info():
    """Collect environment information and format it as a human-readable report.

    Convenience wrapper around `get_env_info` and `pretty_str`; this is the
    string printed by `python collect_env.py` when filing a bug report.

    Returns:
        str: A pretty string of the environment information.
    """
    env = get_env_info()
    return pretty_str(env)
630
631
def main():
    """Print the environment report and, on Linux, point at any recent minidump."""
    print("Collecting environment information...")
    print(get_pretty_env_info())

    crash_handler_present = (TORCH_AVAILABLE and hasattr(torch, 'utils')
                             and hasattr(torch.utils, '_crash_handler'))
    if not crash_handler_present:
        return
    minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
    if sys.platform != "linux" or not os.path.exists(minidump_dir):
        return
    # Find the most recently created dump and tell the user about it.
    dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
    latest = max(dumps, key=os.path.getctime)
    ctime = os.path.getctime(latest)
    creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
    msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
          "if this is related to your bug please include it when you file a report ***"
    print(msg, file=sys.stderr)
647
648
649
650if __name__ == '__main__':
651    main()
652