1# Owner(s): ["module: cuda"] 2 3import sys 4import unittest 5 6import torch 7from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU 8from torch.testing._internal.common_utils import ( 9 NoTest, 10 run_tests, 11 skipIfRocmVersionLessThan, 12 TestCase, 13) 14 15 16# NOTE: this needs to be run in a brand new process 17 18if not TEST_CUDA: 19 print("CUDA not available, skipping tests", file=sys.stderr) 20 TestCase = NoTest # noqa: F811 21 22 23@torch.testing._internal.common_utils.markDynamoStrictTest 24class TestCudaPrimaryCtx(TestCase): 25 CTX_ALREADY_CREATED_ERR_MSG = ( 26 "Tests defined in test_cuda_primary_ctx.py must be run in a process " 27 "where CUDA contexts are never created. Use either run_test.py or add " 28 "--subprocess to run each test in a different subprocess." 29 ) 30 31 @skipIfRocmVersionLessThan((4, 4, 21504)) 32 def setUp(self): 33 for device in range(torch.cuda.device_count()): 34 # Ensure context has not been created beforehand 35 self.assertFalse( 36 torch._C._cuda_hasPrimaryContext(device), 37 TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG, 38 ) 39 40 @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected") 41 def test_str_repr(self): 42 x = torch.randn(1, device="cuda:1") 43 44 # We should have only created context on 'cuda:1' 45 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 46 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 47 48 str(x) 49 repr(x) 50 51 # We should still have only created context on 'cuda:1' 52 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 53 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 54 55 @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected") 56 def test_copy(self): 57 x = torch.randn(1, device="cuda:1") 58 59 # We should have only created context on 'cuda:1' 60 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 61 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 62 63 y = torch.randn(1, device="cpu") 64 y.copy_(x) 65 66 # We should still have only created context on 'cuda:1' 67 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 68 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 69 70 @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected") 71 def test_pin_memory(self): 72 x = torch.randn(1, device="cuda:1") 73 74 # We should have only created context on 'cuda:1' 75 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 76 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 77 78 self.assertFalse(x.is_pinned()) 79 80 # We should still have only created context on 'cuda:1' 81 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 82 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 83 84 x = torch.randn(3, device="cpu").pin_memory() 85 86 # We should still have only created context on 'cuda:1' 87 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 88 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 89 90 self.assertTrue(x.is_pinned()) 91 92 # We should still have only created context on 'cuda:1' 93 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 94 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 95 96 x = torch.randn(3, device="cpu", pin_memory=True) 97 98 # We should still have only created context on 'cuda:1' 99 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 100 self.assertTrue(torch._C._cuda_hasPrimaryContext(1)) 101 102 x = torch.zeros(3, device="cpu", pin_memory=True) 103 104 # We should still have only created context on 'cuda:1' 105 self.assertFalse(torch._C._cuda_hasPrimaryContext(0)) 106 
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = torch.empty(3, device="cpu", pin_memory=True)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = x.pin_memory()

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))


if __name__ == "__main__":
    run_tests()