// /aosp_15_r20/external/pytorch/aten/src/ATen/test/cuda_reportMemoryUsage_test.cpp
#include <ATen/test/reportMemoryUsage.h>

#include <gtest/gtest.h>

#include <c10/cuda/CUDACachingAllocator.h>

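// This test installs a TestMemoryReportingInfo reporter (from
// ATen/test/reportMemoryUsage.h) as PROFILER_STATE debug info and checks that
// the CUDA caching allocator reports each allocation and free to it; the
// latest record exposes ptr, alloc_size, total_allocated, total_reserved and
// device.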
TEST(DeviceCachingAllocator, check_reporter) {
  auto reporter = std::make_shared<TestMemoryReportingInfo>();
  c10::DebugInfoGuard guard(c10::DebugInfoKind::PROFILER_STATE, reporter);

  auto _200kb = 200 * 1024;
  auto _500mb = 500 * 1024 * 1024;

  auto allocator = c10::cuda::CUDACachingAllocator::get();

  auto alloc1 = allocator->allocate(_200kb);
  auto r = reporter->getLatestRecord();
  EXPECT_EQ(alloc1.get(), r.ptr);
  EXPECT_LE(_200kb, r.alloc_size);
  EXPECT_LE(_200kb, r.total_allocated);
  EXPECT_LE(_200kb, r.total_reserved);
  EXPECT_TRUE(r.device.is_cuda());

  auto alloc1_true_ptr = r.ptr;
  auto alloc1_true_alloc_size = r.alloc_size;

  // PyTorch should not over-allocate far beyond the 200 KB request,
  EXPECT_LT(r.total_allocated, 2 * _200kb);
  // and it should not reserve anywhere near 500 MB for it.
  EXPECT_LT(r.total_reserved, _500mb);

  auto alloc2 = allocator->allocate(_500mb);
  r = reporter->getLatestRecord();
  EXPECT_EQ(alloc2.get(), r.ptr);
  EXPECT_LE(_500mb, r.alloc_size);
  EXPECT_LE(_200kb + _500mb, r.total_allocated);
  EXPECT_LE(_200kb + _500mb, r.total_reserved);
  EXPECT_TRUE(r.device.is_cuda());
  auto alloc2_true_ptr = r.ptr;
  auto alloc2_true_alloc_size = r.alloc_size;

  auto max_reserved = r.total_reserved;

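  // Dropping the DataPtr returns the block to the caching allocator; the free
  // is reported with a negative alloc_size matching the original allocation.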
  alloc1.clear();
  r = reporter->getLatestRecord();
  EXPECT_EQ(alloc1_true_ptr, r.ptr);
  EXPECT_EQ(-alloc1_true_alloc_size, r.alloc_size);
  EXPECT_EQ(alloc2_true_alloc_size, r.total_allocated);
  // alloc2 remains live, and a free operation should not cause any additional
  // memory to be reserved.
  EXPECT_TRUE(
      alloc2_true_alloc_size <= static_cast<int64_t>(r.total_reserved) &&
      r.total_reserved <= max_reserved);
  EXPECT_TRUE(r.device.is_cuda());

  alloc2.clear();
  r = reporter->getLatestRecord();
  EXPECT_EQ(alloc2_true_ptr, r.ptr);
  EXPECT_EQ(-alloc2_true_alloc_size, r.alloc_size);
  EXPECT_EQ(0, r.total_allocated);
  EXPECT_TRUE(r.total_reserved <= max_reserved);
  EXPECT_TRUE(r.device.is_cuda());
}

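// The caching allocator is initialized up front (the argument is presumably
// the device count) so the test above can allocate through it.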
int main(int argc, char* argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  c10::cuda::CUDACachingAllocator::init(1);
  return RUN_ALL_TESTS();
}