// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/madv_free_discardable_memory_posix.h"

#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/utsname.h>

#include <atomic>

#include "base/atomicops.h"
#include "base/bits.h"
#include "base/functional/callback.h"
#include "base/logging.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/page_size.h"
#include "base/notreached.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_ANDROID)
#include <sys/prctl.h>
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/memory_dump_manager.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif  // defined(ADDRESS_SANITIZER)

namespace {

constexpr intptr_t kPageMagicCookie = 1;

void* AllocatePages(size_t size_in_pages) {
  const size_t length = size_in_pages * base::GetPageSize();
  void* data = mmap(nullptr, length, PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  PCHECK(data != MAP_FAILED);

#if BUILDFLAG(IS_ANDROID)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, data, length,
        "madv-free-discardable");
#endif

  return data;
}

// Checks if the system supports usage of MADV_FREE as a backing for
// discardable memory.
base::MadvFreeSupport ProbePlatformMadvFreeSupport() {
  // Note: If the compiling system does not have headers for Linux 4.5+, then
  // the MADV_FREE define will not exist and the probe will default to
  // unsupported, regardless of whether the target system actually supports
  // MADV_FREE.
#if !BUILDFLAG(IS_APPLE) && defined(MADV_FREE)
  uint8_t* dummy_page = static_cast<uint8_t*>(AllocatePages(1));
  dummy_page[0] = 1;

  base::MadvFreeSupport support = base::MadvFreeSupport::kUnsupported;

  // Check if the MADV_FREE advice value exists.
  int retval = madvise(dummy_page, base::GetPageSize(), MADV_FREE);
  if (!retval) {
    // For Linux 4.5 to 4.12, MADV_FREE on a swapless system will lead to
    // memory being immediately discarded. Verify that the memory was not
    // discarded.
    if (dummy_page[0]) {
      support = base::MadvFreeSupport::kSupported;
    }
  }
  PCHECK(!munmap(dummy_page, base::GetPageSize()));
  return support;
#else
  return base::MadvFreeSupport::kUnsupported;
#endif
}
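
// For reference, the probe above resolves as follows (derived from the logic
// and comments in ProbePlatformMadvFreeSupport()):
//
//   MADV_FREE undefined at compile time (pre-4.5 headers) -> kUnsupported
//   madvise(MADV_FREE) fails (e.g. pre-4.5 kernel)        -> kUnsupported
//   madvise(MADV_FREE) succeeds but the page was zeroed
//     (Linux 4.5-4.12 without swap discards immediately)  -> kUnsupported
//   madvise(MADV_FREE) succeeds and the page is intact    -> kSupported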

}  // namespace

namespace base {

MadvFreeDiscardableMemoryPosix::MadvFreeDiscardableMemoryPosix(
    size_t size_in_bytes,
    std::atomic<size_t>* allocator_byte_count)
    : size_in_bytes_(size_in_bytes),
      allocated_pages_((size_in_bytes_ + base::GetPageSize() - 1) /
                       base::GetPageSize()),
      allocator_byte_count_(allocator_byte_count),
      page_first_word_((size_in_bytes_ + base::GetPageSize() - 1) /
                       base::GetPageSize()) {
  data_ = AllocatePages(allocated_pages_);
  (*allocator_byte_count_) += size_in_bytes_;
}

MadvFreeDiscardableMemoryPosix::~MadvFreeDiscardableMemoryPosix() {
  if (Deallocate()) {
    DVLOG(1) << "Region evicted during destructor with " << allocated_pages_
             << " pages";
  }
}

bool MadvFreeDiscardableMemoryPosix::Lock() {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(!is_locked_);
  // Locking fails if the memory has been deallocated.
  if (!data_)
    return false;

#if defined(ADDRESS_SANITIZER)
  // Unpoison the region here, since locking writes to the pages. Note that
  // even if locking fails, we still want the region unpoisoned after
  // deallocation.
  ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif  // defined(ADDRESS_SANITIZER)

  size_t page_index;
  for (page_index = 0; page_index < allocated_pages_; ++page_index) {
    if (!LockPage(page_index))
      break;
  }

  if (page_index < allocated_pages_) {
    DVLOG(1) << "Region eviction discovered during lock with "
             << allocated_pages_ << " pages";
    Deallocate();
    return false;
  }
  DCHECK(IsResident());

  is_locked_ = true;
  return true;
}

void MadvFreeDiscardableMemoryPosix::Unlock() {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(is_locked_);
  DCHECK(data_ != nullptr);

  for (size_t page_index = 0; page_index < allocated_pages_; ++page_index) {
    UnlockPage(page_index);
  }

#ifdef MADV_FREE
  if (!keep_memory_for_testing_) {
    int retval =
        madvise(data_, allocated_pages_ * base::GetPageSize(), MADV_FREE);
    DPCHECK(!retval);
  }
#endif

#if defined(ADDRESS_SANITIZER)
  ASAN_POISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif  // defined(ADDRESS_SANITIZER)

  is_locked_ = false;
}

void* MadvFreeDiscardableMemoryPosix::data() const {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(is_locked_);
  DCHECK(data_ != nullptr);

  return data_;
}

bool MadvFreeDiscardableMemoryPosix::LockPage(size_t page_index) {
  // We require the byte-level representation of std::atomic<intptr_t> to be
  // equivalent to that of an intptr_t. Since std::atomic<intptr_t> has
  // standard layout, equal size is sufficient for the two types to have the
  // same byte-level representation.
  static_assert(sizeof(intptr_t) == sizeof(std::atomic<intptr_t>),
                "Incompatible layout of std::atomic.");
  DCHECK(std::atomic<intptr_t>{}.is_lock_free());
  std::atomic<intptr_t>* page_as_atomic =
      reinterpret_cast<std::atomic<intptr_t>*>(
          static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());

  intptr_t expected = kPageMagicCookie;

  // Recall that we set the first word of the page to |kPageMagicCookie|
  // (non-zero) during unlocking. Thus, if the value has changed, the page has
  // been discarded. Restore the page's original first word from before
  // unlocking only if the page has not been discarded.
  if (!std::atomic_compare_exchange_strong_explicit(
          page_as_atomic, &expected,
          static_cast<intptr_t>(page_first_word_[page_index]),
          std::memory_order_relaxed, std::memory_order_relaxed)) {
    return false;
  }

  return true;
}

void MadvFreeDiscardableMemoryPosix::UnlockPage(size_t page_index) {
  DCHECK(std::atomic<intptr_t>{}.is_lock_free());

  std::atomic<intptr_t>* page_as_atomic =
      reinterpret_cast<std::atomic<intptr_t>*>(
          static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());

  // Save the first word of the page so it can be restored during locking.
  page_first_word_[page_index].store(*page_as_atomic,
                                     std::memory_order_relaxed);
  // Store a non-zero value into the first word of the page, so we can tell
  // whether the page was discarded when locking it again.
  page_as_atomic->store(kPageMagicCookie, std::memory_order_relaxed);
}
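
// For reference, a condensed sketch of the per-page cookie protocol above
// (illustrative pseudocode only, not part of the implementation):
//
//   UnlockPage(i):  saved[i] = page[i][0]          // stash the first word
//                   page[i][0] = kPageMagicCookie  // tag the page
//                   (Unlock() then applies MADV_FREE to the whole region)
//
//   LockPage(i):    if page[i][0] == kPageMagicCookie:  // page survived
//                     page[i][0] = saved[i]             // restore the word
//                   else:                               // kernel zeroed it
//                     fail, and Lock() deallocates the region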

void MadvFreeDiscardableMemoryPosix::DiscardPage(size_t page_index) {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(!is_locked_);
  DCHECK(page_index < allocated_pages_);
  int retval =
      madvise(static_cast<uint8_t*>(data_) + base::GetPageSize() * page_index,
              base::GetPageSize(), MADV_DONTNEED);
  DPCHECK(!retval);
}
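
// Background note: unlike MADV_FREE, which only allows the kernel to reclaim
// pages lazily, MADV_DONTNEED on a private anonymous mapping drops the pages
// immediately, so subsequent reads observe zero-filled pages. This makes it
// suitable for forcing a deterministic discard, as DiscardForTesting() below
// does for the entire region.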

bool MadvFreeDiscardableMemoryPosix::IsLockedForTesting() const {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  return is_locked_;
}

void MadvFreeDiscardableMemoryPosix::DiscardForTesting() {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(!is_locked_);
  int retval =
      madvise(data_, base::GetPageSize() * allocated_pages_, MADV_DONTNEED);
  DPCHECK(!retval);
}

trace_event::MemoryAllocatorDump*
MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
    const char* name,
    trace_event::ProcessMemoryDump* pmd) const {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  using base::trace_event::MemoryAllocatorDump;
  std::string allocator_dump_name = base::StringPrintf(
      "discardable/segment_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));

  MemoryAllocatorDump* allocator_dump =
      pmd->CreateAllocatorDump(allocator_dump_name);

  bool is_discarded = IsDiscarded();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name);
  // After the suballocation below, the segment's effective_size is the unused
  // space that results from rounding the allocation up to a whole number of
  // pages.
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes,
                  is_discarded ? 0U : static_cast<uint64_t>(size_in_bytes_));

  allocator_dump->AddScalar(
      MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
      is_discarded
          ? 0U
          : static_cast<uint64_t>(allocated_pages_ * base::GetPageSize()));
  allocator_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                            MemoryAllocatorDump::kUnitsObjects, 1U);
  allocator_dump->AddScalar(
      "wasted_size", MemoryAllocatorDump::kUnitsBytes,
      static_cast<uint64_t>(allocated_pages_ * base::GetPageSize() -
                            size_in_bytes_));
  allocator_dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
                            is_locked_ ? size_in_bytes_ : 0U);
  allocator_dump->AddScalar("page_count", MemoryAllocatorDump::kUnitsObjects,
                            static_cast<uint64_t>(allocated_pages_));

  // The amount of space that is discarded, but not unmapped (i.e. the memory
  // was discarded while unlocked, but the pages are still mapped in memory
  // since Deallocate() has not been called yet). This instance is discarded
  // if it is unlocked and not all of its pages are resident in memory.
  allocator_dump->AddScalar(
      "discarded_size", MemoryAllocatorDump::kUnitsBytes,
      is_discarded ? allocated_pages_ * base::GetPageSize() : 0U);

  pmd->AddSuballocation(dump->guid(), allocator_dump_name);
  return dump;
#else   // BUILDFLAG(ENABLE_BASE_TRACING)
  NOTREACHED();
  return nullptr;
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

bool MadvFreeDiscardableMemoryPosix::IsValid() const {
  DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
  return data_ != nullptr;
}

void MadvFreeDiscardableMemoryPosix::SetKeepMemoryForTesting(bool keep_memory) {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(is_locked_);
  keep_memory_for_testing_ = keep_memory;
}

bool MadvFreeDiscardableMemoryPosix::IsResident() const {
  DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
#if BUILDFLAG(IS_APPLE)
  std::vector<char> vec(allocated_pages_);
#else
  std::vector<unsigned char> vec(allocated_pages_);
#endif

  int retval =
      mincore(data_, allocated_pages_ * base::GetPageSize(), vec.data());
  DPCHECK(retval == 0 || errno == EAGAIN);

  // The least significant bit of each mincore() entry is set iff the
  // corresponding page is resident in memory.
  for (size_t i = 0; i < allocated_pages_; ++i) {
    if (!(vec[i] & 1))
      return false;
  }
  return true;
}

bool MadvFreeDiscardableMemoryPosix::IsDiscarded() const {
  return !is_locked_ && !IsResident();
}

bool MadvFreeDiscardableMemoryPosix::Deallocate() {
  DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
  if (data_) {
#if defined(ADDRESS_SANITIZER)
    ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif  // defined(ADDRESS_SANITIZER)

    int retval = munmap(data_, allocated_pages_ * base::GetPageSize());
    PCHECK(!retval);
    data_ = nullptr;
    (*allocator_byte_count_) -= size_in_bytes_;
    return true;
  }
  return false;
}

MadvFreeSupport GetMadvFreeSupport() {
  static MadvFreeSupport kMadvFreeSupport = ProbePlatformMadvFreeSupport();
  return kMadvFreeSupport;
}
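
// Example usage (illustrative only; the call site is an assumption, not part
// of this file):
//
//   if (base::GetMadvFreeSupport() == base::MadvFreeSupport::kSupported) {
//     // Back discardable memory with MadvFreeDiscardableMemoryPosix.
//   }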

}  // namespace base