xref: /aosp_15_r20/external/cronet/base/memory/discardable_shared_memory.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "partition_alloc/page_allocator.h"

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/win/windows_version.h"
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

namespace base {
namespace {

// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using 4-byte wire format.
// Note: this will cease to work on 19 January 2038.
template <>
[[maybe_unused]] Time TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + Seconds(value) : Time();
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}
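
// Worked example for the 4-byte wire format, assuming a 32-bit AtomicType:
// SharedState (below) stores the serialized timestamp shifted left by one bit
// to make room for the lock bit, leaving 31 bits of whole seconds since the
// Unix epoch. The largest representable usage time is therefore
// 2^31 - 1 = 2147483647 seconds after 1970-01-01 UTC, which falls on
// 19 January 2038; hence the note above.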

// Standard serialization format when using 8-byte wire format.
template <>
[[maybe_unused]] Time TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};
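
// Illustration of the encoding, assuming an 8-byte AtomicType (so the wire
// format is the Time internal value) and an arbitrary non-null |last_used|:
//
//   SharedState locked(SharedState::LOCKED, Time());
//   // locked.value.u == (0 << 1) | 1 == 1: lock bit set, null timestamp.
//
//   SharedState unlocked(SharedState::UNLOCKED, last_used);
//   // unlocked.value.u == (last_used.ToInternalValue() << 1) | 0, so
//   // GetLockState() == UNLOCKED and GetTimestamp() == last_used.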

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return bits::AlignUp(size, base::GetPageSize());
}
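
// For example, with a 4 KiB page size, AlignToPageSize(1) == 4096 and
// AlignToPageSize(4097) == 8192. The SharedState header is padded out to a
// full page this way so that the user data starts on a page boundary; the
// purge primitives used below (madvise() and friends) operate on whole pages.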

#if BUILDFLAG(IS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
  if (!ashmem_device_is_supported())
    return false;

  // If we are participating in the discardable memory backing trial, only
  // enable ashmem unpinning when we are in the corresponding trial group.
  if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
    return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
           base::DiscardableMemoryTrialGroup::kAshmem;
  }
  return true;
}
#endif  // BUILDFLAG(IS_ANDROID)

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {
}

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)),
      mapped_size_(0),
      locked_page_count_(0) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());

  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}
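
// Layout of the region created above, and a typical (illustrative) caller
// sequence assuming the discardable-memory contract described in the header:
//
//   [ SharedState, padded to one page ][ user data, mapped_size_ bytes ]
//   ^ offset 0                         ^ memory()
//
//   DiscardableSharedMemory shmem;
//   CHECK(shmem.CreateAndMap(size));  // Created locked and resident.
//   ...write through shmem.memory()...
//   shmem.Unlock(0, 0);               // Pages may now be discarded.
//   if (shmem.Lock(0, 0) != DiscardableSharedMemory::SUCCESS) {
//     // PURGED or FAILED: contents are gone and must be recreated.
//   }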

bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  mapped_size_ = 0;
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
  if (!locked_page_count_) {
    // Return FAILED when the instance has been purged or was not initialized
    // properly, which is detected by checking whether |last_known_usage_| is
    // null.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte segment.
  if (!length)
    return PURGED;

#if BUILDFLAG(IS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif BUILDFLAG(IS_APPLE)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
              AlignToPageSize(sizeof(SharedState)),
          AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}
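
// Example of the page arithmetic above, assuming a 4 KiB page size:
// Lock(4096, 8192) computes start == 1 and end == 3, i.e. it locks pages 1
// and 2 of the user data area and adds 2 to |locked_page_count_|. The
// segment-wide lock bit in SharedState is only contended (via
// compare-and-swap) when |locked_page_count_| goes from zero to non-zero;
// page-level pinning (ashmem on Android) happens on every call.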

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length| may
  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}

void* DiscardableSharedMemory::memory() const {
  return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine if purging failed because last known usage
  // was incorrect or memory was locked. In the second case, the caller should
  // most likely wait for some amount of time before attempting to purge the
  // memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

// The next section releases as much of the resource as it can from the
// purging process, until the client process notices the purge and releases
// its own references.
// Note: this memory will not be accessed again. The segment will be freed
// asynchronously at a later time, so just do the best we can immediately.
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE which has the same result but memory is purged lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif BUILDFLAG(IS_WIN)
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState));
  size_t length = AlignToPageSize(mapped_size_);

  DWORD ret = DiscardVirtualMemory(address, length);
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret != ERROR_SUCCESS) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ptr);
  }
#elif BUILDFLAG(IS_FUCHSIA)
  // De-commit via our VMAR, rather than relying on the VMO handle, since the
  // handle may have been closed after the memory was mapped into this process.
  uint64_t address_int = reinterpret_cast<uint64_t>(
      static_cast<char*>(shared_memory_mapping_.memory()) +
      AlignToPageSize(sizeof(SharedState)));
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
      0);
  ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif  // BUILDFLAG(IS_FUCHSIA)

  last_known_usage_ = Time();
  return true;
}

void DiscardableSharedMemory::ReleaseMemoryIfPossible(size_t offset,
                                                      size_t length) {
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE which has the same result but memory is purged lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
        // BUILDFLAG(IS_ANDROID)
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) + offset,
              length, MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#else   // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
  partition_alloc::DiscardSystemPages(
      static_cast<char*>(shared_memory_mapping_.memory()) + offset, length);
#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
// Memory dumps are only supported when tracing support is enabled.
#if BUILDFLAG(ENABLE_BASE_TRACING)
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
  // dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  uint64_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps)
  // the tracing UI will account the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager. So the segment need
  // not exist in memory, and weak dumps are created to indicate to the UI that
  // the dump should exist only if the manager also created the global dump
  // edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int pin_result =
          ashmem_pin_region(region.GetPlatformHandle(), offset, length);
      if (pin_result == ASHMEM_WAS_PURGED)
        return PURGED;
      if (pin_result < 0)
        return FAILED;
    }
  }
#endif
  return SUCCESS;
}
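
// Note on the ashmem path above (assumed semantics): ashmem_pin_region()
// reports ASHMEM_WAS_PURGED when the kernel reclaimed the unpinned pages in
// the meantime, which Lock() surfaces to its caller as PURGED so the contents
// can be recreated; a negative return indicates a pinning failure and is
// reported as FAILED.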

// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int unpin_result =
          ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
      DCHECK_EQ(0, unpin_result);
    }
  }
#endif
}

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

#if BUILDFLAG(IS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
  return UseAshmemUnpinningForDiscardableMemory();
}
#endif

}  // namespace base