// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/shared_memory_security_policy.h"

#include <algorithm>
#include <atomic>
#include <limits>
#include <optional>

#include "base/bits.h"
#include "base/memory/page_size.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"

namespace base {

namespace {

// Note: pointers are 32 bits on all architectures in NaCl. See
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
#if defined(ARCH_CPU_32_BITS) || BUILDFLAG(IS_NACL)
// No effective limit on 32-bit, since there simply isn't enough address space
// for ASLR to be particularly effective.
constexpr size_t kTotalMappedSizeLimit = std::numeric_limits<size_t>::max();
#elif defined(ARCH_CPU_64_BITS)
// 32 GB of mappings ought to be enough for anybody.
constexpr size_t kTotalMappedSizeLimit = 32ULL * 1024 * 1024 * 1024;
#endif
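// As a rough illustration, the 64-bit cap above admits on the order of
// 8,000 concurrent 4 MiB mappings before further reservations are refused.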

// Running total of page-aligned bytes currently reserved for shared memory
// mappings in this process.
static std::atomic_size_t total_mapped_size_;
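// A hypothetical illustration of the rounding below, assuming 4 KiB pages on
// a non-Windows build: AlignWithPageSize(1) yields 4096,
// AlignWithPageSize(8192) yields 8192, and AlignWithPageSize(SIZE_MAX) yields
// std::nullopt because rounding up would wrap around.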
std::optional<size_t> AlignWithPageSize(size_t size) {
#if BUILDFLAG(IS_WIN)
  // TODO(crbug.com/210609): Matches alignment requirements defined in
  // platform_shared_memory_region_win.cc:PlatformSharedMemoryRegion::Create.
  // Remove this when NaCl is gone.
  static const size_t kSectionSize = 65536;
  const size_t page_size = std::max(kSectionSize, GetPageSize());
#else
  const size_t page_size = GetPageSize();
#endif  // BUILDFLAG(IS_WIN)
  size_t rounded_size = bits::AlignUp(size, page_size);

  // Fail on overflow.
  if (rounded_size < size)
    return std::nullopt;

  return rounded_size;
}

}  // namespace

// static
bool SharedMemorySecurityPolicy::AcquireReservationForMapping(size_t size) {
  size_t previous_mapped_size =
      total_mapped_size_.load(std::memory_order_relaxed);
  size_t total_mapped_size;

  std::optional<size_t> page_aligned_size = AlignWithPageSize(size);

  if (!page_aligned_size)
    return false;

  // Relaxed memory ordering is all that's needed since atomicity is all
  // that's required. If the value is stale, compare_exchange_weak() will fail
  // and the loop will retry the operation with an updated total mapped size.
  do {
    if (!CheckAdd(previous_mapped_size, *page_aligned_size)
             .AssignIfValid(&total_mapped_size)) {
      return false;
    }
    if (total_mapped_size >= kTotalMappedSizeLimit)
      return false;
  } while (!total_mapped_size_.compare_exchange_weak(
      previous_mapped_size, total_mapped_size, std::memory_order_relaxed,
      std::memory_order_relaxed));

  return true;
}

// static
void SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_t size) {
  // Note #1: relaxed memory ordering is sufficient since atomicity is all
  // that's required.
  // Note #2: |size| should never overflow when aligned to page size, since
  // this should only be called if AcquireReservationForMapping() returned
  // true.
  std::optional<size_t> page_aligned_size = AlignWithPageSize(size);
  total_mapped_size_.fetch_sub(*page_aligned_size, std::memory_order_relaxed);
}
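
// Usage sketch (hypothetical caller, not part of this file): callers are
// expected to bracket the platform map/unmap calls with the reservation
// pair, e.g.:
//
//   if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size))
//     return false;  // Mapping would exceed the global cap; refuse it.
//   // ... perform the actual platform mapping ...
//   // Later, when the mapping is torn down:
//   SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);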

}  // namespace base