/*
** Copyright 2022, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#pragma once

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <functional>
#include <iomanip>
#include <limits>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>

#include <android-base/thread_annotations.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <log/log_main.h>
#include <utils/StrongPointer.h>

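// Hash support for android::wp<T>, keyed on the raw (unpromoted) pointer, so
// that weak pointers can be used as unordered_map keys (see SnoopingAllocator
// below).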
namespace std {
template <typename T>
struct hash<::android::wp<T>> {
    size_t operator()(const ::android::wp<T>& x) const {
        return std::hash<const T*>()(x.unsafe_get());
    }
};
}  // namespace std

namespace android::mediautils {

// Allocations are owning handles to a region of shared memory, and thus should
// not be copied: RAII requires that the dtor run exactly once per block of
// memory. To share ownership between multiple objects, use a ref-counting
// wrapper such as sp<> or std::shared_ptr, so the dtor is called once for a
// particular block of memory.

using AllocationType = ::android::sp<IMemory>;
using WeakAllocationType = ::android::wp<IMemory>;

namespace shared_allocator_impl {
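// Rounds size up to the nearest multiple of pageSize, which must be a nonzero
// power of two, e.g. roundup(100, 4096) == 4096.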
constexpr inline size_t roundup(size_t size, size_t pageSize) {
    LOG_ALWAYS_FATAL_IF(pageSize == 0 || (pageSize & (pageSize - 1)) != 0,
                        "Page size is not a power of 2");
    return ((size + pageSize - 1) & ~(pageSize - 1));
}

constexpr inline bool isHeapValid(const sp<IMemoryHeap>& heap) {
    return (heap && heap->getBase() &&
            heap->getBase() != MAP_FAILED);  // TODO if not mapped locally
}
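// Detection idiom traits: compile-time checks for whether an allocator exposes
// the optional deallocate_all()/owns()/dump() interface. The composable
// allocators below use these to conditionally enable forwarding methods.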
template <typename, typename = void>
static constexpr bool has_deallocate_all = false;

template <typename T>
static constexpr bool has_deallocate_all<
        T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().deallocate_all()), void>,
                            void>> = true;

template <typename, typename = void>
static constexpr bool has_owns = false;

template <typename T>
static constexpr bool
        has_owns<T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().owns(
                                                            std::declval<const AllocationType>())),
                                                    bool>,
                                     void>> = true;

template <typename, typename = void>
static constexpr bool has_dump = false;

template <typename T>
static constexpr bool has_dump<
        T,
        std::enable_if_t<std::is_same_v<decltype(std::declval<T>().dump()), std::string>, void>> =
        true;

}  // namespace shared_allocator_impl

struct BasicAllocRequest {
    size_t size;
};
struct NamedAllocRequest : public BasicAllocRequest {
    std::string_view name;
};

// We are required to add a layer of indirection to hold a handle to the actual
// block, since an sp<> cannot be created from an object once its ref-count has
// dropped to zero. So we have to hold onto an extra reference here. We
// effectively want to know when the ref-count of the object drops to one,
// since we need to retain a reference to pass the object to interfaces
// requiring an sp<>.
// TODO is there some way to avoid paying this cost?
template <typename Allocator>
class ScopedAllocator;

class ScopedAllocation : public BnMemory {
  public:
    template <typename T>
    friend class ScopedAllocator;
    template <typename Deallocator>
    ScopedAllocation(const AllocationType& allocation, Deallocator&& deallocator)
        : mAllocation(allocation), mDeallocator(std::forward<Deallocator>(deallocator)) {}

    // Defer the implementation to the underlying mAllocation

    virtual sp<IMemoryHeap> getMemory(ssize_t* offset = nullptr,
                                      size_t* size = nullptr) const override {
        return mAllocation->getMemory(offset, size);
    }

  private:
    ~ScopedAllocation() override { mDeallocator(mAllocation); }

    const AllocationType mAllocation;
    const std::function<void(const AllocationType&)> mDeallocator;
};

// Allocations are only deallocated when going out of scope.
// This should almost always be the outermost allocator.
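//
// A minimal usage sketch (the backing allocator and request size are
// illustrative, not prescriptive):
//   ScopedAllocator<MemoryHeapBaseAllocator> allocator;
//   auto allocation = allocator.allocate(BasicAllocRequest{4096});
//   // ... hand the sp<ScopedAllocation> to interfaces requiring sp<IMemory> ...
//   // When the last sp<> reference drops, the memory is deallocated.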
template <typename Allocator>
class ScopedAllocator {
  public:
    static size_t alignment() { return Allocator::alignment(); }

    explicit ScopedAllocator(const std::shared_ptr<Allocator>& allocator) : mAllocator(allocator) {}

    ScopedAllocator() : mAllocator(std::make_shared<Allocator>()) {}

    template <typename T>
    auto allocate(T&& request) {
        std::lock_guard l{*mLock};
        const auto allocation = mAllocator->allocate(std::forward<T>(request));
        if (!allocation) {
            return sp<ScopedAllocation>{};
        }
        return sp<ScopedAllocation>::make(allocation,
                [allocator = mAllocator, lock = mLock] (const AllocationType& allocation) {
                    std::lock_guard l{*lock};
                    allocator->deallocate(allocation);
                });
    }

    // deallocate() and deallocate_all() are implicitly unsafe to expose here,
    // due to double deallocation upon ScopedAllocation destruction. We could
    // protect against this efficiently with a gencount (for deallocate_all) or
    // inefficiently (for deallocate), but we choose not to.
    //
    // owns() is only safe to pseudo-implement due to static cast requirements.
    template <typename Enable = bool>
    auto owns(const sp<ScopedAllocation>& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
        std::lock_guard l{*mLock};
        return mAllocator->owns(allocation->mAllocation);
    }

    template <typename Enable = std::string>
    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
        std::lock_guard l{*mLock};
        return mAllocator->dump();
    }

  private:
    // We store a shared pointer in order to ensure that the allocator outlives
    // its allocations (which call back into it on destruction to be deallocated).
    const std::shared_ptr<Allocator> mAllocator;
    const std::shared_ptr<std::mutex> mLock = std::make_shared<std::mutex>();
};

// A simple policy for PolicyAllocator which enforces a pool size and an allocation
// size range.
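// For example, SizePolicy<65536, 0, 4096> admits allocations of at most 4096
// bytes each until the 64 KiB pool is exhausted (PolicyAllocator rounds sizes
// up to the allocator's alignment before consulting the policy).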
template <size_t PoolSize, size_t MinAllocSize = 0,
          size_t MaxAllocSize = std::numeric_limits<size_t>::max()>
class SizePolicy {
    static_assert(PoolSize > 0);

  public:
    template <typename T>
    bool isValid(T&& request) const {
        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
        return !(request.size > kMaxAllocSize || request.size < kMinAllocSize ||
                 mPoolSize + request.size > kPoolSize);
    }

    void allocated(const AllocationType& alloc) { mPoolSize += alloc->size(); }

    void deallocated(const AllocationType& alloc) { mPoolSize -= alloc->size(); }

    void deallocated_all() { mPoolSize = 0; }

    static constexpr size_t kPoolSize = PoolSize;
    static constexpr size_t kMinAllocSize = MinAllocSize;
    static constexpr size_t kMaxAllocSize = MaxAllocSize;

  private:
    size_t mPoolSize = 0;
};

// An allocator which accepts or rejects allocation requests by a parametrized
// policy (which can carry state).
template <typename Allocator, typename Policy>
class PolicyAllocator {
  public:
    static size_t alignment() { return Allocator::alignment(); }

    PolicyAllocator(Allocator allocator, Policy policy)
        : mAllocator(allocator), mPolicy(std::move(policy)) {}

    // Default initialize the allocator and policy
    PolicyAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<android::mediautils::BasicAllocRequest, std::decay_t<T>>);
        request.size = shared_allocator_impl::roundup(request.size, alignment());
        if (!mPolicy.isValid(request)) {
            return {};
        }
        AllocationType val = mAllocator.allocate(std::forward<T>(request));
        if (val == nullptr) return val;
        mPolicy.allocated(val);
        return val;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mPolicy.deallocated(allocation);
        mAllocator.deallocate(allocation);
    }

    template <typename Enable = void>
    auto deallocate_all()
            -> std::enable_if_t<shared_allocator_impl::has_deallocate_all<Allocator>, Enable> {
        mAllocator.deallocate_all();
        mPolicy.deallocated_all();
    }

    template <typename Enable = bool>
    auto owns(const AllocationType& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
        return mAllocator.owns(allocation);
    }

    template <typename Enable = std::string>
    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
        return mAllocator.dump();
    }

  private:
    [[no_unique_address]] Allocator mAllocator;
    [[no_unique_address]] Policy mPolicy;
};
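
// A sketch of a typical composition (pool size and request are illustrative):
//   using PoolAllocator =
//           PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<64 * 1024 * 1024>>;
//   PoolAllocator allocator;
//   AllocationType mem = allocator.allocate(BasicAllocRequest{4096});
//   allocator.deallocate(mem);  // returns the size to the policy's pool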

// An allocator which keeps track of outstanding allocations for logging and
// querying ownership.
template <class Allocator>
class SnoopingAllocator {
  public:
    struct AllocationData {
        std::string name;
        size_t allocation_number;
    };
    static size_t alignment() { return Allocator::alignment(); }

    SnoopingAllocator(Allocator allocator, std::string_view name)
        : mName(name), mAllocator(std::move(allocator)) {}

    explicit SnoopingAllocator(std::string_view name) : mName(name), mAllocator(Allocator{}) {}

    explicit SnoopingAllocator(Allocator allocator) : mAllocator(std::move(allocator)) {}

    // Default construct allocator and name
    SnoopingAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<NamedAllocRequest, std::decay_t<T>>);
        AllocationType allocation = mAllocator.allocate(request);
        if (allocation)
            mAllocations.insert({WeakAllocationType{allocation},
                                 {std::string{request.name}, mAllocationNumber++}});
        return allocation;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mAllocations.erase(WeakAllocationType{allocation});
        mAllocator.deallocate(allocation);
    }

    void deallocate_all() {
        if constexpr (shared_allocator_impl::has_deallocate_all<Allocator>) {
            mAllocator.deallocate_all();
        } else {
            for (auto& [mem, value] : mAllocations) {
                mAllocator.deallocate(mem.promote());
            }
        }
        mAllocations.clear();
    }

    bool owns(const AllocationType& allocation) const {
        return (mAllocations.count(WeakAllocationType{allocation}) > 0);
    }

    std::string dump() const {
        std::ostringstream dump;
        dump << mName << " Allocator Dump:\n";
        dump << std::setw(8) << "HeapID" << std::setw(8) << "Size" << std::setw(8) << "Offset"
             << std::setw(8) << "Order"
             << "   Name\n";
        for (auto& [mem, value] : mAllocations) {
            // TODO IMemory size and offset
            const AllocationType handle = mem.promote();
            if (!handle) {
                dump << "Invalid memory lifetime!\n";
                continue;
            }
            const auto heap = handle->getMemory();
            dump << std::setw(8) << heap->getHeapID() << std::setw(8) << heap->getSize()
                 << std::setw(8) << heap->getOffset() << std::setw(8) << value.allocation_number
                 << "   " << value.name << "\n";
        }
        return dump.str();
    }

    const std::unordered_map<WeakAllocationType, AllocationData>& getAllocations() {
        return mAllocations;
    }

  private:
    const std::string mName;
    [[no_unique_address]] Allocator mAllocator;
    // We don't take copies of the underlying information in an allocation;
    // rather, the allocation information is put on the heap and referenced via
    // a ref-counted solution, so the address of the allocation information is
    // appropriate to hash. In order for this block to be freed, the underlying
    // allocation must be referenced by no one (thus deallocated).
    std::unordered_map<WeakAllocationType, AllocationData> mAllocations;
    // For debugging purposes, monotonic
    size_t mAllocationNumber = 0;
};
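
// A sketch of use (names are illustrative): tag each allocation so that dump()
// can report it.
//   SnoopingAllocator<MemoryHeapBaseAllocator> allocator{"ExampleHeap"};
//   auto mem = allocator.allocate(NamedAllocRequest{{4096}, "client 1"});
//   ALOGD("%s", allocator.dump().c_str());  // HeapID, size, offset, order, name
//   allocator.deallocate(mem);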

// An allocator which passes a failed allocation request to a backup allocator.
template <class PrimaryAllocator, class SecondaryAllocator>
class FallbackAllocator {
  public:
    static_assert(shared_allocator_impl::has_owns<PrimaryAllocator>);

    static size_t alignment() { return PrimaryAllocator::alignment(); }

    FallbackAllocator(const PrimaryAllocator& primary, const SecondaryAllocator& secondary)
        : mPrimary(primary), mSecondary(secondary) {
        verify_alignment();
    }

    // Default construct primary and secondary allocator
    FallbackAllocator() {
        verify_alignment();
    }

    template <typename T>
    AllocationType allocate(T&& request) {
        AllocationType allocation = mPrimary.allocate(std::forward<T>(request));
        if (!allocation) allocation = mSecondary.allocate(std::forward<T>(request));
        return allocation;
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        if (mPrimary.owns(allocation)) {
            mPrimary.deallocate(allocation);
        } else {
            mSecondary.deallocate(allocation);
        }
    }

    template <typename Enable = void>
    auto deallocate_all() -> std::enable_if_t<
            shared_allocator_impl::has_deallocate_all<PrimaryAllocator> &&
                    shared_allocator_impl::has_deallocate_all<SecondaryAllocator>,
            Enable> {
        mPrimary.deallocate_all();
        mSecondary.deallocate_all();
    }

    template <typename Enable = bool>
    auto owns(const AllocationType& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<SecondaryAllocator>, Enable> {
        return mPrimary.owns(allocation) || mSecondary.owns(allocation);
    }

    template <typename Enable = std::string>
    auto dump() const
            -> std::enable_if_t<shared_allocator_impl::has_dump<PrimaryAllocator> &&
                                        shared_allocator_impl::has_dump<SecondaryAllocator>,
                                Enable> {
        return std::string("Primary: \n") + mPrimary.dump() + std::string("Secondary: \n") +
               mSecondary.dump();
    }

  private:
    void verify_alignment() {
        LOG_ALWAYS_FATAL_IF(PrimaryAllocator::alignment() != SecondaryAllocator::alignment(),
                            "PrimaryAllocator::alignment() != SecondaryAllocator::alignment()");
    }
    [[no_unique_address]] PrimaryAllocator mPrimary;
    [[no_unique_address]] SecondaryAllocator mSecondary;
};
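
// A sketch of a bounded primary pool spilling to plain heap allocations when
// the policy rejects a request (the composition is illustrative; the primary is
// wrapped in a SnoopingAllocator to satisfy the owns() requirement):
//   using PoolAllocator =
//           PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<64 * 1024 * 1024>>;
//   FallbackAllocator<SnoopingAllocator<PoolAllocator>, MemoryHeapBaseAllocator>
//           allocator;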

// Wrap an allocator with a lock if it backs multiple allocators through indirection.
template <typename Allocator>
class LockedAllocator {
  public:
    static size_t alignment() { return Allocator::alignment(); }

    explicit LockedAllocator(Allocator allocator) : mAllocator(allocator) {}

    LockedAllocator() = default;

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<android::mediautils::BasicAllocRequest, std::decay_t<T>>);
        std::lock_guard l_{mMutex};
        return mAllocator.allocate(std::forward<T>(request));
    }

    void deallocate(const AllocationType& allocation) {
        std::lock_guard l_{mMutex};
        mAllocator.deallocate(allocation);
    }

    template <typename Enable = void>
    auto deallocate_all()
            -> std::enable_if_t<shared_allocator_impl::has_deallocate_all<Allocator>, Enable> {
        std::lock_guard l_{mMutex};
        mAllocator.deallocate_all();
    }

    template <typename Enable = bool>
    auto owns(const AllocationType& allocation) const
            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
        std::lock_guard l_{mMutex};
        return mAllocator.owns(allocation);
    }

    template <typename Enable = std::string>
    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
        std::lock_guard l_{mMutex};
        return mAllocator.dump();
    }

  private:
    // mutable so that the const methods owns() and dump() can take the lock.
    mutable std::mutex mMutex;
    [[no_unique_address]] Allocator mAllocator GUARDED_BY(mMutex);
};

// An allocator which is backed by a shared_ptr to an allocator, so multiple
// allocators can share the same backing allocator (and thus the same state).
// When the same backing allocator is used by multiple higher level allocators,
// locking at the sharing level is necessary.
template <typename Allocator>
class IndirectAllocator {
  public:
    static size_t alignment() { return Allocator::alignment(); }

    explicit IndirectAllocator(const std::shared_ptr<Allocator>& allocator)
        : mAllocator(allocator) {}

    template <typename T>
    AllocationType allocate(T&& request) {
        return mAllocator->allocate(std::forward<T>(request));
    }

    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        mAllocator->deallocate(allocation);
    }

    // We can't implement deallocate_all/dump/owns, since we may not be the only
    // allocator with access to the underlying allocator (making these methods
    // not well-defined). If they are necessary, wrap with a SnoopingAllocator.
  private:
    const std::shared_ptr<Allocator> mAllocator;
};
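
// A sketch of two allocator views sharing one lock-protected backing allocator
// (the composition is illustrative):
//   auto backing = std::make_shared<LockedAllocator<MemoryHeapBaseAllocator>>();
//   IndirectAllocator viewA{backing};
//   IndirectAllocator viewB{backing};  // both serialize through the same lock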

// Stateless. This allocator allocates full page-aligned MemoryHeapBases (backed by
// a shared memory mapped anonymous file) as allocations.
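//
// A minimal direct-use sketch (more commonly this allocator sits at the bottom
// of a composed stack):
//   MemoryHeapBaseAllocator allocator;
//   auto mem = allocator.allocate(BasicAllocRequest{100});  // rounds to a page
//   allocator.deallocate(mem);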
class MemoryHeapBaseAllocator {
  public:
    static size_t alignment() { return kPageSize; }
    static constexpr unsigned FLAGS = 0;  // default flags

    template <typename T>
    AllocationType allocate(T&& request) {
        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
        auto heap =
                sp<MemoryHeapBase>::make(shared_allocator_impl::roundup(request.size, alignment()));
        if (!shared_allocator_impl::isHeapValid(heap)) {
            return {};
        }
        return sp<MemoryBase>::make(heap, 0, heap->getSize());
    }

    // Passing a block not allocated by a MemoryHeapBaseAllocator is undefined.
    void deallocate(const AllocationType& allocation) {
        if (!allocation) return;
        const auto heap = allocation->getMemory();
        if (!heap) return;
        // This causes future mapped accesses (even across process boundaries)
        // to receive SIGBUS.
        ftruncate(heap->getHeapID(), 0);
        // This static cast is safe, since as long as the block was originally
        // allocated by us, the underlying IMemoryHeap was a MemoryHeapBase
        static_cast<MemoryHeapBase&>(*heap).dispose();
    }
  private:
    static inline const size_t kPageSize = getpagesize();
};
}  // namespace android::mediautils