// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Heap implementation.

use alloc::alloc::alloc;
use alloc::alloc::Layout;
use alloc::boxed::Box;

use core::alloc::GlobalAlloc as _;
use core::ffi::c_void;
use core::mem;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr;
use core::ptr::NonNull;

use buddy_system_allocator::LockedHeap;
use spin::{
    mutex::{SpinMutex, SpinMutexGuard},
    Once,
};

/// Configures the size of the global allocator.
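///
/// # Example
///
/// A minimal sketch; the 64 KiB figure is an arbitrary, assumed size, not a
/// recommendation:
///
/// ```ignore
/// // Reserve a 64 KiB heap for this binary (size is chosen per binary).
/// configure_heap!(64 * 1024);
/// ```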
#[macro_export]
macro_rules! configure_heap {
    ($len:expr) => {
        static __HEAP: $crate::heap::HeapArray<{ $len }> = $crate::heap::HeapArray::new();
        #[export_name = "get_heap"]
        fn __get_heap() -> &'static mut [u8] {
            __HEAP.get()
        }
    };
}

/// An array to be used as a heap.
///
/// This should be stored in a static variable to have the appropriate lifetime.
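///
/// For instance (a sketch; client crates normally declare this via
/// [`configure_heap!`] rather than by hand, and the size here is arbitrary):
///
/// ```ignore
/// static HEAP: HeapArray<0x10000> = HeapArray::new();
/// ```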
pub struct HeapArray<const SIZE: usize> {
    array: SpinMutex<[u8; SIZE]>,
}

impl<const SIZE: usize> HeapArray<SIZE> {
    /// Creates a new empty heap array.
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        Self { array: SpinMutex::new([0; SIZE]) }
    }

    /// Gets the heap as a slice.
    ///
    /// Panics if called more than once.
    pub fn get(&self) -> &mut [u8] {
        SpinMutexGuard::leak(self.array.try_lock().expect("Page heap was already taken"))
            .as_mut_slice()
    }
}

extern "Rust" {
    /// Gets the slice used by the global allocator, configured using configure_heap!().
    ///
    /// Panics if called more than once.
    fn get_heap() -> &'static mut [u8];
}

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();

/// The range of addresses used for the heap.
static HEAP_RANGE: Once<Range<usize>> = Once::new();

/// Initialize the global allocator.
///
/// Panics if called more than once.
pub(crate) fn init() {
    // SAFETY: This is in fact a safe Rust function.
    let heap = unsafe { get_heap() };

    HEAP_RANGE.call_once(|| {
        let range = heap.as_ptr_range();
        range.start as usize..range.end as usize
    });

    let start = heap.as_mut_ptr() as usize;
    let size = heap.len();

    let mut heap = HEAP_ALLOCATOR.lock();
    // SAFETY: We are supplying a valid memory range, and we only do this once.
    unsafe { heap.init(start, size) };
}

/// Allocate an aligned but uninitialized slice of heap.
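///
/// # Example
///
/// A sketch assuming a page-sized, page-aligned buffer is wanted (4096 is an
/// assumed page size, not something this crate defines):
///
/// ```ignore
/// let buf = aligned_boxed_slice(4096, 4096).expect("heap exhausted");
/// assert_eq!(buf.as_ptr() as usize % 4096, 0);
/// assert_eq!(buf.len(), 4096);
/// ```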
pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
    let size = NonZeroUsize::new(size)?.get();
    let layout = Layout::from_size_align(size, align).ok()?;
    // SAFETY: We verify that `size` is non-zero and that the returned `ptr` is non-null.
    let ptr = unsafe { alloc(layout) };
    let ptr = NonNull::new(ptr)?.as_ptr();
    let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);

    // SAFETY: The memory was allocated using the proper layout by our global_allocator.
    Some(unsafe { Box::from_raw(slice_ptr) })
}

#[no_mangle]
unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
    allocate(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
}

#[no_mangle]
unsafe extern "C" fn calloc(nmemb: usize, size: usize) -> *mut c_void {
    let Some(size) = nmemb.checked_mul(size) else { return ptr::null_mut() };
    allocate(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
}

#[no_mangle]
unsafe extern "C" fn __memset_chk(
    dest: *mut c_void,
    val: u8,
    len: usize,
    destlen: usize,
) -> *mut c_void {
    assert!(len <= destlen, "memset buffer overflow detected");
    // SAFETY: `dest` is valid for writes of `len` bytes.
    unsafe {
        ptr::write_bytes(dest, val, len);
    }
    dest
}

#[no_mangle]
/// SAFETY: ptr must be null or point to a currently-allocated block returned by allocate (either
/// directly or via malloc or calloc). Note that this function is called directly from C, so we have
/// to trust that the C code is doing the right thing; there are checks below which will catch some
/// errors.
unsafe extern "C" fn free(ptr: *mut c_void) {
    let Some(ptr) = NonNull::new(ptr) else { return };
    let heap_range = HEAP_RANGE.get().expect("free called before heap was initialised");
    assert!(
        heap_range.contains(&(ptr.as_ptr() as usize)),
        "free() called on a pointer that is not part of the HEAP: {ptr:?}"
    );
    // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
    // usize.
    let (ptr, size) = unsafe {
        let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
        (ptr, *ptr)
    };
    let size = NonZeroUsize::new(size).unwrap();
    let layout = malloc_layout(size).unwrap();
    // SAFETY: If our precondition is satisfied, then this is a valid currently-allocated block.
    unsafe { HEAP_ALLOCATOR.dealloc(ptr as *mut u8, layout) }
}

/// Allocate a block of memory suitable to return from `malloc()` etc. Returns a valid pointer
/// to a suitably aligned region of `size` bytes, optionally zeroed (and otherwise uninitialized),
/// or `None` if `size` is 0 or allocation fails. The block can be freed by passing the returned
/// pointer to `free()`.
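///
/// Illustrative block layout (the stored header is the total block size, including the
/// header itself, which lets `free()` reconstruct the `Layout`):
///
/// ```text
/// [ header: usize = total size ][ caller-visible data: requested bytes ... ]
/// ^ allocation start             ^ returned pointer (one usize past the start)
/// ```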
fn allocate(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
    let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
    let layout = malloc_layout(size)?;
    // SAFETY: layout is known to have non-zero size.
    let ptr = unsafe {
        if zeroed {
            HEAP_ALLOCATOR.alloc_zeroed(layout)
        } else {
            HEAP_ALLOCATOR.alloc(layout)
        }
    };
    let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
    // SAFETY: ptr points to a newly allocated block of memory which is properly aligned
    // for a usize and is big enough to hold a usize as well as the requested number of
    // bytes.
    unsafe {
        *ptr = size.get();
        NonNull::new(ptr.offset(1))
    }
}

fn malloc_layout(size: NonZeroUsize) -> Option<Layout> {
    // We want at least 8 byte alignment, and we need to be able to store a usize.
    const ALIGN: usize = const_max_size(mem::size_of::<usize>(), mem::size_of::<u64>());
    Layout::from_size_align(size.get(), ALIGN).ok()
}

const fn const_max_size(a: usize, b: usize) -> usize {
    if a > b {
        a
    } else {
        b
    }
}