// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::error::MemoryTrackerError;
use super::util::virt_to_phys;
use crate::layout;
use crate::util::unchecked_align_down;
use aarch64_paging::paging::{MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hypervisor_backends::{self, get_mem_sharer, get_mmio_guard};
use log::trace;
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;

/// Pool of host-shared memory from which shared allocations are served.
pub(crate) static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
/// Backing store for `SHARED_POOL`: owns the shared regions and unshares them when dropped.
pub(crate) static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

type Result<T> = result::Result<T, MemoryTrackerError>;
/// Tracks the MMIO regions currently shared with the host, one granule-sized frame at a time.
pub(crate) struct MmioSharer {
    granule: usize,
    frames: BTreeSet<usize>,
}

impl MmioSharer {
    pub fn new() -> Result<Self> {
        let granule = Self::get_granule()?;
        let frames = BTreeSet::new();

        // Allows safely calling util::unchecked_align_down().
        assert!(granule.is_power_of_two());

        Ok(Self { granule, frames })
    }

    pub fn get_granule() -> Result<usize> {
        let Some(mmio_guard) = get_mmio_guard() else {
            return Ok(PAGE_SIZE);
        };
        match mmio_guard.granule()? {
            granule if granule % PAGE_SIZE == 0 => Ok(granule), // For good measure.
            granule => Err(MemoryTrackerError::UnsupportedMmioGuardGranule(granule)),
        }
    }

    /// Shares the MMIO region aligned to the granule size containing addr (not validated as MMIO).
    pub fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
        // This can't use virt_to_phys() since 0x0 is a valid MMIO address and we are ID-mapped.
        let phys = addr.0;
        let base = unchecked_align_down(phys, self.granule);

        // TODO(ptosi): Share the UART using this method and remove the hardcoded check.
        if self.frames.contains(&base) || base == layout::UART_PAGE_ADDR {
            return Err(MemoryTrackerError::DuplicateMmioShare(base));
        }

        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.map(base)?;
        }

        let inserted = self.frames.insert(base);
        assert!(inserted);

        let base_va = VirtualAddress(base);
        Ok((base_va..base_va + self.granule).into())
    }

    pub fn unshare_all(&mut self) {
        let Some(mmio_guard) = get_mmio_guard() else {
            return self.frames.clear();
        };

        while let Some(base) = self.frames.pop_first() {
            mmio_guard.unmap(base).unwrap();
        }
    }
}
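
// Illustrative only: a minimal sketch of how `MmioSharer` is meant to be driven by a caller
// mapping an MMIO device (the device address below is an assumption, not part of this module):
//
//     let mut sharer = MmioSharer::new()?;
//     // Share the granule-aligned frame containing the device's registers with the host.
//     let va_range = sharer.share(VirtualAddress(0x9000_0000))?;
//     // ... access the device through addresses within `va_range` ...
//     // Frames are unshared by unshare_all() or implicitly when `sharer` is dropped.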

impl Drop for MmioSharer {
    fn drop(&mut self) {
        self.unshare_all();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub(crate) fn alloc_shared(layout: Layout) -> hypervisor_backends::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}
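
// For illustration of the refill sizing in try_shared_alloc() above (assumed example numbers):
// a request for 5000 bytes with 16-byte alignment that misses the pool produces a refill layout
// of max(5000.next_power_of_two(), 16) = 8192 bytes, matching the size actually allocated by
// `alloc_aligned()` for such a request.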

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub(crate) unsafe fn dealloc_shared(
    vaddr: NonNull<u8>,
    layout: Layout,
) -> hypervisor_backends::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}
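
// Illustrative only: a minimal sketch of the intended pairing of alloc_shared() and
// dealloc_shared() (the 4 KiB layout is an assumption made for the example):
//
//     let layout = Layout::from_size_align(4096, 4096).unwrap();
//     let buffer = alloc_shared(layout)?;
//     // ... exchange data with the host through `buffer` ...
//     // SAFETY: `buffer` came from alloc_shared() with `layout` and is only deallocated once.
//     unsafe { dealloc_shared(buffer, layout)? };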

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
pub(crate) struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    pub fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region from the global allocator that suits `hint` and shares it.
    pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}
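
// Illustrative only: a sketch of how the statics above might be initialized during early boot,
// before the first alloc_shared() call (the granule and capacity values here are assumptions,
// not prescribed by this module):
//
//     SHARED_POOL.set(Box::new(LockedFrameAllocator::new())).ok();
//     SHARED_MEMORY.lock().replace(MemorySharer::new(PAGE_SIZE, 4));
//
// Dropping the MemorySharer (e.g. by taking it out of SHARED_MEMORY) unshares and frees every
// region it handed to the pool.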