// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 use libc::c_void;
6 use win_util::create_file_mapping;
7 use win_util::duplicate_handle;
8 use winapi::um::winnt::PAGE_READWRITE;
9
10 pub use super::mmap_platform::MemoryMappingArena;
11 use crate::AsRawDescriptor;
12 use crate::Descriptor;
13 use crate::FromRawDescriptor;
14 use crate::MappedRegion;
15 use crate::MemoryMapping as CrateMemoryMapping;
16 use crate::MemoryMappingBuilder;
17 use crate::MmapError as Error;
18 use crate::MmapResult as Result;
19 use crate::Protection;
20 use crate::RawDescriptor;
21 use crate::SafeDescriptor;
22
23 /// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
24 /// `mmap_size` bytes. Also checks for any overflow.
validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()>25 fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
26 // Ensure offset + size doesn't overflow
27 let end_offset = offset
28 .checked_add(range_size)
29 .ok_or(Error::InvalidAddress)?;
30 // Ensure offset + size are within the mapping bounds
31 if end_offset <= mmap_size {
32 Ok(())
33 } else {
34 Err(Error::InvalidAddress)
35 }
36 }
37
38 impl dyn MappedRegion {
39 /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
40 /// the region. `offset`..`offset+size` must be contained within the `MappedRegion`.
msync(&self, offset: usize, size: usize) -> Result<()>41 pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
42 validate_includes_range(self.size(), offset, size)?;
43
44 // SAFETY:
45 // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
46 // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
47 // this `MappedRegion`.
48 let ret = unsafe {
49 use winapi::um::memoryapi::FlushViewOfFile;
50 if FlushViewOfFile((self.as_ptr() as usize + offset) as *mut libc::c_void, size) == 0 {
51 -1
52 } else {
53 0
54 }
55 };
56 if ret != -1 {
57 Ok(())
58 } else {
59 Err(Error::SystemCallFailed(super::Error::last()))
60 }
61 }
62 }
63
/// Wraps an anonymous shared memory mapping in the current process. Provides
/// RAII semantics including munmap when no longer needed.
#[derive(Debug)]
pub struct MemoryMapping {
    // Base address of the mapped view; accessed through the `MappedRegion` interface
    // (`as_ptr`/`size`) rather than directly.
    pub(crate) addr: *mut c_void,
    // Length of the mapping in bytes.
    pub(crate) size: usize,
}
71
// SAFETY:
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
// SAFETY: See comments for impl Send
unsafe impl Sync for MemoryMapping {}
80
81 impl MemoryMapping {
82 /// Creates an anonymous shared, read/write mapping of `size` bytes.
83 ///
84 /// # Arguments
85 /// * `size` - Size of memory region in bytes.
new(size: usize) -> Result<MemoryMapping>86 pub fn new(size: usize) -> Result<MemoryMapping> {
87 MemoryMapping::new_protection(size, Protection::read_write())
88 }
89
90 /// Maps the first `size` bytes of the given `descriptor` as read/write.
91 ///
92 /// # Arguments
93 /// * `file_handle` - File handle to map from.
94 /// * `size` - Size of memory region in bytes.
from_descriptor( file_handle: &dyn AsRawDescriptor, size: usize, ) -> Result<MemoryMapping>95 pub fn from_descriptor(
96 file_handle: &dyn AsRawDescriptor,
97 size: usize,
98 ) -> Result<MemoryMapping> {
99 MemoryMapping::from_descriptor_offset(file_handle, size, 0)
100 }
101
from_raw_descriptor(file_handle: RawDescriptor, size: usize) -> Result<MemoryMapping>102 pub fn from_raw_descriptor(file_handle: RawDescriptor, size: usize) -> Result<MemoryMapping> {
103 MemoryMapping::from_descriptor_offset(&Descriptor(file_handle), size, 0)
104 }
105
from_descriptor_offset( file_handle: &dyn AsRawDescriptor, size: usize, offset: u64, ) -> Result<MemoryMapping>106 pub fn from_descriptor_offset(
107 file_handle: &dyn AsRawDescriptor,
108 size: usize,
109 offset: u64,
110 ) -> Result<MemoryMapping> {
111 MemoryMapping::from_descriptor_offset_protection(
112 file_handle,
113 size,
114 offset,
115 Protection::read_write(),
116 )
117 }
118
119 // Check that offset+count is valid and return the sum.
range_end(&self, offset: usize, count: usize) -> Result<usize>120 pub(crate) fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
121 let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
122 if mem_end > self.size() {
123 return Err(Error::InvalidAddress);
124 }
125 Ok(mem_end)
126 }
127 }
128
129 // SAFETY:
130 // Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
131 // be unmapped until it's Dropped.
132 unsafe impl MappedRegion for MemoryMapping {
as_ptr(&self) -> *mut u8133 fn as_ptr(&self) -> *mut u8 {
134 self.addr as *mut u8
135 }
136
size(&self) -> usize137 fn size(&self) -> usize {
138 self.size
139 }
140 }
141
142 impl CrateMemoryMapping {
from_raw_ptr(addr: RawDescriptor, size: usize) -> Result<CrateMemoryMapping>143 pub fn from_raw_ptr(addr: RawDescriptor, size: usize) -> Result<CrateMemoryMapping> {
144 MemoryMapping::from_raw_ptr(addr, size).map(|mapping| CrateMemoryMapping {
145 mapping,
146 _file_descriptor: None,
147 })
148 }
149 }
150
/// Windows-specific extensions to [`MemoryMappingBuilder`].
pub trait MemoryMappingBuilderWindows<'a> {
    /// Build the memory mapping given the specified descriptor to mapped memory
    ///
    /// Default: Create a new memory mapping.
    ///
    /// descriptor MUST be a mapping handle. Files MUST use `MemoryMappingBuilder::from_file`
    /// instead.
    #[allow(clippy::wrong_self_convention)]
    fn from_descriptor(self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder;
}
161
impl<'a> MemoryMappingBuilderWindows<'a> for MemoryMappingBuilder<'a> {
    /// See MemoryMappingBuilderWindows.
    fn from_descriptor(mut self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder {
        // Only records the handle; the actual mapping happens later in `build`.
        self.descriptor = Some(descriptor);
        self
    }
}
169
impl<'a> MemoryMappingBuilder<'a> {
    /// Build a MemoryMapping from the provided options.
    ///
    /// With no descriptor set, creates a fresh anonymous mapping. With a descriptor set:
    /// * if it is a file (`is_file_descriptor`), first creates a Windows file-mapping handle
    ///   for it, then maps a view of that handle;
    /// * otherwise the descriptor is assumed to already be a mapping handle and is mapped
    ///   directly.
    ///
    /// # Errors
    /// Returns `Error::StdSyscallFailed` if creating the file mapping fails, or any error
    /// propagated from the underlying map/view creation.
    pub fn build(self) -> Result<CrateMemoryMapping> {
        match self.descriptor {
            Some(descriptor) => {
                let mapping_descriptor = if self.is_file_descriptor {
                    // On Windows, a file cannot be mmapped directly. We have to create a mapping
                    // handle for it first. That handle is then provided to Self::wrap, which
                    // performs the actual mmap (creating a mapped view).
                    //
                    // SAFETY:
                    // Safe because self.descriptor is guaranteed to be a valid handle.
                    let mapping_handle = unsafe {
                        create_file_mapping(
                            Some(descriptor.as_raw_descriptor()),
                            self.size as u64,
                            PAGE_READWRITE,
                            None,
                        )
                    }
                    .map_err(Error::StdSyscallFailed)?;

                    // SAFETY:
                    // The above comment block is why the SafeDescriptor wrap is safe.
                    // Wrapping in a SafeDescriptor also ensures the temporary mapping handle is
                    // closed when `mapping_descriptor` goes out of scope at the end of this arm.
                    Some(unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) })
                } else {
                    None
                };

                MemoryMappingBuilder::wrap(
                    MemoryMapping::from_descriptor_offset_protection(
                        // Map from the freshly created file-mapping handle if there is one,
                        // otherwise from the caller-supplied (mapping) handle.
                        match mapping_descriptor.as_ref() {
                            Some(descriptor) => descriptor as &dyn AsRawDescriptor,
                            None => descriptor,
                        },
                        self.size,
                        self.offset.unwrap_or(0),
                        self.protection.unwrap_or_else(Protection::read_write),
                    )?,
                    // For file-backed mappings, hand the original file descriptor to `wrap` so a
                    // duplicate of it is kept alive alongside the mapping.
                    if self.is_file_descriptor {
                        self.descriptor
                    } else {
                        None
                    },
                )
            }
            // No descriptor: create a new anonymous mapping of the requested size/protection.
            None => MemoryMappingBuilder::wrap(
                MemoryMapping::new_protection(
                    self.size,
                    self.protection.unwrap_or_else(Protection::read_write),
                )?,
                None,
            ),
        }
    }

    /// Wraps `mapping` into a `CrateMemoryMapping`. If `file_descriptor` is provided, it is
    /// duplicated and the duplicate is stored in the returned mapping, keeping the underlying
    /// handle open for the mapping's lifetime.
    ///
    /// # Errors
    /// Returns `Error::StdSyscallFailed` if duplicating `file_descriptor` fails.
    pub fn wrap(
        mapping: MemoryMapping,
        file_descriptor: Option<&'a dyn AsRawDescriptor>,
    ) -> Result<CrateMemoryMapping> {
        let file_descriptor = match file_descriptor {
            // SAFETY:
            // Safe because `duplicate_handle` will return a handle or at least error out.
            Some(descriptor) => unsafe {
                Some(SafeDescriptor::from_raw_descriptor(
                    duplicate_handle(descriptor.as_raw_descriptor())
                        .map_err(Error::StdSyscallFailed)?,
                ))
            },
            None => None,
        };

        Ok(CrateMemoryMapping {
            mapping,
            _file_descriptor: file_descriptor,
        })
    }
}
247
#[cfg(test)]
mod tests {
    use super::*;
    use crate::SharedMemory;
    use crate::VolatileMemory;
    use crate::VolatileMemoryError;

    // get_slice() and other methods are only available on crate::MemoryMapping.
    fn to_crate_mmap(mapping: MemoryMapping) -> crate::MemoryMapping {
        crate::MemoryMapping {
            mapping,
            _file_descriptor: None,
        }
    }

    #[test]
    fn basic_map() {
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 1024).unwrap());
        assert_eq!(mmap.size(), 1024);
    }

    #[test]
    fn test_write_past_end() {
        // A write extending beyond the mapping is truncated to the mapping size, not rejected.
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let written = mmap.write_slice(&[1, 2, 3, 4, 5, 6], 0).unwrap();
        assert_eq!(written, 5);
    }

    #[test]
    fn slice_size() {
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let slice = mmap.get_slice(2, 3).unwrap();
        assert_eq!(slice.size(), 3);
    }

    #[test]
    fn slice_addr() {
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let slice = mmap.get_slice(2, 3).unwrap();
        // SAFETY: trivially safe
        let expected = unsafe { mmap.as_ptr().offset(2) };
        assert_eq!(slice.as_ptr(), expected);
    }

    #[test]
    fn slice_overflow_error() {
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let err = mmap.get_slice(usize::MAX, 3).unwrap_err();
        assert_eq!(
            err,
            VolatileMemoryError::Overflow {
                base: usize::MAX,
                offset: 3,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let shm = SharedMemory::new("test", 1028).unwrap();
        let mmap = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let err = mmap.get_slice(3, 3).unwrap_err();
        assert_eq!(err, VolatileMemoryError::OutOfBounds { addr: 6 });
    }
}
317