// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Provides utilities for communicating with an IOMMU in another process.

use std::sync::Arc;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::Protection;
use base::RawDescriptor;
use base::Tube;
use serde::Deserialize;
use serde::Serialize;
use smallvec::SmallVec;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::virtio::memory_mapper::MemRegion;

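/// Requests sent from an `IpcMemoryMapper` to the remote IOMMU process.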
#[derive(Serialize, Deserialize)]
pub(super) enum IommuRequest {
    Export {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    Release {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    StartExportSession {
        endpoint_id: u32,
    },
}

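/// Responses returned by the remote IOMMU, one variant per `IommuRequest`.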
#[derive(Serialize, Deserialize)]
pub(super) enum IommuResponse {
    Export(Vec<MemRegion>),
    Release,
    StartExportSession(Event),
    Err(String),
}

impl IommuRequest {
    pub(super) fn get_endpoint_id(&self) -> u32 {
        match self {
            Self::Export { endpoint_id, .. } => *endpoint_id,
            Self::Release { endpoint_id, .. } => *endpoint_id,
            Self::StartExportSession { endpoint_id } => *endpoint_id,
        }
    }
}

/// Sends an address translation request to another process over a `Tube` and
/// receives the translated address from another `Tube`.
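///
/// # Example
///
/// A minimal sketch, assuming a remote IOMMU services the other ends of the
/// tubes (the unit test at the bottom of this file shows the full round trip);
/// `mapper`, `iova`, and `size` are hypothetical values for illustration:
///
/// ```ignore
/// let regions = mapper.export(iova, size)?;
/// // ... use the exported guest-physical regions ...
/// mapper.release(iova, size)?;
/// ```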
pub struct IpcMemoryMapper {
    request_tx: Tube,
    response_rx: Tube,
    endpoint_id: u32,
}

impl std::fmt::Debug for IpcMemoryMapper {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("IpcMemoryMapper")
            .field("endpoint_id", &self.endpoint_id)
            .finish()
    }
}

fn map_bad_resp(resp: IommuResponse) -> anyhow::Error {
    match resp {
        IommuResponse::Err(e) => anyhow!("remote error {}", e),
        _ => anyhow!("response type mismatch"),
    }
}

impl IpcMemoryMapper {
    /// Returns a new `IpcMemoryMapper` instance.
    ///
    /// # Arguments
    ///
    /// * `request_tx` - A tube for sending `IommuRequest`s to the remote IOMMU.
    /// * `response_rx` - A tube for receiving `IommuResponse`s.
    /// * `endpoint_id` - For the remote IOMMU to identify the device/ipc mapper.
    pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
        Self {
            request_tx,
            response_rx,
            endpoint_id,
        }
    }

    fn do_request(&self, req: IommuRequest) -> Result<IommuResponse> {
        self.request_tx
            .send(&req)
            .context("failed to send request")?;
        self.response_rx
            .recv::<IommuResponse>()
            .context("failed to get response")
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::export].
    pub fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
        let req = IommuRequest::Export {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Export(vec) => Ok(vec),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::release].
    pub fn release(&mut self, iova: u64, size: u64) -> Result<()> {
        let req = IommuRequest::Release {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Release => Ok(()),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::start_export_session].
    pub fn start_export_session(&mut self) -> Result<Event> {
        let req = IommuRequest::StartExportSession {
            endpoint_id: self.endpoint_id,
        };
        match self.do_request(req)? {
            IommuResponse::StartExportSession(evt) => Ok(evt),
            e => Err(map_bad_resp(e)),
        }
    }
}

impl AsRawDescriptors for IpcMemoryMapper {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        vec![
            self.request_tx.as_raw_descriptor(),
            self.response_rx.as_raw_descriptor(),
        ]
    }
}

pub struct CreateIpcMapperRet {
    pub mapper: IpcMemoryMapper,
    pub response_tx: Tube,
}

/// Returns a new `IpcMemoryMapper` instance and a `response_tx` for the IOMMU
/// to send `IommuResponse`s back through.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote IOMMU to identify the device/ipc mapper.
/// * `request_tx` - A tube for sending `IommuRequest`s to a remote IOMMU. This should be cloned
///   and shared between different ipc mappers with different `endpoint_id`s.
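///
/// # Example
///
/// A sketch of wiring one mapper to a remote IOMMU; `request_rx` and
/// `response_tx` are handed to the IOMMU side, and `endpoint_id` is a
/// hypothetical value (compare the unit test at the bottom of this file):
///
/// ```ignore
/// let (request_tx, request_rx) = Tube::pair().unwrap();
/// let CreateIpcMapperRet {
///     mapper,
///     response_tx,
/// } = create_ipc_mapper(endpoint_id, request_tx);
/// ```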
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
    let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
    CreateIpcMapperRet {
        mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
        response_tx,
    }
}

#[derive(Debug)]
struct ExportedRegionInner {
    regions: Vec<MemRegion>,
    iova: u64,
    size: u64,
    iommu: Arc<Mutex<IpcMemoryMapper>>,
}

impl Drop for ExportedRegionInner {
    fn drop(&mut self) {
        if let Err(e) = self.iommu.lock().release(self.iova, self.size) {
            error!("Error releasing region {:?}", e);
        }
    }
}

/// A region exported from the virtio-iommu.
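///
/// Cloning is shallow: clones share one exported range, which is released via
/// `IpcMemoryMapper::release` when the last clone is dropped.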
#[derive(Clone, Debug)]
pub struct ExportedRegion {
    inner: Arc<Mutex<ExportedRegionInner>>,
}

impl ExportedRegion {
    /// Creates a new, fully initialized exported region.
    pub fn new(
        mem: &GuestMemory,
        iommu: Arc<Mutex<IpcMemoryMapper>>,
        iova: u64,
        size: u64,
    ) -> Result<Self> {
        let regions = iommu
            .lock()
            .export(iova, size)
            .context("failed to export")?;
        for r in &regions {
            if !mem.is_valid_range(r.gpa, r.len) {
                bail!("region not in memory range");
            }
        }
        Ok(Self {
            inner: Arc::new(Mutex::new(ExportedRegionInner {
                regions,
                iova,
                size,
                iommu,
            })),
        })
    }

    // Helper function for copying to/from [iova, iova+remaining).
    fn do_copy<C>(
        &self,
        iova: u64,
        mut remaining: usize,
        prot: Protection,
        mut copy_fn: C,
    ) -> Result<()>
    where
        C: FnMut(usize /* offset */, GuestAddress, usize /* len */) -> Result<usize>,
    {
        let inner = self.inner.lock();
        let mut region_offset = iova.checked_sub(inner.iova).with_context(|| {
            format!(
                "out of bounds: src_iova={} region_iova={}",
                iova, inner.iova
            )
        })?;
        let mut offset = 0;
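        // Walk the exported regions in order. `region_offset` consumes the
        // gap between the start of the exported range and `iova`; `offset`
        // tracks progress through the caller's buffer.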
        for r in &inner.regions {
            if region_offset >= r.len {
                region_offset -= r.len;
                continue;
            }

            if !r.prot.allows(&prot) {
                bail!("gpa is not accessible");
            }

            // Clamp the copy to what remains of this region past
            // `region_offset`, so a copy starting mid-region cannot run past
            // the region's end.
            let len = ((r.len - region_offset) as usize).min(remaining);
            let copy_len = copy_fn(offset, r.gpa.unchecked_add(region_offset), len)?;
            if len != copy_len {
                bail!("incomplete copy: expected={}, actual={}", len, copy_len);
            }

            remaining -= len;
            offset += len;
            region_offset = 0;

            if remaining == 0 {
                return Ok(());
            }
        }

        Err(anyhow!("not enough data: remaining={}", remaining))
    }

    /// Reads an object from the given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't readable.
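    ///
    /// ```ignore
    /// // A sketch; `region`, `mem`, and `iova` are assumed from context.
    /// let val: u32 = region.read_obj_from_addr(&mem, iova)?;
    /// ```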
    pub fn read_obj_from_addr<T: FromBytes>(
        &self,
        mem: &GuestMemory,
        iova: u64,
    ) -> anyhow::Result<T> {
        let mut buf = vec![0u8; std::mem::size_of::<T>()];
        self.do_copy(iova, buf.len(), Protection::read(), |offset, gpa, len| {
            mem.read_at_addr(&mut buf[offset..(offset + len)], gpa)
                .context("failed to read from gpa")
        })?;
        T::read_from(buf.as_bytes()).context("failed to construct obj")
    }

    /// Writes an object at a given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't writable.
    pub fn write_obj_at_addr<T: AsBytes>(
        &self,
        mem: &GuestMemory,
        val: T,
        iova: u64,
    ) -> anyhow::Result<()> {
        let buf = val.as_bytes();
        self.do_copy(iova, buf.len(), Protection::write(), |offset, gpa, len| {
            mem.write_at_addr(&buf[offset..(offset + len)], gpa)
                .context("failed to write to gpa")
        })?;
        Ok(())
    }

    /// Validates that [iova, iova+size) lies within this region, and that
    /// the region is valid according to mem.
    pub fn is_valid(&self, mem: &GuestMemory, iova: u64, size: u64) -> bool {
        let inner = self.inner.lock();
        let iova_end = iova.checked_add(size);
        if iova_end.is_none() {
            return false;
        }
        if iova < inner.iova || iova_end.unwrap() > (inner.iova + inner.size) {
            return false;
        }
        // Reuse the guard taken above; locking `self.inner` again here would
        // deadlock, since the mutex is not reentrant.
        inner
            .regions
            .iter()
            .all(|r| mem.range_overlap(r.gpa, r.gpa.unchecked_add(r.len)))
    }

    /// Gets the list of guest physical regions for the exported region.
    pub fn get_mem_regions(&self) -> SmallVec<[MemRegion; 1]> {
        SmallVec::from_slice(&self.inner.lock().regions)
    }
}

#[cfg(test)]
mod tests {
    use std::thread;

    use base::Protection;
    use vm_memory::GuestAddress;

    use super::*;

    #[test]
    fn test() {
        let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
        let CreateIpcMapperRet {
            mut mapper,
            response_tx,
        } = create_ipc_mapper(3, request_tx);
        let user_handle = thread::spawn(move || {
            assert!(mapper
                .export(0x555, 1)
                .unwrap()
                .iter()
                .zip(&vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                },])
                .all(|(a, b)| a == b));
        });
        let iommu_handle = thread::spawn(move || {
            let (endpoint_id, iova, size) = match request_rx.recv().unwrap() {
                IommuRequest::Export {
                    endpoint_id,
                    iova,
                    size,
                } => (endpoint_id, iova, size),
                _ => unreachable!(),
            };
            assert_eq!(endpoint_id, 3);
            assert_eq!(iova, 0x555);
            assert_eq!(size, 1);
            response_tx
                .send(&IommuResponse::Export(vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                }]))
                .unwrap();
            // This join needs to be here because on Windows, if `response_tx`
            // is dropped before `response_rx` can read, the connection will
            // be severed and this test will fail.
            user_handle.join().unwrap();
        });
        iommu_handle.join().unwrap();
    }
}