1 // Copyright 2024 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! Support for virtio-media devices in crosvm.
6 //!
7 //! This module provides implementation for the virtio-media traits required to make virtio-media
8 //! devices operate under crosvm. Sub-modules then integrate these devices with crosvm.
9
10 use std::collections::BTreeMap;
11 use std::os::fd::AsRawFd;
12 use std::os::fd::BorrowedFd;
13 use std::path::Path;
14 use std::path::PathBuf;
15 use std::rc::Rc;
16 use std::sync::Arc;
17
18 use anyhow::Context;
19 use base::error;
20 use base::Descriptor;
21 use base::Event;
22 use base::EventToken;
23 use base::EventType;
24 use base::MappedRegion;
25 use base::MemoryMappingArena;
26 use base::Protection;
27 use base::WaitContext;
28 use base::WorkerThread;
29 use resources::address_allocator::AddressAllocator;
30 use resources::AddressRange;
31 use resources::Alloc;
32 use sync::Mutex;
33 use virtio_media::io::WriteToDescriptorChain;
34 use virtio_media::poll::SessionPoller;
35 use virtio_media::protocol::SgEntry;
36 use virtio_media::protocol::V4l2Event;
37 use virtio_media::protocol::VirtioMediaDeviceConfig;
38 use virtio_media::GuestMemoryRange;
39 use virtio_media::VirtioMediaDevice;
40 use virtio_media::VirtioMediaDeviceRunner;
41 use virtio_media::VirtioMediaEventQueue;
42 use virtio_media::VirtioMediaGuestMemoryMapper;
43 use virtio_media::VirtioMediaHostMemoryMapper;
44 use vm_control::VmMemorySource;
45 use vm_memory::GuestAddress;
46 use vm_memory::GuestMemory;
47
48 use crate::virtio::copy_config;
49 use crate::virtio::device_constants::media::QUEUE_SIZES;
50 use crate::virtio::DeviceType;
51 use crate::virtio::Interrupt;
52 use crate::virtio::Queue;
53 use crate::virtio::Reader;
54 use crate::virtio::SharedMemoryMapper;
55 use crate::virtio::SharedMemoryRegion;
56 use crate::virtio::VirtioDevice;
57 use crate::virtio::Writer;
58
/// Structure supporting the implementation of `VirtioMediaEventQueue` for sending events to the
/// driver.
///
/// Newtype wrapper around the virtio queue dedicated to device-to-driver V4L2 events.
struct EventQueue(Queue);
62
63 impl VirtioMediaEventQueue for EventQueue {
64 /// Wait until an event descriptor becomes available and send `event` to the guest.
send_event(&mut self, event: V4l2Event)65 fn send_event(&mut self, event: V4l2Event) {
66 let mut desc;
67
68 loop {
69 match self.0.pop() {
70 Some(d) => {
71 desc = d;
72 break;
73 }
74 None => {
75 if let Err(e) = self.0.event().wait() {
76 error!("could not obtain a descriptor to send event to: {:#}", e);
77 return;
78 }
79 }
80 }
81 }
82
83 if let Err(e) = match event {
84 V4l2Event::Error(event) => WriteToDescriptorChain::write_obj(&mut desc.writer, event),
85 V4l2Event::DequeueBuffer(event) => {
86 WriteToDescriptorChain::write_obj(&mut desc.writer, event)
87 }
88 V4l2Event::Event(event) => WriteToDescriptorChain::write_obj(&mut desc.writer, event),
89 } {
90 error!("failed to write event: {}", e);
91 }
92
93 let written = desc.writer.bytes_written() as u32;
94 self.0.add_used(desc, written);
95 self.0.trigger_interrupt();
96 }
97 }
98
/// A `SharedMemoryMapper` behind an `Arc`, allowing it to be shared.
///
/// This is required by the fact that devices can be activated several times, but the mapper is
/// only provided once. This might be a defect of the `VirtioDevice` interface.
#[derive(Clone)]
struct ArcedMemoryMapper(Arc<Mutex<Box<dyn SharedMemoryMapper>>>);

impl From<Box<dyn SharedMemoryMapper>> for ArcedMemoryMapper {
    /// Wrap a freshly-received mapper so it can be cloned across activations.
    fn from(mapper: Box<dyn SharedMemoryMapper>) -> Self {
        Self(Arc::new(Mutex::new(mapper)))
    }
}
111
112 impl SharedMemoryMapper for ArcedMemoryMapper {
add_mapping( &mut self, source: VmMemorySource, offset: u64, prot: Protection, cache: hypervisor::MemCacheType, ) -> anyhow::Result<()>113 fn add_mapping(
114 &mut self,
115 source: VmMemorySource,
116 offset: u64,
117 prot: Protection,
118 cache: hypervisor::MemCacheType,
119 ) -> anyhow::Result<()> {
120 self.0.lock().add_mapping(source, offset, prot, cache)
121 }
122
remove_mapping(&mut self, offset: u64) -> anyhow::Result<()>123 fn remove_mapping(&mut self, offset: u64) -> anyhow::Result<()> {
124 self.0.lock().remove_mapping(offset)
125 }
126
as_raw_descriptor(&self) -> Option<base::RawDescriptor>127 fn as_raw_descriptor(&self) -> Option<base::RawDescriptor> {
128 self.0.lock().as_raw_descriptor()
129 }
130 }
131
/// Provides the ability to map host memory into the guest physical address space. Used to
/// implement `VirtioMediaHostMemoryMapper`.
struct HostMemoryMapper<M: SharedMemoryMapper> {
    /// Mapper that performs the actual insertion/removal of mappings.
    shm_mapper: M,
    /// Address allocator handing out offsets within the device's shared memory region.
    allocator: AddressAllocator,
}
140
141 impl<M: SharedMemoryMapper> VirtioMediaHostMemoryMapper for HostMemoryMapper<M> {
add_mapping( &mut self, buffer: BorrowedFd, length: u64, offset: u64, rw: bool, ) -> Result<u64, i32>142 fn add_mapping(
143 &mut self,
144 buffer: BorrowedFd,
145 length: u64,
146 offset: u64,
147 rw: bool,
148 ) -> Result<u64, i32> {
149 // TODO: technically `offset` can be used twice if a buffer is deleted and some other takes
150 // its place...
151 let shm_offset = self
152 .allocator
153 .allocate(length, Alloc::FileBacked(offset), "".into())
154 .map_err(|_| libc::ENOMEM)?;
155
156 match self.shm_mapper.add_mapping(
157 VmMemorySource::Descriptor {
158 descriptor: buffer.try_clone_to_owned().map_err(|_| libc::EIO)?.into(),
159 offset: 0,
160 size: length,
161 },
162 shm_offset,
163 if rw {
164 Protection::read_write()
165 } else {
166 Protection::read()
167 },
168 hypervisor::MemCacheType::CacheCoherent,
169 ) {
170 Ok(()) => Ok(shm_offset),
171 Err(e) => {
172 base::error!("failed to map memory buffer: {:#}", e);
173 Err(libc::EINVAL)
174 }
175 }
176 }
177
remove_mapping(&mut self, offset: u64) -> Result<(), i32>178 fn remove_mapping(&mut self, offset: u64) -> Result<(), i32> {
179 let _ = self.allocator.release_containing(offset);
180
181 self.shm_mapper
182 .remove_mapping(offset)
183 .map_err(|_| libc::EINVAL)
184 }
185 }
186
/// Direct linear mapping of sparse guest memory.
///
/// A re-mapping of sparse guest memory into an arena that is linear to the host.
struct GuestMemoryMapping {
    /// Host arena into which the guest pages are mapped contiguously.
    arena: MemoryMappingArena,
    /// Offset of the first byte of the guest range within the first mapped page.
    start_offset: usize,
}
194
195 impl GuestMemoryMapping {
new(mem: &GuestMemory, sgs: &[SgEntry]) -> anyhow::Result<Self>196 fn new(mem: &GuestMemory, sgs: &[SgEntry]) -> anyhow::Result<Self> {
197 let page_size = base::pagesize() as u64;
198 let page_mask = page_size - 1;
199
200 // Validate the SGs.
201 //
202 // We can only map full pages and need to maintain a linear area. This means that the
203 // following invariants must be withheld:
204 //
205 // - For all entries but the first, the start offset within the page must be 0.
206 // - For all entries but the last, `start + len` must be a multiple of page size.
207 for sg in sgs.iter().skip(1) {
208 if sg.start & page_mask != 0 {
209 anyhow::bail!("non-initial SG entry start offset is not 0");
210 }
211 }
212 for sg in sgs.iter().take(sgs.len() - 1) {
213 if (sg.start + sg.len as u64) & page_mask != 0 {
214 anyhow::bail!("non-terminal SG entry with start + len != page_size");
215 }
216 }
217
218 // Compute the arena size.
219 let arena_size = sgs
220 .iter()
221 .fold(0, |size, sg| size + (sg.start & page_mask) + sg.len as u64)
222 // Align to page size if the last entry did not cover a full page.
223 .next_multiple_of(page_size);
224 let mut arena = MemoryMappingArena::new(arena_size as usize)?;
225
226 // Map all SG entries.
227 let mut pos = 0;
228 for region in sgs {
229 // Address of the first page of the region.
230 let region_first_page = region.start & !page_mask;
231 let len = region.start - region_first_page + region.len as u64;
232 // Make sure to map whole pages (only necessary for the last entry).
233 let len = len.next_multiple_of(page_size) as usize;
234 // TODO: find the offset from the region, this assumes a single
235 // region starting at address 0.
236 let fd = mem.offset_region(region_first_page)?;
237 // Always map whole pages
238 arena.add_fd_offset(pos, len, fd, region_first_page)?;
239
240 pos += len;
241 }
242
243 let start_offset = sgs
244 .first()
245 .map(|region| region.start & page_mask)
246 .unwrap_or(0) as usize;
247
248 Ok(GuestMemoryMapping {
249 arena,
250 start_offset,
251 })
252 }
253 }
254
impl GuestMemoryRange for GuestMemoryMapping {
    /// Pointer to the first byte of the guest range within the linear arena.
    fn as_ptr(&self) -> *const u8 {
        // SAFETY: the arena has a valid pointer that covers `start_offset + len`.
        unsafe { self.arena.as_ptr().add(self.start_offset) }
    }

    /// Mutable pointer to the first byte of the guest range within the linear arena.
    fn as_mut_ptr(&mut self) -> *mut u8 {
        // SAFETY: the arena has a valid pointer that covers `start_offset + len`.
        unsafe { self.arena.as_ptr().add(self.start_offset) }
    }
}
266
/// Copy of sparse guest memory that is written back upon destruction.
///
/// Contrary to `GuestMemoryMapping` which re-maps guest memory to make it appear linear to the
/// host, this copies the sparse guest memory into a linear vector that is copied back upon
/// destruction. Doing so can be faster than a costly mapping operation if the guest area is small
/// enough.
struct GuestMemoryShadowMapping {
    /// Sparse data copied from the guest.
    data: Vec<u8>,
    /// Guest memory to read from.
    mem: GuestMemory,
    /// SG entries describing the sparse guest area.
    sgs: Vec<SgEntry>,
    /// Whether the data has potentially been modified and requires to be written back to the
    /// guest.
    dirty: bool,
}
284
285 impl GuestMemoryShadowMapping {
new(mem: &GuestMemory, sgs: Vec<SgEntry>) -> anyhow::Result<Self>286 fn new(mem: &GuestMemory, sgs: Vec<SgEntry>) -> anyhow::Result<Self> {
287 let total_size = sgs.iter().fold(0, |total, sg| total + sg.len as usize);
288 let mut data = vec![0u8; total_size];
289 let mut pos = 0;
290 for sg in &sgs {
291 mem.read_exact_at_addr(
292 &mut data[pos..pos + sg.len as usize],
293 GuestAddress(sg.start),
294 )?;
295 pos += sg.len as usize;
296 }
297
298 Ok(Self {
299 data,
300 mem: mem.clone(),
301 sgs,
302 dirty: false,
303 })
304 }
305 }
306
impl GuestMemoryRange for GuestMemoryShadowMapping {
    /// Read-only access leaves the buffer clean: no write-back will occur for it.
    fn as_ptr(&self) -> *const u8 {
        self.data.as_ptr()
    }

    /// Handing out a mutable pointer marks the buffer dirty so `Drop` syncs it back to the
    /// guest.
    fn as_mut_ptr(&mut self) -> *mut u8 {
        self.dirty = true;
        self.data.as_mut_ptr()
    }
}
317
318 /// Write the potentially modified shadow buffer back into the guest memory.
319 impl Drop for GuestMemoryShadowMapping {
drop(&mut self)320 fn drop(&mut self) {
321 // No need to copy back if no modification has been done.
322 if !self.dirty {
323 return;
324 }
325
326 let mut pos = 0;
327 for sg in &self.sgs {
328 if let Err(e) = self.mem.write_all_at_addr(
329 &self.data[pos..pos + sg.len as usize],
330 GuestAddress(sg.start),
331 ) {
332 base::error!("failed to write back guest memory shadow mapping: {:#}", e);
333 }
334 pos += sg.len as usize;
335 }
336 }
337 }
338
/// A chunk of guest memory which can be either directly mapped, or copied into a shadow buffer.
enum GuestMemoryChunk {
    /// Guest pages re-mapped linearly into the host address space.
    Mapping(GuestMemoryMapping),
    /// Copy of the guest data, synced back to the guest on drop if modified.
    Shadow(GuestMemoryShadowMapping),
}
344
345 impl GuestMemoryRange for GuestMemoryChunk {
as_ptr(&self) -> *const u8346 fn as_ptr(&self) -> *const u8 {
347 match self {
348 GuestMemoryChunk::Mapping(m) => m.as_ptr(),
349 GuestMemoryChunk::Shadow(s) => s.as_ptr(),
350 }
351 }
352
as_mut_ptr(&mut self) -> *mut u8353 fn as_mut_ptr(&mut self) -> *mut u8 {
354 match self {
355 GuestMemoryChunk::Mapping(m) => m.as_mut_ptr(),
356 GuestMemoryChunk::Shadow(s) => s.as_mut_ptr(),
357 }
358 }
359 }
360
/// Newtype to implement `VirtioMediaGuestMemoryMapper` on `GuestMemory`.
///
/// Whether to use a direct mapping or to copy the guest data into a shadow buffer is decided by
/// the size of the guest mapping. If it is below `MAPPING_THRESHOLD`, a shadow buffer is used ;
/// otherwise the area is mapped.
struct GuestMemoryMapper(GuestMemory);
367
368 impl VirtioMediaGuestMemoryMapper for GuestMemoryMapper {
369 type GuestMemoryMapping = GuestMemoryChunk;
370
new_mapping(&self, sgs: Vec<SgEntry>) -> anyhow::Result<Self::GuestMemoryMapping>371 fn new_mapping(&self, sgs: Vec<SgEntry>) -> anyhow::Result<Self::GuestMemoryMapping> {
372 /// Threshold at which we perform a direct mapping of the guest memory into the host.
373 /// Anything below that is copied into a shadow buffer and synced back to the guest when
374 /// the memory chunk is destroyed.
375 const MAPPING_THRESHOLD: usize = 0x400;
376 let total_size = sgs.iter().fold(0, |total, sg| total + sg.len as usize);
377
378 if total_size >= MAPPING_THRESHOLD {
379 GuestMemoryMapping::new(&self.0, &sgs).map(GuestMemoryChunk::Mapping)
380 } else {
381 GuestMemoryShadowMapping::new(&self.0, sgs).map(GuestMemoryChunk::Shadow)
382 }
383 }
384 }
385
/// Tokens for the events the device worker waits on.
#[derive(EventToken, Debug)]
enum Token {
    /// A descriptor is available on the command queue.
    CommandQueue,
    /// The session registered with this id has pending events to process.
    V4l2Session(u32),
    /// The worker has been asked to terminate.
    Kill,
    /// The command queue's interrupt needs to be re-asserted.
    InterruptResample,
}
393
/// Newtype to implement `SessionPoller` on `Rc<WaitContext<Token>>`.
///
/// Lets sessions register/unregister their FD with the worker's wait context.
#[derive(Clone)]
struct WaitContextPoller(Rc<WaitContext<Token>>);
397
398 impl SessionPoller for WaitContextPoller {
add_session(&self, session: BorrowedFd, session_id: u32) -> Result<(), i32>399 fn add_session(&self, session: BorrowedFd, session_id: u32) -> Result<(), i32> {
400 self.0
401 .add_for_event(
402 &Descriptor(session.as_raw_fd()),
403 EventType::Read,
404 Token::V4l2Session(session_id),
405 )
406 .map_err(|e| e.errno())
407 }
408
remove_session(&self, session: BorrowedFd)409 fn remove_session(&self, session: BorrowedFd) {
410 let _ = self.0.delete(&Descriptor(session.as_raw_fd()));
411 }
412 }
413
/// Worker to operate a virtio-media device inside a worker thread.
struct Worker<D: VirtioMediaDevice<Reader, Writer>> {
    /// Runner dispatching commands and session events to the device.
    runner: VirtioMediaDeviceRunner<Reader, Writer, D, WaitContextPoller>,
    /// Command queue and the interrupt used to signal it.
    cmd_queue: (Queue, Interrupt),
    /// Wait context shared with the runner's `WaitContextPoller`.
    wait_ctx: Rc<WaitContext<Token>>,
}
420
impl<D> Worker<D>
where
    D: VirtioMediaDevice<Reader, Writer>,
{
    /// Create a new worker instance for `device`.
    fn new(
        device: D,
        cmd_queue: Queue,
        cmd_interrupt: Interrupt,
        kill_evt: Event,
        wait_ctx: Rc<WaitContext<Token>>,
    ) -> anyhow::Result<Self> {
        // Register the command queue and kill events up-front; per-session FDs are added
        // later through the `WaitContextPoller`.
        wait_ctx
            .add_many(&[
                (cmd_queue.event(), Token::CommandQueue),
                (&kill_evt, Token::Kill),
            ])
            .context("when adding worker events to wait context")?;

        Ok(Self {
            runner: VirtioMediaDeviceRunner::new(device, WaitContextPoller(Rc::clone(&wait_ctx))),
            cmd_queue: (cmd_queue, cmd_interrupt),
            wait_ctx,
        })
    }

    /// Main worker loop: service commands, per-session events and interrupt resampling until
    /// the kill event fires.
    fn run(&mut self) -> anyhow::Result<()> {
        // Register the interrupt resample event, if the transport provides one.
        if let Some(resample_evt) = self.cmd_queue.1.get_resample_evt() {
            self.wait_ctx
                .add(resample_evt, Token::InterruptResample)
                .context("failed adding resample event to WaitContext.")?;
        }

        loop {
            let wait_events = self.wait_ctx.wait().context("Wait error")?;

            for wait_event in wait_events.iter() {
                match wait_event.token {
                    Token::CommandQueue => {
                        // Acknowledge the queue event, then drain all pending descriptors.
                        let _ = self.cmd_queue.0.event().wait();
                        while let Some(mut desc) = self.cmd_queue.0.pop() {
                            self.runner
                                .handle_command(&mut desc.reader, &mut desc.writer);
                            // Return the descriptor to the guest.
                            let written = desc.writer.bytes_written() as u32;
                            self.cmd_queue.0.add_used(desc, written);
                            self.cmd_queue.0.trigger_interrupt();
                        }
                    }
                    Token::Kill => {
                        // The device is being torn down; exit the loop cleanly.
                        return Ok(());
                    }
                    Token::V4l2Session(session_id) => {
                        let session = match self.runner.sessions.get_mut(&session_id) {
                            Some(session) => session,
                            None => {
                                base::error!(
                                    "received event for non-registered session {}",
                                    session_id
                                );
                                continue;
                            }
                        };

                        if let Err(e) = self.runner.device.process_events(session) {
                            base::error!(
                                "error while processing events for session {}: {:#}",
                                session_id,
                                e
                            );
                            // A session that fails to process its events is closed and dropped.
                            if let Some(session) = self.runner.sessions.remove(&session_id) {
                                self.runner.device.close_session(session);
                            }
                        }
                    }
                    Token::InterruptResample => {
                        self.cmd_queue.1.interrupt_resample();
                    }
                }
            }
        }
    }
}
504
/// Implements the required traits to operate a [`VirtioMediaDevice`] under crosvm.
struct CrosvmVirtioMediaDevice<
    D: VirtioMediaDevice<Reader, Writer>,
    F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>,
> {
    /// Closure to create the device once all its resources are acquired.
    create_device: F,
    /// Virtio configuration area.
    config: VirtioMediaDeviceConfig,

    /// Virtio device features.
    base_features: u64,
    /// Mapper to make host video buffers visible to the guest.
    ///
    /// We unfortunately need to put it behind a `Arc` because the mapper is only passed once,
    /// whereas the device can be activated several times, so we need to keep a reference to it
    /// even after it is passed to the device.
    shm_mapper: Option<ArcedMemoryMapper>,
    /// Worker thread for the device.
    worker_thread: Option<WorkerThread<()>>,
}
526
527 impl<D, F> CrosvmVirtioMediaDevice<D, F>
528 where
529 D: VirtioMediaDevice<Reader, Writer>,
530 F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>,
531 {
new(base_features: u64, config: VirtioMediaDeviceConfig, create_device: F) -> Self532 fn new(base_features: u64, config: VirtioMediaDeviceConfig, create_device: F) -> Self {
533 Self {
534 base_features,
535 config,
536 shm_mapper: None,
537 create_device,
538 worker_thread: None,
539 }
540 }
541 }
542
/// Size of the device's shared memory region (and of the allocator managing it): a full 32-bit
/// address space.
const HOST_MAPPER_RANGE: u64 = 1 << 32;
544
545 impl<D, F> VirtioDevice for CrosvmVirtioMediaDevice<D, F>
546 where
547 D: VirtioMediaDevice<Reader, Writer> + Send + 'static,
548 F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>
549 + Send,
550 {
keep_rds(&self) -> Vec<base::RawDescriptor>551 fn keep_rds(&self) -> Vec<base::RawDescriptor> {
552 let mut keep_rds = Vec::new();
553
554 if let Some(fd) = self.shm_mapper.as_ref().and_then(|m| m.as_raw_descriptor()) {
555 keep_rds.push(fd);
556 }
557
558 keep_rds
559 }
560
device_type(&self) -> DeviceType561 fn device_type(&self) -> DeviceType {
562 DeviceType::Media
563 }
564
queue_max_sizes(&self) -> &[u16]565 fn queue_max_sizes(&self) -> &[u16] {
566 QUEUE_SIZES
567 }
568
features(&self) -> u64569 fn features(&self) -> u64 {
570 self.base_features
571 }
572
read_config(&self, offset: u64, data: &mut [u8])573 fn read_config(&self, offset: u64, data: &mut [u8]) {
574 copy_config(data, 0, self.config.as_ref(), offset);
575 }
576
activate( &mut self, mem: vm_memory::GuestMemory, interrupt: Interrupt, mut queues: BTreeMap<usize, Queue>, ) -> anyhow::Result<()>577 fn activate(
578 &mut self,
579 mem: vm_memory::GuestMemory,
580 interrupt: Interrupt,
581 mut queues: BTreeMap<usize, Queue>,
582 ) -> anyhow::Result<()> {
583 if queues.len() != QUEUE_SIZES.len() {
584 anyhow::bail!(
585 "wrong number of queues are passed: expected {}, actual {}",
586 queues.len(),
587 QUEUE_SIZES.len()
588 );
589 }
590
591 let cmd_queue = queues.remove(&0).context("missing queue 0")?;
592 let event_queue = EventQueue(queues.remove(&1).context("missing queue 1")?);
593
594 let shm_mapper = self
595 .shm_mapper
596 .clone()
597 .take()
598 .context("shared memory mapper was not specified")?;
599
600 let wait_ctx = WaitContext::new()?;
601 let device = (self.create_device)(
602 event_queue,
603 GuestMemoryMapper(mem),
604 HostMemoryMapper {
605 shm_mapper,
606 allocator: AddressAllocator::new(
607 AddressRange::from_start_and_end(0, HOST_MAPPER_RANGE - 1),
608 Some(base::pagesize() as u64),
609 None,
610 )?,
611 },
612 )?;
613
614 let worker_thread = WorkerThread::start("v_media_worker", move |e| {
615 let wait_ctx = Rc::new(wait_ctx);
616 let mut worker = match Worker::new(device, cmd_queue, interrupt, e, wait_ctx) {
617 Ok(worker) => worker,
618 Err(e) => {
619 error!("failed to create virtio-media worker: {:#}", e);
620 return;
621 }
622 };
623 if let Err(e) = worker.run() {
624 error!("virtio_media worker exited with error: {:#}", e);
625 }
626 });
627
628 self.worker_thread = Some(worker_thread);
629 Ok(())
630 }
631
reset(&mut self) -> anyhow::Result<()>632 fn reset(&mut self) -> anyhow::Result<()> {
633 if let Some(worker_thread) = self.worker_thread.take() {
634 worker_thread.stop();
635 }
636
637 Ok(())
638 }
639
get_shared_memory_region(&self) -> Option<SharedMemoryRegion>640 fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
641 Some(SharedMemoryRegion {
642 id: 0,
643 // We need a 32-bit address space as m2m devices start their CAPTURE buffers' offsets
644 // at 2GB.
645 length: HOST_MAPPER_RANGE,
646 })
647 }
648
set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>)649 fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
650 self.shm_mapper = Some(ArcedMemoryMapper::from(mapper));
651 }
652 }
653
654 /// Create a simple media capture device.
655 ///
656 /// This device can only generate a fixed pattern at a fixed resolution, and should only be used
657 /// for checking that the virtio-media pipeline is working properly.
create_virtio_media_simple_capture_device(features: u64) -> Box<dyn VirtioDevice>658 pub fn create_virtio_media_simple_capture_device(features: u64) -> Box<dyn VirtioDevice> {
659 use virtio_media::devices::SimpleCaptureDevice;
660 use virtio_media::v4l2r::ioctl::Capabilities;
661
662 let mut card = [0u8; 32];
663 let card_name = "simple_device";
664 card[0..card_name.len()].copy_from_slice(card_name.as_bytes());
665
666 let device = CrosvmVirtioMediaDevice::new(
667 features,
668 VirtioMediaDeviceConfig {
669 device_caps: (Capabilities::VIDEO_CAPTURE | Capabilities::STREAMING).bits(),
670 // VFL_TYPE_VIDEO
671 device_type: 0,
672 card,
673 },
674 |event_queue, _, host_mapper| Ok(SimpleCaptureDevice::new(event_queue, host_mapper)),
675 );
676
677 Box::new(device)
678 }
679
/// Create a proxy device for a host V4L2 device.
///
/// Since V4L2 is a Linux-specific API, this is only available on Linux targets.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub fn create_virtio_media_v4l2_proxy_device<P: AsRef<Path>>(
    features: u64,
    device_path: P,
) -> anyhow::Result<Box<dyn VirtioDevice>> {
    use virtio_media::devices::V4l2ProxyDevice;
    use virtio_media::v4l2r;
    use virtio_media::v4l2r::ioctl::Capabilities;

    let device = v4l2r::device::Device::open(
        device_path.as_ref(),
        v4l2r::device::DeviceConfig::new().non_blocking_dqbuf(),
    )?;
    let mut device_caps = device.caps().device_caps();

    // We are only exposing one device worth of capabilities.
    device_caps.remove(Capabilities::DEVICE_CAPS);

    // Read-write is not supported by design.
    device_caps.remove(Capabilities::READWRITE);

    let mut config = VirtioMediaDeviceConfig {
        device_caps: device_caps.bits(),
        // VFL_TYPE_VIDEO
        device_type: 0,
        card: Default::default(),
    };
    // Copy the host device's card name into the config, truncated to the config field's size.
    let card = &device.caps().card;
    let name_slice = card[0..std::cmp::min(card.len(), config.card.len())].as_bytes();
    config.card.as_mut_slice()[0..name_slice.len()].copy_from_slice(name_slice);
    // Owned copy of the path so the creation closure can be called on each activation.
    let device_path = PathBuf::from(device_path.as_ref());

    let device = CrosvmVirtioMediaDevice::new(
        features,
        config,
        move |event_queue, guest_mapper, host_mapper| {
            // NOTE(review): the proxy re-opens the device from `device_path` on each
            // activation — verify against `V4l2ProxyDevice::new` semantics.
            let device =
                V4l2ProxyDevice::new(device_path.clone(), event_queue, guest_mapper, host_mapper);

            Ok(device)
        },
    );

    Ok(Box::new(device))
}
728