1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! This module implements the virtio wayland device used by the guest to access the host's wayland server.
6 //!
7 //! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
8 //! sending commands to the guest that are generated by the host, usually messages from the wayland
9 //! server. The `out` queue is for commands from the guest, usually requests to allocate shared
10 //! memory, open a wayland server connection, or send data over an existing connection.
11 //!
12 //! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
13 //! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
14 //! or a unix domain socket to the wayland server. In the shared memory case, there is also an
15 //! associated slot that indicates which hypervisor memory slot the memory is installed into, as
16 //! well as a page frame number that the guest can access the memory from.
17 //!
18 //! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
19 //! wire." They are decoded and executed in the `execute` function and encoded as some variant of
20 //! `WlResp` for responses.
21 //!
22 //! There is one `WlState` instance that contains every known vfd and the current state of the `in`
23 //! queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
24 //! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
25 //! requests, such as those registering guest memory.
26 //!
27 //! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
28 //! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
29 //! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
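//!
//! As a rough sketch of a typical exchange (illustrative only, not an exact wire trace):
//!
//! ```text
//! guest -> out queue:  VIRTIO_WL_CMD_VFD_NEW_CTX { id }       open a connection to the compositor
//! host  -> same chain: VIRTIO_WL_RESP_VFD_NEW { id, flags }   response written by `encode_resp`
//! host  -> in queue:   VIRTIO_WL_CMD_VFD_RECV { id, vfds[] }  data/vfds arriving on that connection
//! guest -> out queue:  VIRTIO_WL_CMD_VFD_SEND { id, vfds[] }  data/vfds sent back to the compositor
//! ```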
30
31 use std::cell::RefCell;
32 use std::collections::btree_map::Entry;
33 use std::collections::BTreeMap;
34 use std::collections::BTreeSet;
35 use std::collections::VecDeque;
36 use std::convert::From;
37 use std::error::Error as StdError;
38 use std::fmt;
39 use std::fs::File;
40 use std::io;
41 use std::io::Read;
42 use std::io::Seek;
43 use std::io::SeekFrom;
44 use std::io::Write;
45 use std::mem::size_of;
46 #[cfg(feature = "minigbm")]
47 use std::os::raw::c_uint;
48 #[cfg(feature = "minigbm")]
49 use std::os::raw::c_ulonglong;
50 use std::os::unix::net::UnixStream;
51 use std::path::Path;
52 use std::path::PathBuf;
53 use std::rc::Rc;
54 use std::result;
55 use std::time::Duration;
56
57 use anyhow::anyhow;
58 use anyhow::Context;
59 use base::error;
60 #[cfg(feature = "minigbm")]
61 use base::ioctl_iow_nr;
62 use base::ioctl_iowr_nr;
63 use base::ioctl_with_ref;
64 use base::linux::SharedMemoryLinux;
65 use base::pagesize;
66 use base::pipe;
67 use base::round_up_to_page_size;
68 use base::unix::FileFlags;
69 use base::warn;
70 use base::AsRawDescriptor;
71 use base::Error;
72 use base::Event;
73 use base::EventToken;
74 use base::EventType;
75 #[cfg(feature = "gpu")]
76 use base::IntoRawDescriptor;
77 #[cfg(feature = "minigbm")]
78 use base::MemoryMappingBuilder;
79 #[cfg(feature = "minigbm")]
80 use base::MmapError;
81 use base::Protection;
82 use base::RawDescriptor;
83 use base::Result;
84 use base::SafeDescriptor;
85 use base::ScmSocket;
86 use base::SharedMemory;
87 use base::Tube;
88 use base::TubeError;
89 use base::VolatileMemoryError;
90 use base::WaitContext;
91 use base::WorkerThread;
92 use data_model::Le32;
93 use data_model::Le64;
94 use hypervisor::MemCacheType;
95 #[cfg(feature = "minigbm")]
96 use libc::EBADF;
97 #[cfg(feature = "minigbm")]
98 use libc::EINVAL;
99 #[cfg(feature = "minigbm")]
100 use libc::ENOSYS;
101 use remain::sorted;
102 use resources::address_allocator::AddressAllocator;
103 use resources::AddressRange;
104 use resources::Alloc;
105 #[cfg(feature = "minigbm")]
106 use rutabaga_gfx::DrmFormat;
107 #[cfg(feature = "minigbm")]
108 use rutabaga_gfx::ImageAllocationInfo;
109 #[cfg(feature = "minigbm")]
110 use rutabaga_gfx::ImageMemoryRequirements;
111 #[cfg(feature = "minigbm")]
112 use rutabaga_gfx::RutabagaDescriptor;
113 #[cfg(feature = "minigbm")]
114 use rutabaga_gfx::RutabagaError;
115 #[cfg(feature = "minigbm")]
116 use rutabaga_gfx::RutabagaGralloc;
117 #[cfg(feature = "minigbm")]
118 use rutabaga_gfx::RutabagaGrallocBackendFlags;
119 #[cfg(feature = "minigbm")]
120 use rutabaga_gfx::RutabagaGrallocFlags;
121 #[cfg(feature = "minigbm")]
122 use rutabaga_gfx::RutabagaIntoRawDescriptor;
123 #[cfg(feature = "minigbm")]
124 use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
125 #[cfg(feature = "minigbm")]
126 use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
127 use thiserror::Error as ThisError;
128 use vm_control::VmMemorySource;
129 use vm_memory::GuestAddress;
130 use vm_memory::GuestMemory;
131 use vm_memory::GuestMemoryError;
132 use zerocopy::AsBytes;
133 use zerocopy::FromBytes;
134 use zerocopy::FromZeroes;
135
136 #[cfg(feature = "gpu")]
137 use super::resource_bridge::get_resource_info;
138 #[cfg(feature = "gpu")]
139 use super::resource_bridge::BufferInfo;
140 #[cfg(feature = "gpu")]
141 use super::resource_bridge::ResourceBridgeError;
142 #[cfg(feature = "gpu")]
143 use super::resource_bridge::ResourceInfo;
144 #[cfg(feature = "gpu")]
145 use super::resource_bridge::ResourceRequest;
146 use super::DeviceType;
147 use super::Interrupt;
148 use super::Queue;
149 use super::Reader;
150 use super::SharedMemoryMapper;
151 use super::SharedMemoryRegion;
152 use super::VirtioDevice;
153 use super::Writer;
154 use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
155 use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
156 use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
157
158 const QUEUE_SIZE: u16 = 256;
159 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
160
161 const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
162 const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
163 const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
164 const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
165 const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
166 const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
167 const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
168 const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
169 #[cfg(feature = "minigbm")]
170 const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
171 #[cfg(feature = "minigbm")]
172 const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
173 #[cfg(feature = "gpu")]
174 const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
175 const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
176 const VIRTIO_WL_RESP_OK: u32 = 4096;
177 const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
178 #[cfg(feature = "minigbm")]
179 const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
180 const VIRTIO_WL_RESP_ERR: u32 = 4352;
181 const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
182 const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
183 const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
184 const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
185 const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
186 const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
187 const VIRTIO_WL_VFD_READ: u32 = 0x2;
188 const VIRTIO_WL_VFD_MAP: u32 = 0x2;
189 const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
190 const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
191
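// Guest-created vfd ids must stay below NEXT_VFD_ID_BASE; ids at or above it are allocated by the
// host for incoming vfds (see WlState::recv), and the mask lets the new_* handlers reject attempts
// to claim an id in the host range.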
192 const NEXT_VFD_ID_BASE: u32 = 0x40000000;
193 const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
194 // Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
195 // number of allocs.
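// With 4 KiB pages this leaves 4096 - 16 (CtrlVfdRecv) - 28 * 4 (vfd id array) = 3968 bytes of
// payload per buffer.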
196 const IN_BUFFER_LEN: usize =
197 0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
198
199 #[cfg(feature = "minigbm")]
200 const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
201
202 #[cfg(feature = "minigbm")]
203 const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
204 #[cfg(feature = "minigbm")]
205 const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
206 #[cfg(feature = "minigbm")]
207 const DMA_BUF_SYNC_END: c_uint = 0x4;
208
209 #[cfg(feature = "minigbm")]
210 #[repr(C)]
211 #[derive(Copy, Clone)]
212 struct dma_buf_sync {
213 flags: c_ulonglong,
214 }
215
216 #[cfg(feature = "minigbm")]
217 ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
218
219 #[repr(C)]
220 #[derive(Copy, Clone, Default)]
221 struct sync_file_info {
222 name: [u8; 32],
223 status: i32,
224 flags: u32,
225 num_fences: u32,
226 pad: u32,
227 sync_fence_info: u64,
228 }
229
230 ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
231
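// Returns true if `f` is a sync_file fence, i.e. the SYNC_IOC_FILE_INFO ioctl succeeds on it.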
232 fn is_fence(f: &File) -> bool {
233 let info = sync_file_info::default();
234 // SAFETY:
235 // Safe as f is a valid file
236 unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO, &info) == 0 }
237 }
238
239 #[cfg(feature = "minigbm")]
240 #[derive(Debug, Default)]
241 struct GpuMemoryPlaneDesc {
242 stride: u32,
243 offset: u32,
244 }
245
246 #[cfg(feature = "minigbm")]
247 #[derive(Debug, Default)]
248 struct GpuMemoryDesc {
249 planes: [GpuMemoryPlaneDesc; 3],
250 }
251
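// Kinds accepted by VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: LOCAL names one of this device's own vfds,
// while the VIRTGPU kinds name buffers or fences owned by the virtio-gpu device and are resolved
// through the resource bridge in WlState::send.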
252 const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
253 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
254 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
255 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
256
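// Addresses reported to the guest are page frame numbers: pfn = address >> VIRTIO_WL_PFN_SHIFT,
// i.e. 4 KiB pages (see WlState::compute_pfn).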
257 const VIRTIO_WL_PFN_SHIFT: u32 = 12;
258
259 fn encode_vfd_new(
260 writer: &mut Writer,
261 resp: bool,
262 vfd_id: u32,
263 flags: u32,
264 pfn: u64,
265 size: u32,
266 ) -> WlResult<()> {
267 let ctrl_vfd_new = CtrlVfdNew {
268 hdr: CtrlHeader {
269 type_: Le32::from(if resp {
270 VIRTIO_WL_RESP_VFD_NEW
271 } else {
272 VIRTIO_WL_CMD_VFD_NEW
273 }),
274 flags: Le32::from(0),
275 },
276 id: Le32::from(vfd_id),
277 flags: Le32::from(flags),
278 pfn: Le64::from(pfn),
279 size: Le32::from(size),
280 padding: Default::default(),
281 };
282
283 writer
284 .write_obj(ctrl_vfd_new)
285 .map_err(WlError::WriteResponse)
286 }
287
288 #[cfg(feature = "minigbm")]
289 fn encode_vfd_new_dmabuf(
290 writer: &mut Writer,
291 vfd_id: u32,
292 flags: u32,
293 pfn: u64,
294 size: u32,
295 desc: GpuMemoryDesc,
296 ) -> WlResult<()> {
297 let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
298 hdr: CtrlHeader {
299 type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
300 flags: Le32::from(0),
301 },
302 id: Le32::from(vfd_id),
303 flags: Le32::from(flags),
304 pfn: Le64::from(pfn),
305 size: Le32::from(size),
306 width: Le32::from(0),
307 height: Le32::from(0),
308 format: Le32::from(0),
309 stride0: Le32::from(desc.planes[0].stride),
310 stride1: Le32::from(desc.planes[1].stride),
311 stride2: Le32::from(desc.planes[2].stride),
312 offset0: Le32::from(desc.planes[0].offset),
313 offset1: Le32::from(desc.planes[1].offset),
314 offset2: Le32::from(desc.planes[2].offset),
315 };
316
317 writer
318 .write_obj(ctrl_vfd_new_dmabuf)
319 .map_err(WlError::WriteResponse)
320 }
321
322 fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
323 let ctrl_vfd_recv = CtrlVfdRecv {
324 hdr: CtrlHeader {
325 type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
326 flags: Le32::from(0),
327 },
328 id: Le32::from(vfd_id),
329 vfd_count: Le32::from(vfd_ids.len() as u32),
330 };
331 writer
332 .write_obj(ctrl_vfd_recv)
333 .map_err(WlError::WriteResponse)?;
334
335 for &recv_vfd_id in vfd_ids.iter() {
336 writer
337 .write_obj(Le32::from(recv_vfd_id))
338 .map_err(WlError::WriteResponse)?;
339 }
340
341 writer.write_all(data).map_err(WlError::WriteResponse)
342 }
343
344 fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
345 let ctrl_vfd_new = CtrlVfd {
346 hdr: CtrlHeader {
347 type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
348 flags: Le32::from(0),
349 },
350 id: Le32::from(vfd_id),
351 };
352
353 writer
354 .write_obj(ctrl_vfd_new)
355 .map_err(WlError::WriteResponse)
356 }
357
358 fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
359 match resp {
360 WlResp::VfdNew {
361 id,
362 flags,
363 pfn,
364 size,
365 resp,
366 } => encode_vfd_new(writer, resp, id, flags, pfn, size),
367 #[cfg(feature = "minigbm")]
368 WlResp::VfdNewDmabuf {
369 id,
370 flags,
371 pfn,
372 size,
373 desc,
374 } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
375 WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
376 WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
377 r => writer
378 .write_obj(Le32::from(r.get_code()))
379 .map_err(WlError::WriteResponse),
380 }
381 }
382
383 #[allow(dead_code)]
384 #[sorted]
385 #[derive(ThisError, Debug)]
386 enum WlError {
387 #[error("overflow in calculation")]
388 CheckedOffset,
389 #[error("failed to synchronize DMABuf access: {0}")]
390 DmabufSync(io::Error),
391 #[error("failed to create shared memory from descriptor: {0}")]
392 FromSharedMemory(Error),
393 #[error("failed to get seals: {0}")]
394 GetSeals(Error),
395 #[error("gralloc error: {0}")]
396 #[cfg(feature = "minigbm")]
397 GrallocError(#[from] RutabagaError),
398 #[error("access violation in guest memory: {0}")]
399 GuestMemory(#[from] GuestMemoryError),
400 #[error("invalid string: {0}")]
401 InvalidString(std::str::Utf8Error),
402 #[error("failed to create shared memory allocation: {0}")]
403 NewAlloc(Error),
404 #[error("failed to create pipe: {0}")]
405 NewPipe(Error),
406 #[error("error parsing descriptor: {0}")]
407 ParseDesc(io::Error),
408 #[error("failed to read a pipe: {0}")]
409 ReadPipe(io::Error),
410 #[error("failed to recv on a socket: {0}")]
411 RecvVfd(io::Error),
412 #[error("failed to send on a socket: {0}")]
413 SendVfd(io::Error),
414 #[error("shmem mapper failure: {0}")]
415 ShmemMapperError(anyhow::Error),
416 #[error("failed to connect socket: {0}")]
417 SocketConnect(io::Error),
418 #[error("failed to set socket as non-blocking: {0}")]
419 SocketNonBlock(io::Error),
420 #[error("unknown socket name: {0}")]
421 UnknownSocketName(String),
422 #[error("invalid response from parent VM")]
423 VmBadResponse,
424 #[error("failed to control parent VM: {0}")]
425 VmControl(TubeError),
426 #[error("access violating in guest volatile memory: {0}")]
427 VolatileMemory(#[from] VolatileMemoryError),
428 #[error("failed to listen to descriptor on wait context: {0}")]
429 WaitContextAdd(Error),
430 #[error("failed to write to a pipe: {0}")]
431 WritePipe(io::Error),
432 #[error("failed to write response: {0}")]
433 WriteResponse(io::Error),
434 }
435
436 type WlResult<T> = result::Result<T, WlError>;
437
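// The device exposes a single shared memory region to the guest: shmem id 0, 1 << 32 bytes (4 GiB).
// VmRequester maps each vfd's memory into a sub-range of this region.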
438 pub const WL_SHMEM_ID: u8 = 0;
439 pub const WL_SHMEM_SIZE: u64 = 1 << 32;
440
441 struct VmRequesterState {
442 mapper: Box<dyn SharedMemoryMapper>,
443 #[cfg(feature = "minigbm")]
444 gralloc: RutabagaGralloc,
445
446 // Allocator for shm address space
447 address_allocator: AddressAllocator,
448
449 // Map of existing mappings in the shm address space
450 allocs: BTreeMap<u64 /* offset */, Alloc>,
451
452 // The id for the next shmem allocation
453 next_alloc: usize,
454 }
455
456 #[derive(Clone)]
457 struct VmRequester {
458 state: Rc<RefCell<VmRequesterState>>,
459 }
460
461 // The following are wrappers to avoid base dependencies in the rutabaga crate
462 #[cfg(feature = "minigbm")]
463 fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
464 // SAFETY:
465 // Safe because we own the SafeDescriptor at this point.
466 unsafe { base::FromRawDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
467 }
468
469 impl VmRequester {
470 fn new(
471 mapper: Box<dyn SharedMemoryMapper>,
472 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
473 ) -> VmRequester {
474 VmRequester {
475 state: Rc::new(RefCell::new(VmRequesterState {
476 mapper,
477 #[cfg(feature = "minigbm")]
478 gralloc,
479 address_allocator: AddressAllocator::new(
480 AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
481 Some(pagesize() as u64),
482 None,
483 )
484 .expect("failed to create allocator"),
485 allocs: BTreeMap::new(),
486 next_alloc: 0,
487 })),
488 }
489 }
490
491 fn unregister_memory(&self, offset: u64) -> WlResult<()> {
492 let mut state = self.state.borrow_mut();
493 state
494 .mapper
495 .remove_mapping(offset)
496 .map_err(WlError::ShmemMapperError)?;
497 let alloc = state
498 .allocs
499 .remove(&offset)
500 .context("unknown offset")
501 .map_err(WlError::ShmemMapperError)?;
502 state
503 .address_allocator
504 .release(alloc)
505 .expect("corrupt address space");
506 Ok(())
507 }
508
509 #[cfg(feature = "minigbm")]
510 fn allocate_and_register_gpu_memory(
511 &self,
512 width: u32,
513 height: u32,
514 format: u32,
515 ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
516 let mut state = self.state.borrow_mut();
517
518 let img = ImageAllocationInfo {
519 width,
520 height,
521 drm_format: DrmFormat::from(format),
522 // Linear layout is a requirement as the virtio wayland guest expects
523 // this for CPU access to the buffer. Scanout and texturing are
524 // optional as the consumer (the wayland compositor) is expected to
525 // fall back to less efficient mechanisms for presentation if
526 // necessary. In practice, linear buffers for commonly used formats
527 // will also support scanout and texturing.
528 flags: RutabagaGrallocFlags::empty().use_linear(true),
529 };
530
531 let reqs = state
532 .gralloc
533 .get_image_memory_requirements(img)
534 .map_err(WlError::GrallocError)?;
535 let handle = state
536 .gralloc
537 .allocate_memory(reqs)
538 .map_err(WlError::GrallocError)?;
539 drop(state);
540
541 let safe_descriptor = to_safe_descriptor(handle.os_handle);
542 self.register_memory(
543 safe_descriptor
544 .try_clone()
545 .context("failed to dup gfx handle")
546 .map_err(WlError::ShmemMapperError)?,
547 reqs.size,
548 Protection::read_write(),
549 )
550 .map(|info| (info, safe_descriptor, reqs))
551 }
552
553 fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
554 let prot = match FileFlags::from_file(shm) {
555 Ok(FileFlags::Read) => Protection::read(),
556 Ok(FileFlags::Write) => Protection::write(),
557 Ok(FileFlags::ReadWrite) => {
558 let seals = shm.get_seals().map_err(WlError::GetSeals)?;
559 if seals.write_seal() {
560 Protection::read()
561 } else {
562 Protection::read_write()
563 }
564 }
565 Err(e) => {
566 return Err(WlError::ShmemMapperError(anyhow!(
567 "failed to get file descriptor flags with error: {:?}",
568 e
569 )))
570 }
571 };
572 self.register_memory(
573 SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
574 .context("failed to create safe descriptor")
575 .map_err(WlError::ShmemMapperError)?,
576 shm.size(),
577 prot,
578 )
579 }
580
581 fn register_memory(
582 &self,
583 descriptor: SafeDescriptor,
584 size: u64,
585 prot: Protection,
586 ) -> WlResult<u64> {
587 let mut state = self.state.borrow_mut();
588 let size = round_up_to_page_size(size as usize) as u64;
589
590 let source = VmMemorySource::Descriptor {
591 descriptor,
592 offset: 0,
593 size,
594 };
595 let alloc = Alloc::Anon(state.next_alloc);
596 state.next_alloc += 1;
597 let offset = state
598 .address_allocator
599 .allocate(size, alloc, "virtio-wl".to_owned())
600 .context("failed to allocate offset")
601 .map_err(WlError::ShmemMapperError)?;
602
603 match state
604 .mapper
605 .add_mapping(source, offset, prot, MemCacheType::CacheCoherent)
606 {
607 Ok(()) => {
608 state.allocs.insert(offset, alloc);
609 Ok(offset)
610 }
611 Err(e) => {
612 // We just allocated it ourselves, it must exist.
613 state
614 .address_allocator
615 .release(alloc)
616 .expect("corrupt address space");
617 Err(WlError::ShmemMapperError(e))
618 }
619 }
620 }
621 }
622
623 #[repr(C)]
624 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
625 struct CtrlHeader {
626 type_: Le32,
627 flags: Le32,
628 }
629
630 #[repr(C)]
631 #[derive(Copy, Clone, Default, FromZeroes, FromBytes, AsBytes)]
632 struct CtrlVfdNew {
633 hdr: CtrlHeader,
634 id: Le32,
635 flags: Le32,
636 pfn: Le64,
637 size: Le32,
638 padding: Le32,
639 }
640
641 #[repr(C)]
642 #[derive(Copy, Clone, Default, FromZeroes, FromBytes)]
643 struct CtrlVfdNewCtxNamed {
644 hdr: CtrlHeader,
645 id: Le32,
646 flags: Le32, // Ignored.
647 pfn: Le64, // Ignored.
648 size: Le32, // Ignored.
649 name: [u8; 32],
650 }
651
652 #[repr(C)]
653 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
654 #[cfg(feature = "minigbm")]
655 struct CtrlVfdNewDmabuf {
656 hdr: CtrlHeader,
657 id: Le32,
658 flags: Le32,
659 pfn: Le64,
660 size: Le32,
661 width: Le32,
662 height: Le32,
663 format: Le32,
664 stride0: Le32,
665 stride1: Le32,
666 stride2: Le32,
667 offset0: Le32,
668 offset1: Le32,
669 offset2: Le32,
670 }
671
672 #[cfg(feature = "minigbm")]
673 #[repr(C)]
674 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
675 struct CtrlVfdDmabufSync {
676 hdr: CtrlHeader,
677 id: Le32,
678 flags: Le32,
679 }
680
681 #[repr(C)]
682 #[derive(Copy, Clone, AsBytes, FromZeroes, FromBytes)]
683 struct CtrlVfdRecv {
684 hdr: CtrlHeader,
685 id: Le32,
686 vfd_count: Le32,
687 }
688
689 #[repr(C)]
690 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
691 struct CtrlVfd {
692 hdr: CtrlHeader,
693 id: Le32,
694 }
695
696 #[repr(C)]
697 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
698 struct CtrlVfdSend {
699 hdr: CtrlHeader,
700 id: Le32,
701 vfd_count: Le32,
702 // Remainder is an array of vfd_count IDs followed by data.
703 }
704
705 #[repr(C)]
706 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
707 struct CtrlVfdSendVfd {
708 kind: Le32,
709 id: Le32,
710 }
711
712 #[repr(C)]
713 #[derive(Copy, Clone, FromZeroes, FromBytes)]
714 union CtrlVfdSendVfdV2Payload {
715 id: Le32,
716 seqno: Le64,
717 }
718
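// Variant of CtrlVfdSendVfd where `kind` selects which union member is valid: LOCAL/VIRTGPU carry
// an id, VIRTGPU_FENCE carries a fence seqno. The accessors below assert the kind accordingly.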
719 #[repr(C)]
720 #[derive(Copy, Clone, FromZeroes, FromBytes)]
721 struct CtrlVfdSendVfdV2 {
722 kind: Le32,
723 payload: CtrlVfdSendVfdV2Payload,
724 }
725
726 impl CtrlVfdSendVfdV2 {
727 fn id(&self) -> Le32 {
728 assert!(
729 self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
730 || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
731 );
732 // SAFETY: trivially safe given we assert kind
733 unsafe { self.payload.id }
734 }
735 #[cfg(feature = "gpu")]
736 fn seqno(&self) -> Le64 {
737 assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
738 // SAFETY: trivially safe given we assert kind
739 unsafe { self.payload.seqno }
740 }
741 }
742
743 #[derive(Debug)]
744 #[allow(dead_code)]
745 enum WlResp<'a> {
746 Ok,
747 VfdNew {
748 id: u32,
749 flags: u32,
750 pfn: u64,
751 size: u32,
752 // The VfdNew variant can be either a response or a command depending on this `resp`. This
753 // is important for the `get_code` method.
754 resp: bool,
755 },
756 #[cfg(feature = "minigbm")]
757 VfdNewDmabuf {
758 id: u32,
759 flags: u32,
760 pfn: u64,
761 size: u32,
762 desc: GpuMemoryDesc,
763 },
764 VfdRecv {
765 id: u32,
766 data: &'a [u8],
767 vfds: &'a [u32],
768 },
769 VfdHup {
770 id: u32,
771 },
772 Err(Box<dyn StdError>),
773 OutOfMemory,
774 InvalidId,
775 InvalidType,
776 InvalidFlags,
777 InvalidCommand,
778 }
779
780 impl<'a> WlResp<'a> {
781 fn get_code(&self) -> u32 {
782 match *self {
783 WlResp::Ok => VIRTIO_WL_RESP_OK,
784 WlResp::VfdNew { resp, .. } => {
785 if resp {
786 VIRTIO_WL_RESP_VFD_NEW
787 } else {
788 VIRTIO_WL_CMD_VFD_NEW
789 }
790 }
791 #[cfg(feature = "minigbm")]
792 WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
793 WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
794 WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
795 WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
796 WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
797 WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
798 WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
799 WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
800 WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
801 }
802 }
803 }
804
805 #[derive(Default)]
806 struct WlVfd {
807 socket: Option<ScmSocket<UnixStream>>,
808 guest_shared_memory: Option<SharedMemory>,
809 remote_pipe: Option<File>,
810 local_pipe: Option<(u32 /* flags */, File)>,
811 slot: Option<(u64 /* offset */, VmRequester)>,
812 #[cfg(feature = "minigbm")]
813 is_dmabuf: bool,
814 #[cfg(feature = "minigbm")]
815 map_info: u32,
816 fence: Option<File>,
817 is_fence: bool,
818 }
819
820 impl fmt::Debug for WlVfd {
821 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
822 write!(f, "WlVfd {{")?;
823 if let Some(s) = &self.socket {
824 write!(f, " socket: {}", s.as_raw_descriptor())?;
825 }
826 if let Some((offset, _)) = &self.slot {
827 write!(f, " offset: {}", offset)?;
828 }
829 if let Some(s) = &self.remote_pipe {
830 write!(f, " remote: {}", s.as_raw_descriptor())?;
831 }
832 if let Some((_, s)) = &self.local_pipe {
833 write!(f, " local: {}", s.as_raw_descriptor())?;
834 }
835 write!(f, " }}")
836 }
837 }
838
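// Maps the shared memory and writes back all of its cachelines; used by dmabuf_sync when the host
// mapping is not cached.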
839 #[cfg(feature = "minigbm")]
840 fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
841 let mmap = match MemoryMappingBuilder::new(shared_memory.size as usize)
842 .from_shared_memory(shared_memory)
843 .build()
844 {
845 Ok(v) => v,
846 Err(_) => return Err(Error::new(EINVAL)),
847 };
848 if let Err(err) = mmap.flush_all() {
849 base::error!("failed to flush shared memory: {}", err);
850 return match err {
851 MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
852 _ => Err(Error::new(EINVAL)),
853 };
854 }
855 Ok(())
856 }
857
858 impl WlVfd {
859 fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
860 let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
861 let mut vfd = WlVfd::default();
862 vfd.socket = Some(socket.try_into().map_err(WlError::SocketConnect)?);
863 Ok(vfd)
864 }
865
866 fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
867 let size_page_aligned = round_up_to_page_size(size as usize) as u64;
868 let vfd_shm =
869 SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
870
871 let offset = vm.register_shmem(&vfd_shm)?;
872
873 let mut vfd = WlVfd::default();
874 vfd.guest_shared_memory = Some(vfd_shm);
875 vfd.slot = Some((offset, vm));
876 Ok(vfd)
877 }
878
879 #[cfg(feature = "minigbm")]
880 fn dmabuf(
881 vm: VmRequester,
882 width: u32,
883 height: u32,
884 format: u32,
885 ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
886 let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
887 let mut vfd = WlVfd::default();
888 let vfd_shm =
889 SharedMemory::from_safe_descriptor(desc, reqs.size).map_err(WlError::NewAlloc)?;
890
891 let mut desc = GpuMemoryDesc::default();
892 for i in 0..3 {
893 desc.planes[i] = GpuMemoryPlaneDesc {
894 stride: reqs.strides[i],
895 offset: reqs.offsets[i],
896 }
897 }
898
899 vfd.guest_shared_memory = Some(vfd_shm);
900 vfd.slot = Some((offset, vm));
901 vfd.is_dmabuf = true;
902 vfd.map_info = reqs.map_info;
903 Ok((vfd, desc))
904 }
905
906 #[cfg(feature = "minigbm")]
907 fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
908 if !self.is_dmabuf {
909 return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
910 }
911
912 match &self.guest_shared_memory {
913 Some(descriptor) => {
914 let sync = dma_buf_sync {
915 flags: flags as u64,
916 };
917 // SAFETY:
918 // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
919 if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC, &sync) } < 0 {
920 return Err(WlError::DmabufSync(io::Error::last_os_error()));
921 }
922
923 // virtio-wl kernel driver always maps dmabufs with WB memory type, regardless of
924 // the host memory type (which is wrong). However, to avoid changing the protocol,
925 // assume that all guest writes are cached and ensure clflush-like ops on all mapped
926 // cachelines if the host mapping is not cached.
927 const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
928 if (flags & END_WRITE_MASK) == END_WRITE_MASK
929 && (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
930 {
931 if let Err(err) = flush_shared_memory(descriptor) {
932 base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
933 return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
934 err.errno(),
935 )));
936 }
937 }
938 Ok(())
939 }
940 None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
941 }
942 }
943
944 fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
945 let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
946 let mut vfd = WlVfd::default();
947 vfd.remote_pipe = Some(read_pipe);
948 vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
949 Ok(vfd)
950 }
951
952 fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
953 let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
954 let mut vfd = WlVfd::default();
955 vfd.remote_pipe = Some(write_pipe);
956 vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
957 Ok(vfd)
958 }
959
960 fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
961 // We need to determine if the given file is more like shared memory or a pipe/socket. A
962 // quick and easy check is to seek to the end of the file. If it works we assume it's not a
963 pipe/socket because those have no end. The resulting end position also tells us how large a
964 shared memory chunk to map into guest memory. If seeking to the end
965 // fails, we assume it's a socket or pipe with read/write semantics.
966 if descriptor.seek(SeekFrom::End(0)).is_ok() {
967 let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
968 let offset = vm.register_shmem(&shm)?;
969
970 let mut vfd = WlVfd::default();
971 vfd.guest_shared_memory = Some(shm);
972 vfd.slot = Some((offset, vm));
973 Ok(vfd)
974 } else if is_fence(&descriptor) {
975 let mut vfd = WlVfd::default();
976 vfd.is_fence = true;
977 vfd.fence = Some(descriptor);
978 Ok(vfd)
979 } else {
980 let flags = match FileFlags::from_file(&descriptor) {
981 Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
982 Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
983 Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
984 _ => 0,
985 };
986 let mut vfd = WlVfd::default();
987 vfd.local_pipe = Some((flags, descriptor));
988 Ok(vfd)
989 }
990 }
991
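// Flags reported to the guest for this vfd. With transition flags (VIRTIO_WL_F_TRANS_FLAGS) the
// read/write/fence bits are used; otherwise the legacy control/map encoding applies.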
992 fn flags(&self, use_transition_flags: bool) -> u32 {
993 let mut flags = 0;
994 if use_transition_flags {
995 if self.socket.is_some() {
996 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
997 }
998 if let Some((f, _)) = self.local_pipe {
999 flags |= f;
1000 }
1001 if self.is_fence {
1002 flags |= VIRTIO_WL_VFD_FENCE;
1003 }
1004 } else {
1005 if self.socket.is_some() {
1006 flags |= VIRTIO_WL_VFD_CONTROL;
1007 }
1008 if self.slot.is_some() {
1009 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
1010 }
1011 }
1012 flags
1013 }
1014
1015 // Offset within the shared memory region this VFD was mapped at.
1016 fn offset(&self) -> Option<u64> {
1017 self.slot.as_ref().map(|s| s.0)
1018 }
1019
1020 // Size in bytes of the shared memory VFD.
1021 fn size(&self) -> Option<u64> {
1022 self.guest_shared_memory.as_ref().map(|shm| shm.size())
1023 }
1024
1025 // The descriptor that gets sent if this VFD is sent over a socket.
1026 fn send_descriptor(&self) -> Option<RawDescriptor> {
1027 self.guest_shared_memory
1028 .as_ref()
1029 .map(|shm| shm.as_raw_descriptor())
1030 .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
1031 .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
1032 .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
1033 }
1034
1035 // The FD that is used for polling for events on this VFD.
1036 fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
1037 self.socket
1038 .as_ref()
1039 .map(|s| s as &dyn AsRawDescriptor)
1040 .or_else(|| {
1041 self.local_pipe
1042 .as_ref()
1043 .map(|(_, p)| p as &dyn AsRawDescriptor)
1044 })
1045 .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
1046 }
1047
1048 // Sends data/files from the guest to the host over this VFD.
1049 fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
1050 if let Some(socket) = &self.socket {
1051 socket
1052 .send_vectored_with_fds(&data.get_remaining(), rds)
1053 .map_err(WlError::SendVfd)?;
1054 // All remaining data in `data` is now considered consumed.
1055 data.consume(usize::MAX);
1056 Ok(WlResp::Ok)
1057 } else if let Some((_, local_pipe)) = &mut self.local_pipe {
1058 // Impossible to send descriptors over a simple pipe.
1059 if !rds.is_empty() {
1060 return Ok(WlResp::InvalidType);
1061 }
1062 data.read_to(local_pipe, usize::MAX)
1063 .map_err(WlError::WritePipe)?;
1064 Ok(WlResp::Ok)
1065 } else {
1066 Ok(WlResp::InvalidType)
1067 }
1068 }
1069
1070 // Receives data/files from the host for this VFD and queues it for the guest.
1071 fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
1072 if let Some(socket) = self.socket.take() {
1073 let mut buf = vec![0; IN_BUFFER_LEN];
1074 // If any errors happen, the socket will get dropped, preventing more reading.
1075 let (len, descriptors) = socket
1076 .recv_with_fds(&mut buf, VIRTWL_SEND_MAX_ALLOCS)
1077 .map_err(WlError::RecvVfd)?;
1078 // If any data gets read, put the socket back for future recv operations.
1079 if len != 0 || !descriptors.is_empty() {
1080 buf.truncate(len);
1081 buf.shrink_to_fit();
1082 self.socket = Some(socket);
1083 in_file_queue.extend(descriptors.into_iter().map(File::from));
1084 return Ok(buf);
1085 }
1086 Ok(Vec::new())
1087 } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
1088 let mut buf = vec![0; IN_BUFFER_LEN];
1089 let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
1090 if len != 0 {
1091 buf.truncate(len);
1092 buf.shrink_to_fit();
1093 self.local_pipe = Some((flags, local_pipe));
1094 return Ok(buf);
1095 }
1096 Ok(Vec::new())
1097 } else {
1098 Ok(Vec::new())
1099 }
1100 }
1101
1102 // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
1103 // up events.
1104 fn close_remote(&mut self) {
1105 self.remote_pipe = None;
1106 }
1107
1108 fn close(&mut self) -> WlResult<()> {
1109 if let Some((offset, vm)) = self.slot.take() {
1110 vm.unregister_memory(offset)?;
1111 }
1112 self.socket = None;
1113 self.remote_pipe = None;
1114 self.local_pipe = None;
1115 Ok(())
1116 }
1117 }
1118
1119 impl Drop for WlVfd {
1120 fn drop(&mut self) {
1121 let _ = self.close();
1122 }
1123 }
1124
1125 #[derive(Debug)]
1126 enum WlRecv {
1127 Vfd { id: u32 },
1128 Data { buf: Vec<u8> },
1129 Hup,
1130 }
1131
1132 pub struct WlState {
1133 wayland_paths: BTreeMap<String, PathBuf>,
1134 vm: VmRequester,
1135 resource_bridge: Option<Tube>,
1136 use_transition_flags: bool,
1137 wait_ctx: WaitContext<u32>,
1138 vfds: BTreeMap<u32, WlVfd>,
1139 next_vfd_id: u32,
1140 in_file_queue: Vec<File>,
1141 in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
1142 current_recv_vfd: Option<u32>,
1143 recv_vfds: Vec<u32>,
1144 #[cfg(feature = "gpu")]
1145 signaled_fence: Option<SafeDescriptor>,
1146 use_send_vfd_v2: bool,
1147 address_offset: Option<u64>,
1148 }
1149
1150 impl WlState {
1151 /// Create a new `WlState` instance for running a virtio-wl device.
1152 pub fn new(
1153 wayland_paths: BTreeMap<String, PathBuf>,
1154 mapper: Box<dyn SharedMemoryMapper>,
1155 use_transition_flags: bool,
1156 use_send_vfd_v2: bool,
1157 resource_bridge: Option<Tube>,
1158 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1159 address_offset: Option<u64>,
1160 ) -> WlState {
1161 WlState {
1162 wayland_paths,
1163 vm: VmRequester::new(
1164 mapper,
1165 #[cfg(feature = "minigbm")]
1166 gralloc,
1167 ),
1168 resource_bridge,
1169 wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
1170 use_transition_flags,
1171 vfds: BTreeMap::new(),
1172 next_vfd_id: NEXT_VFD_ID_BASE,
1173 in_file_queue: Vec::new(),
1174 in_queue: VecDeque::new(),
1175 current_recv_vfd: None,
1176 recv_vfds: Vec::new(),
1177 #[cfg(feature = "gpu")]
1178 signaled_fence: None,
1179 use_send_vfd_v2,
1180 address_offset,
1181 }
1182 }
1183
1184 /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
1185 /// long-term solution is to replace the WaitContext completely by spawning async workers
1186 /// instead.
1187 pub fn wait_ctx(&self) -> &WaitContext<u32> {
1188 &self.wait_ctx
1189 }
1190
1191 fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
1192 if id & VFD_ID_HOST_MASK != 0 {
1193 return Ok(WlResp::InvalidId);
1194 }
1195
1196 if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
1197 return Ok(WlResp::InvalidFlags);
1198 }
1199
1200 if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
1201 return Ok(WlResp::InvalidFlags);
1202 }
1203
1204 match self.vfds.entry(id) {
1205 Entry::Vacant(entry) => {
1206 let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
1207 WlVfd::pipe_remote_read_local_write()?
1208 } else if flags & VIRTIO_WL_VFD_READ != 0 {
1209 WlVfd::pipe_remote_write_local_read()?
1210 } else {
1211 return Ok(WlResp::InvalidFlags);
1212 };
1213 self.wait_ctx
1214 .add(vfd.wait_descriptor().unwrap(), id)
1215 .map_err(WlError::WaitContextAdd)?;
1216 let resp = WlResp::VfdNew {
1217 id,
1218 flags: 0,
1219 pfn: 0,
1220 size: 0,
1221 resp: true,
1222 };
1223 entry.insert(vfd);
1224 Ok(resp)
1225 }
1226 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1227 }
1228 }
1229
1230 fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
1231 if id & VFD_ID_HOST_MASK != 0 {
1232 return Ok(WlResp::InvalidId);
1233 }
1234
1235 if self.use_transition_flags {
1236 if flags != 0 {
1237 return Ok(WlResp::InvalidFlags);
1238 }
1239 } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
1240 return Ok(WlResp::Err(Box::from("invalid flags")));
1241 }
1242
1243 if self.vfds.contains_key(&id) {
1244 return Ok(WlResp::InvalidId);
1245 }
1246 let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
1247 let resp = WlResp::VfdNew {
1248 id,
1249 flags,
1250 pfn: self.compute_pfn(&vfd.offset()),
1251 size: vfd.size().unwrap_or_default() as u32,
1252 resp: true,
1253 };
1254 self.vfds.insert(id, vfd);
1255 Ok(resp)
1256 }
1257
1258 #[cfg(feature = "minigbm")]
1259 fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
1260 if id & VFD_ID_HOST_MASK != 0 {
1261 return Ok(WlResp::InvalidId);
1262 }
1263
1264 if self.vfds.contains_key(&id) {
1265 return Ok(WlResp::InvalidId);
1266 }
1267 let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
1268 let resp = WlResp::VfdNewDmabuf {
1269 id,
1270 flags: 0,
1271 pfn: self.compute_pfn(&vfd.offset()),
1272 size: vfd.size().unwrap_or_default() as u32,
1273 desc,
1274 };
1275 self.vfds.insert(id, vfd);
1276 Ok(resp)
1277 }
1278
1279 #[cfg(feature = "minigbm")]
1280 fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
1281 if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
1282 return Ok(WlResp::InvalidFlags);
1283 }
1284
1285 match self.vfds.get_mut(&vfd_id) {
1286 Some(vfd) => {
1287 vfd.dmabuf_sync(flags)?;
1288 Ok(WlResp::Ok)
1289 }
1290 None => Ok(WlResp::InvalidId),
1291 }
1292 }
1293
1294 fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
1295 if id & VFD_ID_HOST_MASK != 0 {
1296 return Ok(WlResp::InvalidId);
1297 }
1298
1299 let flags = if self.use_transition_flags {
1300 VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
1301 } else {
1302 VIRTIO_WL_VFD_CONTROL
1303 };
1304
1305 match self.vfds.entry(id) {
1306 Entry::Vacant(entry) => {
1307 let vfd = entry.insert(WlVfd::connect(
1308 self.wayland_paths
1309 .get(name)
1310 .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
1311 )?);
1312 self.wait_ctx
1313 .add(vfd.wait_descriptor().unwrap(), id)
1314 .map_err(WlError::WaitContextAdd)?;
1315 Ok(WlResp::VfdNew {
1316 id,
1317 flags,
1318 pfn: 0,
1319 size: 0,
1320 resp: true,
1321 })
1322 }
1323 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1324 }
1325 }
1326
1327 fn process_wait_context(&mut self) {
1328 let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
1329 Ok(v) => v,
1330 Err(e) => {
1331 error!("failed waiting for vfd evens: {}", e);
1332 return;
1333 }
1334 };
1335
1336 for event in events.iter().filter(|e| e.is_readable) {
1337 if let Err(e) = self.recv(event.token) {
1338 error!("failed to recv from vfd: {}", e)
1339 }
1340 }
1341
1342 for event in events.iter().filter(|e| e.is_hungup) {
1343 if !event.is_readable {
1344 let vfd_id = event.token;
1345 if let Some(descriptor) =
1346 self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
1347 {
1348 if let Err(e) = self.wait_ctx.delete(descriptor) {
1349 warn!("failed to remove hungup vfd from poll context: {}", e);
1350 }
1351 }
1352 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1353 }
1354 }
1355 }
1356
1357 fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
1358 let mut to_delete = BTreeSet::new();
1359 for (dest_vfd_id, q) in &self.in_queue {
1360 if *dest_vfd_id == vfd_id {
1361 if let WlRecv::Vfd { id } = q {
1362 to_delete.insert(*id);
1363 }
1364 }
1365 }
1366 for vfd_id in to_delete {
1367 // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
1368 let _ = self.close(vfd_id);
1369 }
1370 match self.vfds.remove(&vfd_id) {
1371 Some(mut vfd) => {
1372 self.in_queue.retain(|&(id, _)| id != vfd_id);
1373 vfd.close()?;
1374 Ok(WlResp::Ok)
1375 }
1376 None => Ok(WlResp::InvalidId),
1377 }
1378 }
1379
1380 #[cfg(feature = "gpu")]
1381 fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
1382 let sock = self.resource_bridge.as_ref().unwrap();
1383 match get_resource_info(sock, request) {
1384 Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
1385 Ok(ResourceInfo::Fence { handle }) => Some(handle),
1386 Err(ResourceBridgeError::InvalidResource(req)) => {
1387 warn!("attempt to send non-existent gpu resource {}", req);
1388 None
1389 }
1390 Err(e) => {
1391 error!("{}", e);
1392 // If there was an error with the resource bridge, it can no longer be
1393 // trusted to continue to function.
1394 self.resource_bridge = None;
1395 None
1396 }
1397 }
1398 }
1399
1400 fn send(
1401 &mut self,
1402 vfd_id: u32,
1403 vfd_count: usize,
1404 foreign_id: bool,
1405 reader: &mut Reader,
1406 ) -> WlResult<WlResp> {
1407 // First stage gathers and normalizes all id information from guest memory.
1408 let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
1409 kind: Le32::from(0),
1410 payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
1411 }; VIRTWL_SEND_MAX_ALLOCS];
1412 for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
1413 *vfd_id = if foreign_id {
1414 if self.use_send_vfd_v2 {
1415 reader.read_obj().map_err(WlError::ParseDesc)?
1416 } else {
1417 let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
1418 CtrlVfdSendVfdV2 {
1419 kind: vfd.kind,
1420 payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
1421 }
1422 }
1423 } else {
1424 CtrlVfdSendVfdV2 {
1425 kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
1426 payload: CtrlVfdSendVfdV2Payload {
1427 id: reader.read_obj().map_err(WlError::ParseDesc)?,
1428 },
1429 }
1430 };
1431 }
1432
1433 // Next stage collects corresponding file descriptors for each id.
1434 let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
1435 #[cfg(feature = "gpu")]
1436 let mut bridged_files = Vec::new();
1437 for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
1438 match send_vfd_id.kind.to_native() {
1439 VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
1440 match self.vfds.get(&send_vfd_id.id().to_native()) {
1441 Some(vfd) => match vfd.send_descriptor() {
1442 Some(vfd_fd) => *descriptor = vfd_fd,
1443 None => return Ok(WlResp::InvalidType),
1444 },
1445 None => {
1446 warn!(
1447 "attempt to send non-existant vfd 0x{:08x}",
1448 send_vfd_id.id().to_native()
1449 );
1450 return Ok(WlResp::InvalidId);
1451 }
1452 }
1453 }
1454 #[cfg(feature = "gpu")]
1455 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
1456 match self.get_info(ResourceRequest::GetBuffer {
1457 id: send_vfd_id.id().to_native(),
1458 }) {
1459 Some(handle) => {
1460 *descriptor = handle.as_raw_descriptor();
1461 bridged_files.push(handle.into());
1462 }
1463 None => return Ok(WlResp::InvalidId),
1464 }
1465 }
1466 #[cfg(feature = "gpu")]
1467 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
1468 match self.get_info(ResourceRequest::GetFence {
1469 seqno: send_vfd_id.seqno().to_native(),
1470 }) {
1471 Some(handle) => {
1472 *descriptor = handle.as_raw_descriptor();
1473 bridged_files.push(handle.into());
1474 }
1475 None => return Ok(WlResp::InvalidId),
1476 }
1477 }
1478 #[cfg(feature = "gpu")]
1479 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
1480 if self.resource_bridge.is_some() =>
1481 {
1482 if self.signaled_fence.is_none() {
1483 // If the guest is sending a signaled fence, we know a fence
1484 // with seqno 0 must already be signaled.
1485 match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
1486 Some(handle) => self.signaled_fence = Some(handle),
1487 None => return Ok(WlResp::InvalidId),
1488 }
1489 }
1490 match self.signaled_fence.as_ref().unwrap().try_clone() {
1491 Ok(dup) => {
1492 *descriptor = dup.into_raw_descriptor();
1493 // SAFETY:
1494 // Safe because the fd comes from a valid SafeDescriptor.
1495 let file: File = unsafe {
1496 base::FromRawDescriptor::from_raw_descriptor(*descriptor)
1497 };
1498 bridged_files.push(file);
1499 }
1500 Err(_) => return Ok(WlResp::InvalidId),
1501 }
1502 }
1503 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
1504 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
1505 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
1506 let _ = self.resource_bridge.as_ref();
1507 warn!("attempt to send foreign resource kind but feature is disabled");
1508 }
1509 kind => {
1510 warn!("attempt to send unknown foreign resource kind: {}", kind);
1511 return Ok(WlResp::InvalidId);
1512 }
1513 }
1514 }
1515
1516 // Final stage sends file descriptors and data to the target vfd's socket.
1517 match self.vfds.get_mut(&vfd_id) {
1518 Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
1519 WlResp::Ok => {}
1520 _ => return Ok(WlResp::InvalidType),
1521 },
1522 None => return Ok(WlResp::InvalidId),
1523 }
1524 // The vfds with remote FDs need to be closed so that the local side can receive
1525 // hangup events.
1526 for &send_vfd_id in &send_vfd_ids[..vfd_count] {
1527 if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
1528 if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
1529 vfd.close_remote();
1530 }
1531 }
1532 }
1533 Ok(WlResp::Ok)
1534 }
1535
1536 fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
1537 let buf = match self.vfds.get_mut(&vfd_id) {
1538 Some(vfd) => {
1539 if vfd.is_fence {
1540 if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
1541 warn!("failed to remove hungup vfd from poll context: {}", e);
1542 }
1543 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1544 return Ok(());
1545 } else {
1546 vfd.recv(&mut self.in_file_queue)?
1547 }
1548 }
1549 None => return Ok(()),
1550 };
1551
1552 if self.in_file_queue.is_empty() && buf.is_empty() {
1553 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1554 return Ok(());
1555 }
1556 for file in self.in_file_queue.drain(..) {
1557 let vfd = WlVfd::from_file(self.vm.clone(), file)?;
1558 if let Some(wait_descriptor) = vfd.wait_descriptor() {
1559 self.wait_ctx
1560 .add(wait_descriptor, self.next_vfd_id)
1561 .map_err(WlError::WaitContextAdd)?;
1562 }
1563 // Only necessary if we somehow wrap the id counter. The try_insert
1564 // API would be nicer, but that's currently experimental.
1565 while self.vfds.contains_key(&self.next_vfd_id) {
1566 self.next_vfd_id += 1;
1567 }
1568 self.vfds.insert(self.next_vfd_id, vfd);
1569 self.in_queue.push_back((
1570 vfd_id,
1571 WlRecv::Vfd {
1572 id: self.next_vfd_id,
1573 },
1574 ));
1575 self.next_vfd_id += 1;
1576 }
1577 self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
1578
1579 Ok(())
1580 }
1581
1582 fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
1583 let type_: Le32 = reader.peek_obj::<Le32>().map_err(WlError::ParseDesc)?;
1584 match type_.into() {
1585 VIRTIO_WL_CMD_VFD_NEW => {
1586 let ctrl = reader
1587 .read_obj::<CtrlVfdNew>()
1588 .map_err(WlError::ParseDesc)?;
1589 self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
1590 }
1591 VIRTIO_WL_CMD_VFD_CLOSE => {
1592 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1593 self.close(ctrl.id.into())
1594 }
1595 VIRTIO_WL_CMD_VFD_SEND => {
1596 let ctrl = reader
1597 .read_obj::<CtrlVfdSend>()
1598 .map_err(WlError::ParseDesc)?;
1599 let foreign_id = false;
1600 self.send(
1601 ctrl.id.into(),
1602 ctrl.vfd_count.to_native() as usize,
1603 foreign_id,
1604 reader,
1605 )
1606 }
1607 #[cfg(feature = "gpu")]
1608 VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
1609 let ctrl = reader
1610 .read_obj::<CtrlVfdSend>()
1611 .map_err(WlError::ParseDesc)?;
1612 let foreign_id = true;
1613 self.send(
1614 ctrl.id.into(),
1615 ctrl.vfd_count.to_native() as usize,
1616 foreign_id,
1617 reader,
1618 )
1619 }
1620 VIRTIO_WL_CMD_VFD_NEW_CTX => {
1621 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1622 self.new_context(ctrl.id.into(), "")
1623 }
1624 VIRTIO_WL_CMD_VFD_NEW_PIPE => {
1625 let ctrl = reader
1626 .read_obj::<CtrlVfdNew>()
1627 .map_err(WlError::ParseDesc)?;
1628 self.new_pipe(ctrl.id.into(), ctrl.flags.into())
1629 }
1630 #[cfg(feature = "minigbm")]
1631 VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
1632 let ctrl = reader
1633 .read_obj::<CtrlVfdNewDmabuf>()
1634 .map_err(WlError::ParseDesc)?;
1635 self.new_dmabuf(
1636 ctrl.id.into(),
1637 ctrl.width.into(),
1638 ctrl.height.into(),
1639 ctrl.format.into(),
1640 )
1641 }
1642 #[cfg(feature = "minigbm")]
1643 VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
1644 let ctrl = reader
1645 .read_obj::<CtrlVfdDmabufSync>()
1646 .map_err(WlError::ParseDesc)?;
1647 self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
1648 }
1649 VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
1650 let ctrl = reader
1651 .read_obj::<CtrlVfdNewCtxNamed>()
1652 .map_err(WlError::ParseDesc)?;
1653 let name_len = ctrl
1654 .name
1655 .iter()
1656 .position(|x| x == &0)
1657 .unwrap_or(ctrl.name.len());
1658 let name =
1659 std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
1660 self.new_context(ctrl.id.into(), name)
1661 }
1662 op_type => {
1663 warn!("unexpected command {}", op_type);
1664 Ok(WlResp::InvalidCommand)
1665 }
1666 }
1667 }
1668
1669 fn next_recv(&self) -> Option<WlResp> {
1670 if let Some(q) = self.in_queue.front() {
1671 match *q {
1672 (vfd_id, WlRecv::Vfd { id }) => {
1673 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1674 match self.vfds.get(&id) {
1675 Some(vfd) => Some(WlResp::VfdNew {
1676 id,
1677 flags: vfd.flags(self.use_transition_flags),
1678 pfn: self.compute_pfn(&vfd.offset()),
1679 size: vfd.size().unwrap_or_default() as u32,
1680 resp: false,
1681 }),
1682 _ => Some(WlResp::VfdNew {
1683 id,
1684 flags: 0,
1685 pfn: 0,
1686 size: 0,
1687 resp: false,
1688 }),
1689 }
1690 } else {
1691 Some(WlResp::VfdRecv {
1692 id: self.current_recv_vfd.unwrap(),
1693 data: &[],
1694 vfds: &self.recv_vfds[..],
1695 })
1696 }
1697 }
1698 (vfd_id, WlRecv::Data { ref buf }) => {
1699 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1700 Some(WlResp::VfdRecv {
1701 id: vfd_id,
1702 data: &buf[..],
1703 vfds: &self.recv_vfds[..],
1704 })
1705 } else {
1706 Some(WlResp::VfdRecv {
1707 id: self.current_recv_vfd.unwrap(),
1708 data: &[],
1709 vfds: &self.recv_vfds[..],
1710 })
1711 }
1712 }
1713 (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
1714 }
1715 } else {
1716 None
1717 }
1718 }
1719
1720 fn pop_recv(&mut self) {
1721 if let Some(q) = self.in_queue.front() {
1722 match *q {
1723 (vfd_id, WlRecv::Vfd { id }) => {
1724 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1725 self.recv_vfds.push(id);
1726 self.current_recv_vfd = Some(vfd_id);
1727 } else {
1728 self.recv_vfds.clear();
1729 self.current_recv_vfd = None;
1730 return;
1731 }
1732 }
1733 (vfd_id, WlRecv::Data { .. }) => {
1734 self.recv_vfds.clear();
1735 self.current_recv_vfd = None;
1736 if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
1737 return;
1738 }
1739 }
1740 (_, WlRecv::Hup) => {
1741 self.recv_vfds.clear();
1742 self.current_recv_vfd = None;
1743 }
1744 }
1745 }
1746 self.in_queue.pop_front();
1747 }
1748
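// Converts a shmem offset into the pfn reported to the guest, applying the fixed address offset
// when the region is mapped directly into guest physical memory.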
1749 fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
1750 let addr = match (offset, self.address_offset) {
1751 (Some(o), Some(address_offset)) => o + address_offset,
1752 (Some(o), None) => *o,
1753 // without shmem, 0 is the special address for "no_pfn"
1754 (None, Some(_)) => 0,
1755 // with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
1756 (None, None) => WL_SHMEM_SIZE,
1757 };
1758 addr >> VIRTIO_WL_PFN_SHIFT
1759 }
1760 }
1761
1762 #[derive(ThisError, Debug, PartialEq, Eq)]
1763 #[error("no descriptors available in queue")]
1764 pub struct DescriptorsExhausted;
1765
1766 /// Handle incoming events and forward them to the VM over the input queue.
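/// Returns [`DescriptorsExhausted`] when the `in` queue runs out of descriptors, so the caller can
/// stop polling vfd events until the guest adds more buffers.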
1767 pub fn process_in_queue(
1768 in_queue: &mut Queue,
1769 state: &mut WlState,
1770 ) -> ::std::result::Result<(), DescriptorsExhausted> {
1771 state.process_wait_context();
1772
1773 let mut needs_interrupt = false;
1774 let mut exhausted_queue = false;
1775 loop {
1776 let mut desc = if let Some(d) = in_queue.peek() {
1777 d
1778 } else {
1779 exhausted_queue = true;
1780 break;
1781 };
1782
1783 let mut should_pop = false;
1784 if let Some(in_resp) = state.next_recv() {
1785 match encode_resp(&mut desc.writer, in_resp) {
1786 Ok(()) => {
1787 should_pop = true;
1788 }
1789 Err(e) => {
1790 error!("failed to encode response to descriptor chain: {}", e);
1791 }
1792 }
1793 let bytes_written = desc.writer.bytes_written() as u32;
1794 needs_interrupt = true;
1795 let desc = desc.pop();
1796 in_queue.add_used(desc, bytes_written);
1797 } else {
1798 break;
1799 }
1800 if should_pop {
1801 state.pop_recv();
1802 }
1803 }
1804
1805 if needs_interrupt {
1806 in_queue.trigger_interrupt();
1807 }
1808
1809 if exhausted_queue {
1810 Err(DescriptorsExhausted)
1811 } else {
1812 Ok(())
1813 }
1814 }
1815
/// Handle messages from the output queue and forward them to the display server, if necessary.
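/// Each chain is decoded and executed by `WlState::execute`; the resulting response (or
/// error) is encoded back into the same chain before it is returned to the guest.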
pub fn process_out_queue(out_queue: &mut Queue, state: &mut WlState) {
1818 let mut needs_interrupt = false;
1819 while let Some(mut desc) = out_queue.pop() {
1820 let resp = match state.execute(&mut desc.reader) {
1821 Ok(r) => r,
1822 Err(e) => WlResp::Err(Box::new(e)),
1823 };
1824
1825 match encode_resp(&mut desc.writer, resp) {
1826 Ok(()) => {}
1827 Err(e) => {
1828 error!("failed to encode response to descriptor chain: {}", e);
1829 }
1830 }
1831
1832 let len = desc.writer.bytes_written() as u32;
1833 out_queue.add_used(desc, len);
1834 needs_interrupt = true;
1835 }
1836
1837 if needs_interrupt {
1838 out_queue.trigger_interrupt();
1839 }
1840 }
1841
1842 struct Worker {
1843 interrupt: Interrupt,
1844 in_queue: Queue,
1845 out_queue: Queue,
1846 state: WlState,
1847 }
1848
1849 impl Worker {
fn new(
1851 interrupt: Interrupt,
1852 in_queue: Queue,
1853 out_queue: Queue,
1854 wayland_paths: BTreeMap<String, PathBuf>,
1855 mapper: Box<dyn SharedMemoryMapper>,
1856 use_transition_flags: bool,
1857 use_send_vfd_v2: bool,
1858 resource_bridge: Option<Tube>,
1859 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1860 address_offset: Option<u64>,
1861 ) -> Worker {
1862 Worker {
1863 interrupt,
1864 in_queue,
1865 out_queue,
1866 state: WlState::new(
1867 wayland_paths,
1868 mapper,
1869 use_transition_flags,
1870 use_send_vfd_v2,
1871 resource_bridge,
1872 #[cfg(feature = "minigbm")]
1873 gralloc,
1874 address_offset,
1875 ),
1876 }
1877 }
1878
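/// Runs the worker's event loop until `kill_evt` is signaled, returning the `in` and `out`
/// queues so the device can later be re-activated.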
fn run(mut self, kill_evt: Event) -> anyhow::Result<Vec<Queue>> {
1880 #[derive(EventToken)]
1881 enum Token {
1882 InQueue,
1883 OutQueue,
1884 Kill,
1885 State,
1886 InterruptResample,
1887 }
1888
1889 let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
1890 (self.in_queue.event(), Token::InQueue),
1891 (self.out_queue.event(), Token::OutQueue),
1892 (&kill_evt, Token::Kill),
1893 (&self.state.wait_ctx, Token::State),
1894 ])
1895 .context("failed creating WaitContext")?;
1896
1897 if let Some(resample_evt) = self.interrupt.get_resample_evt() {
1898 wait_ctx
1899 .add(resample_evt, Token::InterruptResample)
1900 .context("failed adding resample event to WaitContext.")?;
1901 }
1902
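// While the in queue has no descriptors available, the WlState wait context is switched to
// EventType::None so host-side events don't busy-loop; it is switched back to Read once the
// guest posts new descriptors (see Token::InQueue below).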
1903 let mut watching_state_ctx = true;
1904 'wait: loop {
1905 let events = match wait_ctx.wait() {
1906 Ok(v) => v,
1907 Err(e) => {
1908 error!("failed waiting for events: {}", e);
1909 break;
1910 }
1911 };
1912
1913 for event in &events {
1914 match event.token {
1915 Token::InQueue => {
1916 let _ = self.in_queue.event().wait();
1917 if !watching_state_ctx {
1918 if let Err(e) =
1919 wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
1920 {
1921 error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
1922 break;
1923 }
1924 watching_state_ctx = true;
1925 }
1926 }
1927 Token::OutQueue => {
1928 let _ = self.out_queue.event().wait();
1929 process_out_queue(&mut self.out_queue, &mut self.state);
1930 }
1931 Token::Kill => break 'wait,
1932 Token::State => {
1933 if let Err(DescriptorsExhausted) =
1934 process_in_queue(&mut self.in_queue, &mut self.state)
1935 {
1936 if let Err(e) =
1937 wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
1938 {
1939 error!(
1940 "Failed to stop watching wait_ctx descriptor for WlState: {}",
1941 e
1942 );
1943 break;
1944 }
1945 watching_state_ctx = false;
1946 }
1947 }
1948 Token::InterruptResample => {
1949 self.interrupt.interrupt_resample();
1950 }
1951 }
1952 }
1953 }
1954
1955 let in_queue = self.in_queue;
1956 let out_queue = self.out_queue;
1957
1958 Ok(vec![in_queue, out_queue])
1959 }
1960 }
1961
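/// The virtio-wl device. Configuration is held here until `activate` moves it into a `Worker`
/// running on its own thread.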
1962 pub struct Wl {
1963 worker_thread: Option<WorkerThread<anyhow::Result<Vec<Queue>>>>,
1964 wayland_paths: BTreeMap<String, PathBuf>,
1965 mapper: Option<Box<dyn SharedMemoryMapper>>,
1966 resource_bridge: Option<Tube>,
1967 base_features: u64,
1968 acked_features: u64,
1969 #[cfg(feature = "minigbm")]
1970 gralloc: Option<RutabagaGralloc>,
1971 address_offset: Option<u64>,
1972 }
1973
1974 impl Wl {
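/// Creates a new virtio-wl device backed by the wayland server sockets in `wayland_paths`.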
pub fn new(
1976 base_features: u64,
1977 wayland_paths: BTreeMap<String, PathBuf>,
1978 resource_bridge: Option<Tube>,
1979 ) -> Result<Wl> {
1980 Ok(Wl {
1981 worker_thread: None,
1982 wayland_paths,
1983 mapper: None,
1984 resource_bridge,
1985 base_features,
1986 acked_features: 0,
1987 #[cfg(feature = "minigbm")]
1988 gralloc: None,
1989 address_offset: None,
1990 })
1991 }
1992 }
1993
1994 impl VirtioDevice for Wl {
fn keep_rds(&self) -> Vec<RawDescriptor> {
1996 let mut keep_rds = Vec::new();
1997
1998 if let Some(mapper) = &self.mapper {
1999 if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
2000 keep_rds.push(raw_descriptor);
2001 }
2002 }
2003 if let Some(resource_bridge) = &self.resource_bridge {
2004 keep_rds.push(resource_bridge.as_raw_descriptor());
2005 }
2006 keep_rds
2007 }
2008
2009 #[cfg(feature = "minigbm")]
fn on_device_sandboxed(&mut self) {
2011 // Gralloc initialization can cause some GPU drivers to create their own threads
2012 // and that must be done after sandboxing.
2013 match RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()) {
2014 Ok(g) => self.gralloc = Some(g),
2015 Err(e) => {
2016 error!("failed to initialize gralloc {:?}", e);
2017 }
2018 };
2019 }
2020
fn device_type(&self) -> DeviceType {
2022 DeviceType::Wl
2023 }
2024
fn queue_max_sizes(&self) -> &[u16] {
2026 QUEUE_SIZES
2027 }
2028
fn features(&self) -> u64 {
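// On top of the base features, advertise transitional flags, fence sending (send vfd v2),
// and shmem-based address reporting; the guest's acks for these bits are checked in `activate`.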
2030 self.base_features
2031 | 1 << VIRTIO_WL_F_TRANS_FLAGS
2032 | 1 << VIRTIO_WL_F_SEND_FENCES
2033 | 1 << VIRTIO_WL_F_USE_SHMEM
2034 }
2035
fn ack_features(&mut self, value: u64) {
2037 self.acked_features |= value;
2038 }
2039
fn activate(
2041 &mut self,
2042 _mem: GuestMemory,
2043 interrupt: Interrupt,
2044 mut queues: BTreeMap<usize, Queue>,
2045 ) -> anyhow::Result<()> {
2046 if queues.len() != QUEUE_SIZES.len() {
2047 return Err(anyhow!(
2048 "expected {} queues, got {}",
2049 QUEUE_SIZES.len(),
2050 queues.len()
2051 ));
2052 }
2053
2054 let mapper = self.mapper.take().context("missing mapper")?;
2055
2056 let wayland_paths = self.wayland_paths.clone();
2057 let use_transition_flags = self.acked_features & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0;
2058 let use_send_vfd_v2 = self.acked_features & (1 << VIRTIO_WL_F_SEND_FENCES) != 0;
2059 let use_shmem = self.acked_features & (1 << VIRTIO_WL_F_USE_SHMEM) != 0;
2060 let resource_bridge = self.resource_bridge.take();
2061 #[cfg(feature = "minigbm")]
2062 let gralloc = self
2063 .gralloc
2064 .take()
2065 .expect("gralloc already passed to worker");
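// When the shmem feature was negotiated, vfd offsets are reported relative to the device's
// shared memory region, so no fixed address offset is applied (see `compute_pfn`).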
2066 let address_offset = if !use_shmem {
2067 self.address_offset
2068 } else {
2069 None
2070 };
2071
2072 self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
2073 Worker::new(
2074 interrupt,
2075 queues.pop_first().unwrap().1,
2076 queues.pop_first().unwrap().1,
2077 wayland_paths,
2078 mapper,
2079 use_transition_flags,
2080 use_send_vfd_v2,
2081 resource_bridge,
2082 #[cfg(feature = "minigbm")]
2083 gralloc,
2084 address_offset,
2085 )
2086 .run(kill_evt)
2087 }));
2088
2089 Ok(())
2090 }
2091
fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
2093 Some(SharedMemoryRegion {
2094 id: WL_SHMEM_ID,
2095 length: WL_SHMEM_SIZE,
2096 })
2097 }
2098
fn set_shared_memory_region_base(&mut self, shmem_base: GuestAddress) {
2100 self.address_offset = Some(shmem_base.0);
2101 }
2102
fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
2104 self.mapper = Some(mapper);
2105 }
2106
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
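// Stopping the worker returns the in/out queues so they can be handed back on wake.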
2108 if let Some(worker_thread) = self.worker_thread.take() {
2109 let queues = worker_thread.stop()?;
2110 return Ok(Some(BTreeMap::from_iter(queues.into_iter().enumerate())));
2111 }
2112 Ok(None)
2113 }
2114
fn virtio_wake(
2116 &mut self,
2117 device_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
2118 ) -> anyhow::Result<()> {
2119 match device_state {
2120 None => Ok(()),
2121 Some((mem, interrupt, queues)) => {
2122 // TODO: activate is just what we want at the moment, but we should probably move
2123 // it into a "start workers" function to make it obvious that it isn't strictly
2124 // used for activate events.
2125 self.activate(mem, interrupt, queues)?;
2126 Ok(())
2127 }
2128 }
2129 }
2130
2131 // ANDROID: Add empty implementations for successful snapshot taking. Change to full
2132 // implementation as part of b/266514618
2133 // virtio-wl is not used, but is created. As such, virtio_snapshot/restore will be called when
2134 // cuttlefish attempts to take a snapshot.
fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
2136 Ok(serde_json::Value::Null)
2137 }
2138
fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
2140 anyhow::ensure!(
2141 data == serde_json::Value::Null,
2142 "unexpected snapshot data: should be null, got {}",
2143 data,
2144 );
2145 Ok(())
2146 }
2147 }
2148