// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-Google file.

//! Common traits and structs for vhost-kern and vhost-user backend drivers.

use std::cell::RefCell;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::sync::RwLock;

use vm_memory::{bitmap::Bitmap, Address, GuestMemoryRegion, GuestRegionMmap};
use vmm_sys_util::eventfd::EventFd;

#[cfg(feature = "vhost-user")]
use super::vhost_user::message::{VhostUserMemoryRegion, VhostUserSingleMemoryRegion};
use super::{Error, Result};

/// Maximum number of memory regions supported.
pub const VHOST_MAX_MEMORY_REGIONS: usize = 255;

/// Vring configuration data.
#[derive(Default, Clone, Copy)]
pub struct VringConfigData {
    /// Maximum queue size supported by the driver.
    pub queue_max_size: u16,
    /// Actual queue size negotiated by the driver.
    pub queue_size: u16,
    /// Bitmask of vring flags.
    pub flags: u32,
    /// Descriptor table address.
    pub desc_table_addr: u64,
    /// Used ring buffer address.
    pub used_ring_addr: u64,
    /// Available ring buffer address.
    pub avail_ring_addr: u64,
    /// Optional address for logging.
    pub log_addr: Option<u64>,
}

impl VringConfigData {
    /// Check whether the log (flag, address) pair is valid.
    pub fn is_log_addr_valid(&self) -> bool {
        if self.flags & 0x1 != 0 && self.log_addr.is_none() {
            return false;
        }

        true
    }

    /// Get the log address, default to zero if not available.
    pub fn get_log_addr(&self) -> u64 {
        if self.flags & 0x1 != 0 && self.log_addr.is_some() {
            self.log_addr.unwrap()
        } else {
            0
        }
    }
}

/// Memory region configuration data.
#[derive(Default, Clone, Copy)]
pub struct VhostUserMemoryRegionInfo {
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub userspace_addr: u64,
    /// Optional offset where the region starts in the mapped memory.
    pub mmap_offset: u64,
    /// Optional file descriptor for mmap.
    pub mmap_handle: RawFd,

    #[cfg(feature = "xen")]
    /// Xen specific flags.
    pub xen_mmap_flags: u32,

    #[cfg(feature = "xen")]
    /// Xen specific data.
    pub xen_mmap_data: u32,
}

impl VhostUserMemoryRegionInfo {
    /// Creates Self from GuestRegionMmap.
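    ///
    /// # Example
    ///
    /// A minimal sketch (setup elided): the region must carry a `FileOffset`
    /// backing, otherwise this returns `Error::InvalidGuestMemoryRegion`.
    ///
    /// ```ignore
    /// // `region` is a file-backed `GuestRegionMmap`, `backend` any `VhostBackend`.
    /// let info = VhostUserMemoryRegionInfo::from_guest_region(&region)?;
    /// backend.set_mem_table(&[info])?;
    /// ```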
    pub fn from_guest_region<B: Bitmap>(region: &GuestRegionMmap<B>) -> Result<Self> {
        let file_offset = region
            .file_offset()
            .ok_or(Error::InvalidGuestMemoryRegion)?;

        Ok(Self {
            guest_phys_addr: region.start_addr().raw_value(),
            memory_size: region.len(),
            userspace_addr: region.as_ptr() as u64,
            mmap_offset: file_offset.start(),
            mmap_handle: file_offset.file().as_raw_fd(),
            #[cfg(feature = "xen")]
            xen_mmap_flags: region.xen_mmap_flags(),
            #[cfg(feature = "xen")]
            xen_mmap_data: region.xen_mmap_data(),
        })
    }

    /// Creates VhostUserMemoryRegion from Self.
    #[cfg(feature = "vhost-user")]
    pub fn to_region(&self) -> VhostUserMemoryRegion {
        #[cfg(not(feature = "xen"))]
        return VhostUserMemoryRegion::new(
            self.guest_phys_addr,
            self.memory_size,
            self.userspace_addr,
            self.mmap_offset,
        );

        #[cfg(feature = "xen")]
        VhostUserMemoryRegion::with_xen(
            self.guest_phys_addr,
            self.memory_size,
            self.userspace_addr,
            self.mmap_offset,
            self.xen_mmap_flags,
            self.xen_mmap_data,
        )
    }

    /// Creates VhostUserSingleMemoryRegion from Self.
    #[cfg(feature = "vhost-user")]
    pub fn to_single_region(&self) -> VhostUserSingleMemoryRegion {
        VhostUserSingleMemoryRegion::new(
            self.guest_phys_addr,
            self.memory_size,
            self.userspace_addr,
            self.mmap_offset,
            #[cfg(feature = "xen")]
            self.xen_mmap_flags,
            #[cfg(feature = "xen")]
            self.xen_mmap_data,
        )
    }
}

/// Shared memory region data for logging dirty pages.
#[derive(Default, Clone, Copy)]
pub struct VhostUserDirtyLogRegion {
    /// Size of the shared memory region for logging dirty pages.
    pub mmap_size: u64,
    /// Offset where the region starts.
    pub mmap_offset: u64,
    /// File descriptor for mmap.
    pub mmap_handle: RawFd,
}

/// Vhost memory access permission (VHOST_ACCESS_* mapping).
#[repr(u8)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub enum VhostAccess {
    /// No access.
    #[default]
    No = 0,
    /// Read-Only access.
    ReadOnly = 1,
    /// Write-Only access.
    WriteOnly = 2,
    /// Read and Write access.
    ReadWrite = 3,
}

/// Vhost IOTLB message type (VHOST_IOTLB_* mapping).
#[repr(u8)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub enum VhostIotlbType {
    /// Empty message (not valid).
    #[default]
    Empty = 0,
    /// I/O virtual address mapping is missing or invalidated.
    Miss = 1,
    /// Update the I/O virtual address mapping.
    Update = 2,
    /// Invalidate the I/O virtual address mapping.
    Invalidate = 3,
    /// Access failed to an I/O virtual address.
    AccessFail = 4,
    /// Batch of multiple `Update` messages begins.
    BatchBegin = 5,
    /// Batch of multiple `Update` messages ends.
    BatchEnd = 6,
}

/// Vhost IOTLB message structure.
#[derive(Default, Clone, Copy)]
pub struct VhostIotlbMsg {
    /// I/O virtual address.
    pub iova: u64,
    /// Size of the I/O mapping.
    pub size: u64,
    /// Virtual address in the current process.
    pub userspace_addr: u64,
    /// Access permissions.
    pub perm: VhostAccess,
    /// Type of the message.
    pub msg_type: VhostIotlbType,
}

/// Vhost IOTLB message parser.
pub trait VhostIotlbMsgParser {
    /// Parse the IOTLB message and fill a VhostIotlbMsg.
    ///
    /// # Arguments
    /// * `msg` - IOTLB message to fill in.
    fn parse(&self, msg: &mut VhostIotlbMsg) -> Result<()>;
}

/// An interface for IOTLB message support in vhost-based backends.
pub trait VhostIotlbBackend: std::marker::Sized {
    /// Send an IOTLB message to the vhost-based backend.
    ///
    /// # Arguments
    /// * `msg` - IOTLB message to send.
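    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative; `backend` is any type
    /// implementing this trait):
    ///
    /// ```ignore
    /// let msg = VhostIotlbMsg {
    ///     iova: 0x10_0000,
    ///     size: 0x1000,
    ///     userspace_addr: 0x7f00_2000_0000,
    ///     perm: VhostAccess::ReadWrite,
    ///     msg_type: VhostIotlbType::Update,
    /// };
    /// backend.send_iotlb_msg(&msg)?;
    /// ```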
    fn send_iotlb_msg(&self, msg: &VhostIotlbMsg) -> Result<()>;
}

/// An interface for setting up vhost-based backend drivers with interior mutability.
///
/// Vhost devices are a subset of virtio devices that improve performance by delegating
/// data plane operations to dedicated I/O service processes. Vhost devices use the same
/// virtqueue layout as virtio devices, which allows vhost devices to be mapped directly
/// to virtio devices.
///
/// The purpose of vhost is to implement a subset of a virtio device's functionality outside
/// the VMM process. Typically the fast path for I/O operations is delegated to the dedicated
/// I/O service processes, while the slow path for device configuration is still handled by
/// the VMM process. It may also be used to control access permissions of virtio backend
/// devices.
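///
/// # Example
///
/// A minimal sketch of a typical setup sequence over any implementation of this
/// trait (the helper name, queue index and error handling are illustrative):
///
/// ```ignore
/// fn setup_queue0<B: VhostBackend>(backend: &B, cfg: &VringConfigData) -> Result<()> {
///     backend.set_owner()?;
///     let features = backend.get_features()?;
///     backend.set_features(features)?;
///     backend.set_vring_num(0, cfg.queue_size)?;
///     backend.set_vring_addr(0, cfg)?;
///     backend.set_vring_base(0, 0)?;
///     Ok(())
/// }
/// ```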
pub trait VhostBackend: std::marker::Sized {
    /// Get a bitmask of supported virtio/vhost features.
    fn get_features(&self) -> Result<u64>;

    /// Inform the vhost subsystem which features to enable.
    /// This should be a subset of the supported features from get_features().
    ///
    /// # Arguments
    /// * `features` - Bitmask of features to set.
    fn set_features(&self, features: u64) -> Result<()>;

    /// Set the current process as the owner of the vhost backend.
    /// This must be run before any other vhost commands.
    fn set_owner(&self) -> Result<()>;

    /// Used to be sent to request disabling all rings.
    /// This is no longer used.
    fn reset_owner(&self) -> Result<()>;

    /// Set the guest memory mappings for vhost to use.
    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>;

    /// Set the base address for page modification logging.
    fn set_log_base(&self, base: u64, region: Option<VhostUserDirtyLogRegion>) -> Result<()>;

    /// Specify an eventfd file descriptor to signal on log write.
    fn set_log_fd(&self, fd: RawFd) -> Result<()>;

    /// Set the number of descriptors in the vring.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to set the descriptor count for.
    /// * `num` - Number of descriptors in the queue.
    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>;

    /// Set the addresses for a given vring.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to set addresses for.
    /// * `config_data` - Configuration data for a vring.
    fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;

    /// Set the first index to look for available descriptors.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `base` - Index where available descriptors start.
    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>;

    /// Get the available vring base offset.
    fn get_vring_base(&self, queue_index: usize) -> Result<u32>;

    /// Set the eventfd to trigger when buffers have been used by the host.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd to trigger.
    fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>;

    /// Set the eventfd that will be signaled by the guest when buffers are
    /// available for the host to process.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd that will be signaled from the guest.
    fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>;

    /// Set the eventfd that will be signaled by the guest when an error happens.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd that will be signaled from the guest.
    fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
}

/// An interface for setting up vhost-based backend drivers.
///
/// Vhost devices are a subset of virtio devices that improve performance by delegating
/// data plane operations to dedicated I/O service processes. Vhost devices use the same
/// virtqueue layout as virtio devices, which allows vhost devices to be mapped directly
/// to virtio devices.
///
/// The purpose of vhost is to implement a subset of a virtio device's functionality outside
/// the VMM process. Typically the fast path for I/O operations is delegated to the dedicated
/// I/O service processes, while the slow path for device configuration is still handled by
/// the VMM process. It may also be used to control access permissions of virtio backend
/// devices.
pub trait VhostBackendMut: std::marker::Sized {
    /// Get a bitmask of supported virtio/vhost features.
    fn get_features(&mut self) -> Result<u64>;

    /// Inform the vhost subsystem which features to enable.
    /// This should be a subset of the supported features from get_features().
    ///
    /// # Arguments
    /// * `features` - Bitmask of features to set.
    fn set_features(&mut self, features: u64) -> Result<()>;

    /// Set the current process as the owner of the vhost backend.
    /// This must be run before any other vhost commands.
    fn set_owner(&mut self) -> Result<()>;

    /// Used to be sent to request disabling all rings.
    /// This is no longer used.
    fn reset_owner(&mut self) -> Result<()>;

    /// Set the guest memory mappings for vhost to use.
    fn set_mem_table(&mut self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>;

    /// Set the base address for page modification logging.
    fn set_log_base(&mut self, base: u64, region: Option<VhostUserDirtyLogRegion>) -> Result<()>;

    /// Specify an eventfd file descriptor to signal on log write.
    fn set_log_fd(&mut self, fd: RawFd) -> Result<()>;

    /// Set the number of descriptors in the vring.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to set the descriptor count for.
    /// * `num` - Number of descriptors in the queue.
    fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()>;

    /// Set the addresses for a given vring.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to set addresses for.
    /// * `config_data` - Configuration data for a vring.
    fn set_vring_addr(&mut self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;

    /// Set the first index to look for available descriptors.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `base` - Index where available descriptors start.
    fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()>;

    /// Get the available vring base offset.
    fn get_vring_base(&mut self, queue_index: usize) -> Result<u32>;

    /// Set the eventfd to trigger when buffers have been used by the host.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd to trigger.
    fn set_vring_call(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>;

    /// Set the eventfd that will be signaled by the guest when buffers are
    /// available for the host to process.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd that will be signaled from the guest.
    fn set_vring_kick(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>;

    /// Set the eventfd that will be signaled by the guest when an error happens.
    ///
    /// # Arguments
    /// * `queue_index` - Index of the queue to modify.
    /// * `fd` - EventFd that will be signaled from the guest.
    fn set_vring_err(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>;
}
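
/// Blanket adapter: wrapping any [`VhostBackendMut`] implementation in an `RwLock`
/// yields a [`VhostBackend`], with every call taking the write lock. A corresponding
/// adapter for `RefCell` follows below.
///
/// A minimal sketch (`MyBackend` is a hypothetical `VhostBackendMut` type, error
/// handling elided):
///
/// ```ignore
/// use std::sync::RwLock;
///
/// let backend = RwLock::new(MyBackend::default());
/// // `backend` can now be used wherever a `VhostBackend` is expected.
/// backend.set_owner()?;
/// backend.set_vring_num(0, 256)?;
/// ```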
impl<T: VhostBackendMut> VhostBackend for RwLock<T> {
    fn get_features(&self) -> Result<u64> {
        self.write().unwrap().get_features()
    }

    fn set_features(&self, features: u64) -> Result<()> {
        self.write().unwrap().set_features(features)
    }

    fn set_owner(&self) -> Result<()> {
        self.write().unwrap().set_owner()
    }

    fn reset_owner(&self) -> Result<()> {
        self.write().unwrap().reset_owner()
    }

    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
        self.write().unwrap().set_mem_table(regions)
    }

    fn set_log_base(&self, base: u64, region: Option<VhostUserDirtyLogRegion>) -> Result<()> {
        self.write().unwrap().set_log_base(base, region)
    }

    fn set_log_fd(&self, fd: RawFd) -> Result<()> {
        self.write().unwrap().set_log_fd(fd)
    }

    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
        self.write().unwrap().set_vring_num(queue_index, num)
    }

    fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
        self.write()
            .unwrap()
            .set_vring_addr(queue_index, config_data)
    }

    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
        self.write().unwrap().set_vring_base(queue_index, base)
    }

    fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
        self.write().unwrap().get_vring_base(queue_index)
    }

    fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.write().unwrap().set_vring_call(queue_index, fd)
    }

    fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.write().unwrap().set_vring_kick(queue_index, fd)
    }

    fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.write().unwrap().set_vring_err(queue_index, fd)
    }
}

impl<T: VhostBackendMut> VhostBackend for RefCell<T> {
    fn get_features(&self) -> Result<u64> {
        self.borrow_mut().get_features()
    }

    fn set_features(&self, features: u64) -> Result<()> {
        self.borrow_mut().set_features(features)
    }

    fn set_owner(&self) -> Result<()> {
        self.borrow_mut().set_owner()
    }

    fn reset_owner(&self) -> Result<()> {
        self.borrow_mut().reset_owner()
    }
    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
        self.borrow_mut().set_mem_table(regions)
    }

    fn set_log_base(&self, base: u64, region: Option<VhostUserDirtyLogRegion>) -> Result<()> {
        self.borrow_mut().set_log_base(base, region)
    }

    fn set_log_fd(&self, fd: RawFd) -> Result<()> {
        self.borrow_mut().set_log_fd(fd)
    }

    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
        self.borrow_mut().set_vring_num(queue_index, num)
    }

    fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
        self.borrow_mut().set_vring_addr(queue_index, config_data)
    }

    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
        self.borrow_mut().set_vring_base(queue_index, base)
    }

    fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
        self.borrow_mut().get_vring_base(queue_index)
    }

    fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.borrow_mut().set_vring_call(queue_index, fd)
    }

    fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.borrow_mut().set_vring_kick(queue_index, fd)
    }

    fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        self.borrow_mut().set_vring_err(queue_index, fd)
    }
}

#[cfg(any(test, feature = "test-utils"))]
impl VhostUserMemoryRegionInfo {
    /// Creates an instance of `VhostUserMemoryRegionInfo`.
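    ///
    /// # Example
    ///
    /// A minimal sketch with illustrative values (`-1` stands in for a real
    /// mmap file descriptor):
    ///
    /// ```ignore
    /// let region = VhostUserMemoryRegionInfo::new(0x0, 0x10_0000, 0x7f00_0000_0000, 0, -1);
    /// ```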
    pub fn new(
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        mmap_offset: u64,
        mmap_handle: RawFd,
    ) -> Self {
        Self {
            guest_phys_addr,
            memory_size,
            userspace_addr,
            mmap_offset,
            mmap_handle,

            #[cfg(feature = "xen")]
            xen_mmap_flags: vm_memory::MmapXenFlags::UNIX.bits(),
            #[cfg(feature = "xen")]
            xen_mmap_data: 0,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    struct MockBackend {}

    impl VhostBackendMut for MockBackend {
        fn get_features(&mut self) -> Result<u64> {
            Ok(0x1)
        }

        fn set_features(&mut self, features: u64) -> Result<()> {
            assert_eq!(features, 0x1);
            Ok(())
        }

        fn set_owner(&mut self) -> Result<()> {
            Ok(())
        }

        fn reset_owner(&mut self) -> Result<()> {
            Ok(())
        }

        fn set_mem_table(&mut self, _regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
            Ok(())
        }

        fn set_log_base(
            &mut self,
            base: u64,
            region: Option<VhostUserDirtyLogRegion>,
        ) -> Result<()> {
            assert_eq!(base, 0x100);
            let region = region.unwrap();
            assert_eq!(region.mmap_size, 0x1000);
            assert_eq!(region.mmap_offset, 0x10);
            assert_eq!(region.mmap_handle, 100);
            Ok(())
        }

        fn set_log_fd(&mut self, fd: RawFd) -> Result<()> {
            assert_eq!(fd, 100);
            Ok(())
        }

        fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()> {
            assert_eq!(queue_index, 1);
            assert_eq!(num, 256);
            Ok(())
        }

        fn set_vring_addr(
            &mut self,
            queue_index: usize,
            _config_data: &VringConfigData,
        ) -> Result<()> {
            assert_eq!(queue_index, 1);
            Ok(())
        }

        fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()> {
            assert_eq!(queue_index, 1);
            assert_eq!(base, 2);
            Ok(())
        }

        fn get_vring_base(&mut self, queue_index: usize) -> Result<u32> {
            assert_eq!(queue_index, 1);
            Ok(2)
        }

        fn set_vring_call(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
            assert_eq!(queue_index, 1);
            Ok(())
        }

        fn set_vring_kick(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
            assert_eq!(queue_index, 1);
            Ok(())
        }

        fn set_vring_err(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> {
            assert_eq!(queue_index, 1);
            Ok(())
        }
    }
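
    // Illustrative companion to `test_vring_backend_mut` below: the same mock
    // backend driven through the `RefCell` blanket impl of `VhostBackend`.
    #[test]
    fn test_vring_backend_refcell() {
        let b = std::cell::RefCell::new(MockBackend {});

        assert_eq!(b.get_features().unwrap(), 0x1);
        b.set_features(0x1).unwrap();
        b.set_owner().unwrap();
        b.set_vring_num(1, 256).unwrap();
        b.set_vring_base(1, 2).unwrap();
        assert_eq!(b.get_vring_base(1).unwrap(), 2);
    }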
    #[test]
    fn test_vring_backend_mut() {
        let b = RwLock::new(MockBackend {});

        assert_eq!(b.get_features().unwrap(), 0x1);
        b.set_features(0x1).unwrap();
        b.set_owner().unwrap();
        b.reset_owner().unwrap();
        b.set_mem_table(&[]).unwrap();
        b.set_log_base(
            0x100,
            Some(VhostUserDirtyLogRegion {
                mmap_size: 0x1000,
                mmap_offset: 0x10,
                mmap_handle: 100,
            }),
        )
        .unwrap();
        b.set_log_fd(100).unwrap();
        b.set_vring_num(1, 256).unwrap();

        let config = VringConfigData {
            queue_max_size: 0x1000,
            queue_size: 0x2000,
            flags: 0x0,
            desc_table_addr: 0x4000,
            used_ring_addr: 0x5000,
            avail_ring_addr: 0x6000,
            log_addr: None,
        };
        b.set_vring_addr(1, &config).unwrap();

        b.set_vring_base(1, 2).unwrap();
        assert_eq!(b.get_vring_base(1).unwrap(), 2);

        let eventfd = EventFd::new(0).unwrap();
        b.set_vring_call(1, &eventfd).unwrap();
        b.set_vring_kick(1, &eventfd).unwrap();
        b.set_vring_err(1, &eventfd).unwrap();
    }

    #[test]
    fn test_vring_config_data() {
        let mut config = VringConfigData {
            queue_max_size: 0x1000,
            queue_size: 0x2000,
            flags: 0x0,
            desc_table_addr: 0x4000,
            used_ring_addr: 0x5000,
            avail_ring_addr: 0x6000,
            log_addr: None,
        };

        assert!(config.is_log_addr_valid());
        assert_eq!(config.get_log_addr(), 0);

        config.flags = 0x1;
        assert!(!config.is_log_addr_valid());
        assert_eq!(config.get_log_addr(), 0);

        config.log_addr = Some(0x7000);
        assert!(config.is_log_addr_valid());
        assert_eq!(config.get_log_addr(), 0x7000);

        config.flags = 0x0;
        assert!(config.is_log_addr_valid());
        assert_eq!(config.get_log_addr(), 0);
    }
}