// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Define communication messages for the vhost-user protocol.
//!
//! For message definition, please refer to the [vhost-user spec](https://github.com/qemu/qemu/blob/f7526eece29cd2e36a63b6703508b24453095eb8/docs/interop/vhost-user.txt).

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]

use std::fmt::Debug;
use std::marker::PhantomData;

use base::Protection;
use bitflags::bitflags;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::VringConfigData;

/// The VhostUserMemory message has a variable message size and a variable number of attached file
/// descriptors. Each user memory region entry in the message payload occupies 32 bytes, so the
/// maximum number of attached file descriptors could be derived from the maximum message size.
/// However, Rust only implements the Default and AsMut traits for arrays with 0 to 32 entries, so
/// the limit is further reduced to 32.
// pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32;
pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;

/// Starting position (inclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100;

/// Ending position (exclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000;

/// Maximum number of vrings supported.
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;

/// Message type. Either [`FrontendReq`] or [`BackendReq`].
pub trait Req:
    Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32> + TryFrom<u32> + Send + Sync
{
}

/// Error when converting an integer to an enum value.
#[derive(Copy, Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum ReqError {
    /// The value does not correspond to a valid message code.
    #[error("The value {0} does not correspond to a valid message code.")]
    InvalidValue(u32),
}

/// Type of requests sent to the backend.
///
/// These are called "front-end message types" in the spec, so we call them `FrontendReq` here even
/// though it is somewhat confusing that the `BackendClient` sends `FrontendReq`s to a
/// `BackendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
pub enum FrontendReq {
    /// Get the features bit mask from the underlying vhost implementation.
    GET_FEATURES = 1,
    /// Enable features in the underlying vhost implementation using a bit mask.
    SET_FEATURES = 2,
    /// Set the current frontend as an owner of the session.
    SET_OWNER = 3,
    /// No longer used.
    RESET_OWNER = 4,
    /// Set the memory map regions on the backend so it can translate the vring addresses.
    SET_MEM_TABLE = 5,
    /// Set logging shared memory space.
    SET_LOG_BASE = 6,
    /// Set the logging file descriptor, which is passed as ancillary data.
    SET_LOG_FD = 7,
    /// Set the size of the queue.
    SET_VRING_NUM = 8,
    /// Set the addresses of the different aspects of the vring.
    SET_VRING_ADDR = 9,
    /// Set the base offset in the available vring.
    SET_VRING_BASE = 10,
    /// Get the available vring base offset.
    GET_VRING_BASE = 11,
    /// Set the event file descriptor for adding buffers to the vring.
    SET_VRING_KICK = 12,
    /// Set the event file descriptor to signal when buffers are used.
    SET_VRING_CALL = 13,
    /// Set the event file descriptor to signal when an error occurs.
    SET_VRING_ERR = 14,
    /// Get the protocol feature bit mask from the underlying vhost implementation.
    GET_PROTOCOL_FEATURES = 15,
    /// Enable protocol features in the underlying vhost implementation.
    SET_PROTOCOL_FEATURES = 16,
    /// Query how many queues the backend supports.
    GET_QUEUE_NUM = 17,
    /// Signal the backend to enable or disable the corresponding vring.
    SET_VRING_ENABLE = 18,
    /// Ask the vhost-user backend to broadcast a fake RARP to notify that migration has
    /// terminated, for guests that do not support GUEST_ANNOUNCE.
    SEND_RARP = 19,
    /// Set the host MTU value exposed to the guest.
    NET_SET_MTU = 20,
    /// Set the socket file descriptor for backend-initiated requests.
    SET_BACKEND_REQ_FD = 21,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 22,
    /// Set the endianness of a VQ for legacy devices.
    SET_VRING_ENDIAN = 23,
    /// Fetch the contents of the virtio device configuration space.
    GET_CONFIG = 24,
    /// Change the contents of the virtio device configuration space.
    SET_CONFIG = 25,
    /// Create a session for crypto operation.
    CREATE_CRYPTO_SESSION = 26,
    /// Close a session for crypto operation.
    CLOSE_CRYPTO_SESSION = 27,
    /// Advise the backend that a migration with postcopy enabled is underway.
    POSTCOPY_ADVISE = 28,
    /// Advise the backend that a transition to postcopy mode has happened.
    POSTCOPY_LISTEN = 29,
    /// Advise that postcopy migration has now completed.
    POSTCOPY_END = 30,
    /// Get a shared buffer from the backend.
    GET_INFLIGHT_FD = 31,
    /// Send the shared inflight buffer back to the backend.
    SET_INFLIGHT_FD = 32,
    /// Set the GPU protocol socket file descriptor.
    GPU_SET_SOCKET = 33,
    /// Ask the vhost-user backend to disable all rings and reset all internal
    /// device state to the initial state.
    RESET_DEVICE = 34,
    /// Indicate that a buffer was added to the vring instead of signalling it
    /// using the vring's kick file descriptor.
    VRING_KICK = 35,
    /// Return a u64 payload containing the maximum number of memory slots.
    GET_MAX_MEM_SLOTS = 36,
    /// Update the memory tables by adding the region described.
    ADD_MEM_REG = 37,
    /// Update the memory tables by removing the region described.
    REM_MEM_REG = 38,
    /// Notify the backend with the updated device status as defined in the VIRTIO
    /// specification.
    SET_STATUS = 39,
    /// Query the backend for its device status as defined in the VIRTIO
    /// specification.
    GET_STATUS = 40,
    /// Front-end and back-end negotiate a channel over which to transfer the back-end's internal
    /// state during migration.
    SET_DEVICE_STATE_FD = 42,
    /// After transferring the back-end's internal state during migration, check whether the
    /// back-end was able to successfully fully process the state.
    CHECK_DEVICE_STATE = 43,

    // Non-standard message types.
    /// Get a list of the device's shared memory regions.
    GET_SHARED_MEMORY_REGIONS = 1004,
}

impl From<FrontendReq> for u32 {
    fn from(req: FrontendReq) -> u32 {
        req as u32
    }
}

impl Req for FrontendReq {}

impl TryFrom<u32> for FrontendReq {
    type Error = ReqError;

    fn try_from(value: u32) -> Result<Self, Self::Error> {
        FrontendReq::n(value).ok_or(ReqError::InvalidValue(value))
    }
}

/// Type of requests sent from backends to frontends.
///
/// These are called "back-end message types" in the spec, so we call them `BackendReq` here
/// even though it is somewhat confusing that the `FrontendClient` sends `BackendReq`s to a
/// `FrontendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
pub enum BackendReq {
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 1,
    /// Notify that the virtio device's configuration space has changed.
    CONFIG_CHANGE_MSG = 2,
    /// Set host notifier for a specified queue.
    VRING_HOST_NOTIFIER_MSG = 3,
    /// Indicate that a buffer was used from the vring.
    VRING_CALL = 4,
    /// Indicate that an error occurred on the specific vring.
    VRING_ERR = 5,

    // Non-standard message types.
    /// Indicates a request to map a fd into a shared memory region.
    SHMEM_MAP = 1000,
    /// Indicates a request to unmap part of a shared memory region.
    SHMEM_UNMAP = 1001,
    /// Virtio-fs draft: map file content into the window.
    DEPRECATED__FS_MAP = 1002,
    /// Virtio-fs draft: unmap file content from the window.
    DEPRECATED__FS_UNMAP = 1003,
    /// Virtio-fs draft: sync file content.
    DEPRECATED__FS_SYNC = 1004,
    /// Virtio-fs draft: perform a read/write from an fd directly to GPA.
    DEPRECATED__FS_IO = 1005,
    /// Indicates a request to map GPU memory into a shared memory region.
    GPU_MAP = 1006,
    /// Indicates a request to map external memory into a shared memory region.
    EXTERNAL_MAP = 1007,
}

impl From<BackendReq> for u32 {
    fn from(req: BackendReq) -> u32 {
        req as u32
    }
}

impl Req for BackendReq {}

impl TryFrom<u32> for BackendReq {
    type Error = ReqError;

    fn try_from(value: u32) -> Result<Self, Self::Error> {
        BackendReq::n(value).ok_or(ReqError::InvalidValue(value))
    }
}

/// Vhost message validator.
pub trait VhostUserMsgValidator {
    /// Validate message syntax only.
    /// It doesn't validate message semantics such as the protocol version number or
    /// dependencies on feature flags.
    fn is_valid(&self) -> bool {
        true
    }
}

// Bit mask for common message flags.
bitflags! {
    /// Common message flags for vhost-user requests and replies.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserHeaderFlag: u32 {
        /// Bits[0..2] is the message version number.
        const VERSION = 0x3;
        /// Mark message as reply.
        const REPLY = 0x4;
        /// Sender anticipates a reply message from the peer.
        const NEED_REPLY = 0x8;
        /// All valid bits.
        const ALL_FLAGS = 0xc;
        /// All reserved bits.
        const RESERVED_BITS = !0xf;
    }
}

/// Common message header for vhost-user requests and replies.
/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in
/// the machine native byte order.
#[repr(C, packed)]
#[derive(Copy, FromZeroes, FromBytes, AsBytes)]
pub struct VhostUserMsgHeader<R: Req> {
    request: u32,
    flags: u32,
    size: u32,
    _r: PhantomData<R>,
}

impl<R: Req> Debug for VhostUserMsgHeader<R> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("VhostUserMsgHeader")
            .field("request", &{ self.request })
            .field("flags", &{ self.flags })
            .field("size", &{ self.size })
            .finish()
    }
}

impl<R: Req> Clone for VhostUserMsgHeader<R> {
    fn clone(&self) -> VhostUserMsgHeader<R> {
        *self
    }
}

impl<R: Req> PartialEq for VhostUserMsgHeader<R> {
    fn eq(&self, other: &Self) -> bool {
        self.request == other.request && self.flags == other.flags && self.size == other.size
    }
}

impl<R: Req> VhostUserMsgHeader<R> {
    /// Create a new instance of `VhostUserMsgHeader`.
    pub fn new(request: R, flags: u32, size: u32) -> Self {
        // Default to protocol version 1
        let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1;
        VhostUserMsgHeader {
            request: request.into(),
            flags: fl,
            size,
            _r: PhantomData,
        }
    }

    /// Get message type.
    pub fn get_code(&self) -> std::result::Result<R, R::Error> {
        R::try_from(self.request)
    }

    /// Set message type.
    pub fn set_code(&mut self, request: R) {
        self.request = request.into();
    }

    /// Get message version number.
    pub fn get_version(&self) -> u32 {
        self.flags & 0x3
    }

    /// Set message version number.
    pub fn set_version(&mut self, ver: u32) {
        self.flags &= !0x3;
        self.flags |= ver & 0x3;
    }

    /// Check whether it's a reply message.
    pub fn is_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0
    }

    /// Mark message as reply.
    pub fn set_reply(&mut self, is_reply: bool) {
        if is_reply {
            self.flags |= VhostUserHeaderFlag::REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::REPLY.bits();
        }
    }

    /// Check whether a reply for this message is requested.
    pub fn is_need_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0
    }

    /// Mark that a reply for this message is needed.
    pub fn set_need_reply(&mut self, need_reply: bool) {
        if need_reply {
            self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits();
        }
    }

    /// Check whether it's the reply message for the request `req`.
    pub fn is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool {
        self.is_reply() && !req.is_reply() && self.request == req.request
    }

    /// Get message size.
    pub fn get_size(&self) -> u32 {
        self.size
    }

    /// Set message size.
    pub fn set_size(&mut self, size: u32) {
        self.size = size;
    }
}
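
// Illustrative sketch (not part of the message definitions): how the header accessors above are
// typically combined when pairing a request with its reply. The GET_FEATURES request and the
// 8-byte reply size are example values only.
#[cfg(test)]
mod msg_header_usage_example {
    use super::*;

    #[test]
    fn pair_request_and_reply_headers() {
        // Request header: version 1 is set automatically by new(); ask the peer to reply.
        let mut req = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0);
        req.set_need_reply(true);
        assert!(req.is_valid());
        assert_eq!(req.get_code(), Ok(FrontendReq::GET_FEATURES));

        // Reply header: same request code, REPLY flag set, payload is a u64 (8 bytes).
        let reply = VhostUserMsgHeader::new(
            FrontendReq::GET_FEATURES,
            VhostUserHeaderFlag::REPLY.bits(),
            8,
        );
        assert!(reply.is_reply());
        assert!(reply.is_reply_for(&req));
        assert_eq!(reply.get_size(), 8);
    }
}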

impl<R: Req> Default for VhostUserMsgHeader<R> {
    fn default() -> Self {
        VhostUserMsgHeader {
            request: 0,
            flags: 0x1,
            size: 0,
            _r: PhantomData,
        }
    }
}

impl<T: Req> VhostUserMsgValidator for VhostUserMsgHeader<T> {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if self.get_code().is_err() {
            return false;
        } else if self.get_version() != 0x1 {
            return false;
        } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 {
            return false;
        }
        true
    }
}

/// Virtio feature flag for packed virtqueue support.
pub const VIRTIO_F_RING_PACKED: u32 = 34;

/// Virtio feature flag for the vhost-user protocol features.
pub const VHOST_USER_F_PROTOCOL_FEATURES: u32 = 30;

// Bit mask for vhost-user protocol feature flags.
bitflags! {
    /// Vhost-user protocol feature flags.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserProtocolFeatures: u64 {
        /// Support multiple queues.
        const MQ = 0x0000_0001;
        /// Support logging through a shared memory fd.
        const LOG_SHMFD = 0x0000_0002;
        /// Support broadcasting a fake RARP packet.
        const RARP = 0x0000_0004;
        /// Support sending reply messages for requests with the NEED_REPLY flag set.
        const REPLY_ACK = 0x0000_0008;
        /// Support setting the MTU for virtio-net devices.
        const MTU = 0x0000_0010;
        /// Allow the backend to send requests to the frontend via an optional communication
        /// channel.
        const BACKEND_REQ = 0x0000_0020;
        /// Support setting the backend endianness via SET_VRING_ENDIAN.
        const CROSS_ENDIAN = 0x0000_0040;
        /// Support crypto operations.
        const CRYPTO_SESSION = 0x0000_0080;
        /// Support sending userfault_fd from backends to frontends.
        const PAGEFAULT = 0x0000_0100;
        /// Support virtio device configuration.
        const CONFIG = 0x0000_0200;
        /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend.
        const BACKEND_SEND_FD = 0x0000_0400;
        /// Allow the backend to register a host notifier.
        const HOST_NOTIFIER = 0x0000_0800;
        /// Support inflight shmfd.
        const INFLIGHT_SHMFD = 0x0000_1000;
        /// Support resetting the device.
        const RESET_DEVICE = 0x0000_2000;
        /// Support inband notifications.
        const INBAND_NOTIFICATIONS = 0x0000_4000;
        /// Support configuring memory slots.
        const CONFIGURE_MEM_SLOTS = 0x0000_8000;
        /// Support reporting status.
        const STATUS = 0x0001_0000;
        /// Support Xen mmap.
        const XEN_MMAP = 0x0002_0000;
        /// Support VHOST_USER_SET_DEVICE_STATE_FD and VHOST_USER_CHECK_DEVICE_STATE messages.
        const DEVICE_STATE = 0x0008_0000;
        /// Support shared memory regions. (Non-standard.)
        const SHARED_MEMORY_REGIONS = 0x8000_0000;
    }
}
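
// Illustrative sketch (not part of the wire protocol definitions): how a frontend might
// intersect the protocol feature bits advertised by a backend with the ones it supports before
// sending SET_PROTOCOL_FEATURES. The specific feature combination below is made up for the
// example; the negotiated bits travel as the u64 payload of that request.
#[cfg(test)]
mod protocol_feature_negotiation_example {
    use super::*;

    #[test]
    fn negotiate_protocol_features() {
        // Features a hypothetical backend advertised in its GET_PROTOCOL_FEATURES reply.
        let backend = VhostUserProtocolFeatures::MQ
            | VhostUserProtocolFeatures::CONFIG
            | VhostUserProtocolFeatures::REPLY_ACK
            | VhostUserProtocolFeatures::XEN_MMAP;
        // Features this frontend understands.
        let frontend = VhostUserProtocolFeatures::MQ | VhostUserProtocolFeatures::CONFIG;

        // Only the intersection may be enabled.
        let negotiated = backend & frontend;
        assert_eq!(negotiated, frontend);

        // The raw bits are what would be carried in the VhostUserU64 payload of
        // SET_PROTOCOL_FEATURES.
        let payload = VhostUserU64::new(negotiated.bits());
        let value = payload.value;
        assert_eq!(value, negotiated.bits());
    }
}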

/// A generic message to encapsulate a 64-bit value.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserU64 {
    /// The encapsulated 64-bit common value.
    pub value: u64,
}

impl VhostUserU64 {
    /// Create a new instance.
    pub fn new(value: u64) -> Self {
        VhostUserU64 { value }
    }
}

impl VhostUserMsgValidator for VhostUserU64 {}

/// An empty message.
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserEmptyMsg;

impl VhostUserMsgValidator for VhostUserEmptyMsg {}

/// A generic empty message.
/// A ZST with repr(C) has the same type layout as repr(Rust).
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserEmptyMessage;

impl VhostUserMsgValidator for VhostUserEmptyMessage {}

/// Memory region descriptor for the SET_MEM_TABLE request.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserMemory {
    /// Number of memory regions in the payload.
    pub num_regions: u32,
    /// Padding for alignment.
    pub padding1: u32,
}

impl VhostUserMemory {
    /// Create a new instance.
    pub fn new(cnt: u32) -> Self {
        VhostUserMemory {
            num_regions: cnt,
            padding1: 0,
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemory {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if self.padding1 != 0 {
            return false;
        } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 {
            return false;
        }
        true
    }
}

/// Memory region descriptors as payload for the SET_MEM_TABLE request.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserMemoryRegion {
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where the region starts in the mapped memory.
    pub mmap_offset: u64,
}

impl VhostUserMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserMemoryRegion {
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemoryRegion {
    fn is_valid(&self) -> bool {
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}

/// Payload of the VhostUserMemory message.
pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;
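
// Illustrative sketch: the SET_MEM_TABLE payload is a VhostUserMemory header followed by
// `num_regions` VhostUserMemoryRegion entries, with one file descriptor attached per region.
// The addresses and sizes below are made-up example values.
#[cfg(test)]
mod mem_table_payload_example {
    use super::*;

    #[test]
    fn build_mem_table_payload() {
        let regions: VhostUserMemoryPayload = vec![
            // 1 GiB of guest RAM starting at GPA 0, mapped somewhere in the sender's
            // address space.
            VhostUserMemoryRegion::new(0, 0x4000_0000, 0x7f00_0000_0000, 0),
            // A second 1 GiB region above the 4 GiB boundary.
            VhostUserMemoryRegion::new(0x1_0000_0000, 0x4000_0000, 0x7f40_0000_0000, 0),
        ];
        let header = VhostUserMemory::new(regions.len() as u32);

        assert!(header.is_valid());
        assert!(regions.iter().all(|r| r.is_valid()));

        let num_regions = header.num_regions;
        assert_eq!(num_regions as usize, regions.len());
    }
}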

/// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
/// requests.
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserSingleMemoryRegion {
    /// Padding for correct alignment.
    padding: u64,
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where the region starts in the mapped memory.
    pub mmap_offset: u64,
}

impl VhostUserSingleMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserSingleMemoryRegion {
            padding: 0,
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {
    fn is_valid(&self) -> bool {
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}
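
// Illustrative sketch: with the CONFIGURE_MEM_SLOTS protocol feature, memory is added and
// removed one region at a time via ADD_MEM_REG / REM_MEM_REG, each carrying a single
// VhostUserSingleMemoryRegion plus one file descriptor. The values below are made up.
#[cfg(test)]
mod single_mem_region_example {
    use super::*;

    #[test]
    fn build_add_mem_reg_payload() {
        let region = VhostUserSingleMemoryRegion::new(
            0x1_0000_0000,    // guest physical address
            0x2000_0000,      // 512 MiB region
            0x7f80_0000_0000, // sender's mapping address
            0,                // offset into the attached fd
        );
        assert!(region.is_valid());

        // A zero-sized region must be rejected.
        let empty = VhostUserSingleMemoryRegion::new(0, 0, 0, 0);
        assert!(!empty.is_valid());
    }
}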

/// Vring state descriptor.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserVringState {
    /// Vring index.
    pub index: u32,
    /// A common 32-bit value to encapsulate vring state etc.
    pub num: u32,
}

impl VhostUserVringState {
    /// Create a new instance.
    pub fn new(index: u32, num: u32) -> Self {
        VhostUserVringState { index, num }
    }
}

impl VhostUserMsgValidator for VhostUserVringState {}

// Bit mask for vring address flags.
bitflags! {
    /// Flags for vring address.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserVringAddrFlags: u32 {
        /// Support log of vring operations.
        /// Modifications to the "used" vring should be logged.
        const VHOST_VRING_F_LOG = 0x1;
    }
}

/// Vring address descriptor.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserVringAddr {
    /// Vring index.
    pub index: u32,
    /// Vring flags defined by VhostUserVringAddrFlags.
    pub flags: u32,
    /// Ring address of the vring descriptor table.
    pub descriptor: u64,
    /// Ring address of the vring used ring.
    pub used: u64,
    /// Ring address of the vring available ring.
    pub available: u64,
    /// Guest address for logging.
    pub log: u64,
}

impl VhostUserVringAddr {
    /// Create a new instance.
    pub fn new(
        index: u32,
        flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        log: u64,
    ) -> Self {
        VhostUserVringAddr {
            index,
            flags: flags.bits(),
            descriptor,
            used,
            available,
            log,
        }
    }

    /// Create a new instance from `VringConfigData`.
    pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
        let log_addr = config_data.log_addr.unwrap_or(0);
        VhostUserVringAddr {
            index,
            flags: config_data.flags,
            descriptor: config_data.desc_table_addr,
            used: config_data.used_ring_addr,
            available: config_data.avail_ring_addr,
            log: log_addr,
        }
    }
}

impl VhostUserMsgValidator for VhostUserVringAddr {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 {
            return false;
        } else if self.descriptor & 0xf != 0 {
            return false;
        } else if self.available & 0x1 != 0 {
            return false;
        } else if self.used & 0x3 != 0 {
            return false;
        }
        true
    }
}

// Bit mask for the vhost-user device configuration message.
bitflags! {
    /// Flags for the device configuration message.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserConfigFlags: u32 {
        /// Vhost frontend messages used for writable fields.
        const WRITABLE = 0x1;
        /// Vhost frontend messages used for live migration.
        const LIVE_MIGRATION = 0x2;
    }
}

/// Message to read/write device configuration space.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserConfig {
    /// Offset of virtio device's configuration space.
    pub offset: u32,
    /// Configuration space access size in bytes.
    pub size: u32,
    /// Flags for the device configuration operation.
    pub flags: u32,
}

impl VhostUserConfig {
    /// Create a new instance.
    pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self {
        VhostUserConfig {
            offset,
            size,
            flags: flags.bits(),
        }
    }
}

impl VhostUserMsgValidator for VhostUserConfig {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        let end_addr = match self.size.checked_add(self.offset) {
            Some(addr) => addr,
            None => return false,
        };
        if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 {
            return false;
        } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE {
            return false;
        }
        true
    }
}

/// Payload for the VhostUserConfig message.
pub type VhostUserConfigPayload = Vec<u8>;
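
// Illustrative sketch: a GET_CONFIG/SET_CONFIG exchange carries a VhostUserConfig header
// followed by `size` bytes of configuration data. The offset and size used here are example
// values inside the window checked by is_valid().
#[cfg(test)]
mod config_msg_usage_example {
    use super::*;

    #[test]
    fn build_config_read_request() {
        // Read 4 bytes at the start of the device-specific configuration space.
        let msg = VhostUserConfig::new(VHOST_USER_CONFIG_OFFSET, 4, VhostUserConfigFlags::empty());
        assert!(msg.is_valid());

        // The payload that accompanies the header must match the requested size.
        let payload: VhostUserConfigPayload = vec![0u8; 4];
        let size = msg.size;
        assert_eq!(payload.len(), size as usize);
    }
}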

/// Payload for the GET_INFLIGHT_FD and SET_INFLIGHT_FD requests, describing the shared
/// buffer used to track inflight I/O.
/// This struct is defined by qemu and compiles with arch-dependent padding.
/// Interestingly, all our supported archs (arm, aarch64, x86_64) have the same
/// data layout for this type.
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserInflight {
    /// Size of the area to track inflight I/O.
    pub mmap_size: u64,
    /// Offset of this area from the start of the supplied file descriptor.
    pub mmap_offset: u64,
    /// Number of virtqueues.
    pub num_queues: u16,
    /// Size of virtqueues.
    pub queue_size: u16,
    /// Implicit padding on 64-bit platforms.
    pub _padding: [u8; 4],
}

impl VhostUserInflight {
    /// Create a new instance.
    pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self {
        VhostUserInflight {
            mmap_size,
            mmap_offset,
            num_queues,
            queue_size,
            ..Default::default()
        }
    }
}

impl VhostUserMsgValidator for VhostUserInflight {
    fn is_valid(&self) -> bool {
        if self.num_queues == 0 || self.queue_size == 0 {
            return false;
        }
        true
    }
}

/// VHOST_USER_SET_DEVICE_STATE_FD request payload.
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct DeviceStateTransferParameters {
    /// Direction in which the state is transferred.
    pub transfer_direction: u32,
    /// State in which the VM guest and devices are.
    pub migration_phase: u32,
}

impl VhostUserMsgValidator for DeviceStateTransferParameters {
    fn is_valid(&self) -> bool {
        // Validated elsewhere.
        true
    }
}

/*
 * TODO: support dirty log, live migration and IOTLB operations.
#[repr(C, packed)]
pub struct VhostUserVringArea {
    pub index: u32,
    pub flags: u32,
    pub size: u64,
    pub offset: u64,
}

#[repr(C, packed)]
pub struct VhostUserLog {
    pub size: u64,
    pub offset: u64,
}

#[repr(C, packed)]
pub struct VhostUserIotlb {
    pub iova: u64,
    pub size: u64,
    pub user_addr: u64,
    pub permission: u8,
    pub optype: u8,
}
*/

/// Flags for SHMEM_MAP messages.
#[repr(transparent)]
#[derive(
    AsBytes,
    FromZeroes,
    FromBytes,
    Copy,
    Clone,
    Debug,
    Default,
    Eq,
    Hash,
    Ord,
    PartialEq,
    PartialOrd,
)]
pub struct VhostUserShmemMapMsgFlags(u8);

bitflags! {
    impl VhostUserShmemMapMsgFlags: u8 {
        /// Empty permission.
        const EMPTY = 0x0;
        /// Read permission.
        const MAP_R = 0x1;
        /// Write permission.
        const MAP_W = 0x2;
    }
}

impl From<Protection> for VhostUserShmemMapMsgFlags {
    fn from(prot: Protection) -> Self {
        let mut flags = Self::EMPTY;
        flags.set(Self::MAP_R, prot.allows(&Protection::read()));
        flags.set(Self::MAP_W, prot.allows(&Protection::write()));
        flags
    }
}

impl From<VhostUserShmemMapMsgFlags> for Protection {
    fn from(flags: VhostUserShmemMapMsgFlags) -> Self {
        let mut prot = Protection::default();
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_R) {
            prot = prot.set_read();
        }
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_W) {
            prot = prot.set_write();
        }
        prot
    }
}

/// Backend request message to map a file into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserShmemMapMsg {
    /// Flags for the mmap operation.
    pub flags: VhostUserShmemMapMsgFlags,
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 6],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// File offset.
    pub fd_offset: u64,
    /// Size of region to map.
    pub len: u64,
}

impl VhostUserMsgValidator for VhostUserShmemMapMsg {
    fn is_valid(&self) -> bool {
        (self.flags.bits() & !VhostUserShmemMapMsgFlags::all().bits()) == 0
            && self.fd_offset.checked_add(self.len).is_some()
            && self.shm_offset.checked_add(self.len).is_some()
    }
}

impl VhostUserShmemMapMsg {
    /// New instance of VhostUserShmemMapMsg struct.
    pub fn new(
        shmid: u8,
        shm_offset: u64,
        fd_offset: u64,
        len: u64,
        flags: VhostUserShmemMapMsgFlags,
    ) -> Self {
        Self {
            flags,
            shmid,
            padding: [0; 6],
            shm_offset,
            fd_offset,
            len,
        }
    }
}
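
// Illustrative sketch of the non-standard SHMEM_MAP request: map `len` bytes of the attached
// fd, read/write, at `shm_offset` within shared memory region `shmid`. All values are made up.
#[cfg(test)]
mod shmem_map_msg_example {
    use super::*;

    #[test]
    fn build_shmem_map_request() {
        let flags = VhostUserShmemMapMsgFlags::MAP_R | VhostUserShmemMapMsgFlags::MAP_W;
        let msg = VhostUserShmemMapMsg::new(
            0,      // shmid
            0x1000, // offset into the shared memory region
            0,      // offset into the attached fd
            0x2000, // length of the mapping
            flags,
        );
        assert!(msg.is_valid());

        // Mappings whose end would overflow the region offset are rejected.
        let overflowing = VhostUserShmemMapMsg::new(0, u64::MAX, 0, 0x1000, flags);
        assert!(!overflowing.is_valid());
    }
}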

/// Backend request message to map GPU memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserGpuMapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to map.
    pub len: u64,
    /// Index of the memory type.
    pub memory_idx: u32,
    /// Type of share handle.
    pub handle_type: u32,
    /// Device UUID.
    pub device_uuid: [u8; 16],
    /// Driver UUID.
    pub driver_uuid: [u8; 16],
}

impl VhostUserMsgValidator for VhostUserGpuMapMsg {
    fn is_valid(&self) -> bool {
        self.len > 0
    }
}

impl VhostUserGpuMapMsg {
    /// New instance of VhostUserGpuMapMsg struct.
    pub fn new(
        shmid: u8,
        shm_offset: u64,
        len: u64,
        memory_idx: u32,
        handle_type: u32,
        device_uuid: [u8; 16],
        driver_uuid: [u8; 16],
    ) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
            memory_idx,
            handle_type,
            device_uuid,
            driver_uuid,
        }
    }
}

/// Backend request message to map external memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, AsBytes, FromZeroes, FromBytes)]
pub struct VhostUserExternalMapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to map.
    pub len: u64,
    /// Pointer to the memory.
    pub ptr: u64,
}

impl VhostUserMsgValidator for VhostUserExternalMapMsg {
    fn is_valid(&self) -> bool {
        self.len > 0
    }
}

impl VhostUserExternalMapMsg {
    /// New instance of VhostUserExternalMapMsg struct.
    pub fn new(shmid: u8, shm_offset: u64, len: u64, ptr: u64) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
            ptr,
        }
    }
}

/// Backend request message to unmap part of a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromZeroes, FromBytes, AsBytes)]
pub struct VhostUserShmemUnmapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to unmap.
    pub len: u64,
}

impl VhostUserMsgValidator for VhostUserShmemUnmapMsg {
    fn is_valid(&self) -> bool {
        self.shm_offset.checked_add(self.len).is_some()
    }
}

impl VhostUserShmemUnmapMsg {
    /// New instance of VhostUserShmemUnmapMsg struct.
    pub fn new(shmid: u8, shm_offset: u64, len: u64) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
        }
    }
}

/// Inflight I/O descriptor state for split virtqueues.
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStateSplit {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding.
    padding: [u8; 5],
    /// List of last batch of used descriptors, only when batching is used for submitting.
    pub next: u16,
    /// Preserve order of fetching available descriptors, only for head descriptor.
    pub counter: u64,
}

impl DescStateSplit {
    /// New instance of DescStateSplit struct.
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for split virtqueues.
#[repr(C, packed)]
pub struct QueueRegionSplit {
    /// Features flags of this region.
    pub features: u64,
    /// Version of this region.
    pub version: u16,
    /// Number of DescStateSplit entries.
    pub desc_num: u16,
    /// List to track last batch of used descriptors.
    pub last_batch_head: u16,
    /// Idx value of used ring.
    pub used_idx: u16,
    /// Pointer to an array of DescStateSplit entries.
    pub desc: u64,
}

impl QueueRegionSplit {
    /// New instance of QueueRegionSplit struct.
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionSplit {
            features,
            version: 1,
            desc_num: queue_size,
            last_batch_head: 0,
            used_idx: 0,
            desc: 0,
        }
    }
}
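
// Illustrative sketch, assuming the QEMU layout where each queue's inflight region is a
// QueueRegionSplit header immediately followed by `desc_num` DescStateSplit entries: one
// plausible way to size the shared buffer advertised in VhostUserInflight::mmap_size.
#[cfg(test)]
mod inflight_split_region_example {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn size_inflight_area_for_split_queues() {
        let num_queues: u16 = 2;
        let queue_size: u16 = 256;

        // Per-queue region: header plus one descriptor-state entry per ring entry.
        let per_queue =
            size_of::<QueueRegionSplit>() + usize::from(queue_size) * size_of::<DescStateSplit>();
        let mmap_size = (per_queue * usize::from(num_queues)) as u64;

        let inflight = VhostUserInflight::new(mmap_size, 0, num_queues, queue_size);
        assert!(inflight.is_valid());
    }
}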

/// Inflight I/O descriptor state for packed virtqueues.
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStatePacked {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding.
    padding: u8,
    /// Link to next free entry.
    pub next: u16,
    /// Link to last entry of descriptor list, only for head.
    pub last: u16,
    /// Length of descriptor list, only for head.
    pub num: u16,
    /// Preserve order of fetching avail descriptors, only for head.
    pub counter: u64,
    /// Buffer ID.
    pub id: u16,
    /// Descriptor flags.
    pub flags: u16,
    /// Buffer length.
    pub len: u32,
    /// Buffer address.
    pub addr: u64,
}

impl DescStatePacked {
    /// New instance of DescStatePacked struct.
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for packed virtqueues.
#[repr(C, packed)]
pub struct QueueRegionPacked {
    /// Features flags of this region.
    pub features: u64,
    /// Version of this region.
    pub version: u16,
    /// Size of descriptor state array.
    pub desc_num: u16,
    /// Head of free DescStatePacked entry list.
    pub free_head: u16,
    /// Old head of free DescStatePacked entry list.
    pub old_free_head: u16,
    /// Used idx of descriptor ring.
    pub used_idx: u16,
    /// Old used idx of descriptor ring.
    pub old_used_idx: u16,
    /// Device ring wrap counter.
    pub used_wrap_counter: u8,
    /// Old device ring wrap counter.
    pub old_used_wrap_counter: u8,
    /// Padding.
    padding: [u8; 7],
    /// Pointer to array tracking state of each descriptor from descriptor ring.
    pub desc: u64,
}

impl QueueRegionPacked {
    /// New instance of QueueRegionPacked struct.
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionPacked {
            features,
            version: 1,
            desc_num: queue_size,
            free_head: 0,
            old_free_head: 0,
            used_idx: 0,
            old_used_idx: 0,
            used_wrap_counter: 0,
            old_used_wrap_counter: 0,
            padding: [0; 7],
            desc: 0,
        }
    }
}

/// Virtio shared memory descriptor.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromZeroes, FromBytes, AsBytes)]
pub struct VhostSharedMemoryRegion {
    /// The shared memory region's shmid.
    pub id: u8,
    /// Padding.
    padding: [u8; 7],
    /// The length of the shared memory region.
    pub length: u64,
}

impl VhostSharedMemoryRegion {
    /// New instance of VhostSharedMemoryRegion struct.
    pub fn new(id: u8, length: u64) -> Self {
        VhostSharedMemoryRegion {
            id,
            padding: [0; 7],
            length,
        }
    }
}
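
// Illustrative sketch of the non-standard GET_SHARED_MEMORY_REGIONS reply: a concatenation of
// VhostSharedMemoryRegion entries, serialized here with zerocopy's AsBytes. The two regions are
// made-up example values.
#[cfg(test)]
mod shared_memory_regions_example {
    use super::*;
    use std::mem::size_of;
    use zerocopy::AsBytes;

    #[test]
    fn serialize_shared_memory_region_list() {
        let regions = [
            VhostSharedMemoryRegion::new(0, 0x10_0000),
            VhostSharedMemoryRegion::new(1, 0x4000),
        ];

        // Each entry is a fixed-size, packed struct, so the reply payload is simply the
        // entries back to back.
        let mut payload = Vec::new();
        for region in &regions {
            payload.extend_from_slice(region.as_bytes());
        }
        assert_eq!(
            payload.len(),
            regions.len() * size_of::<VhostSharedMemoryRegion>()
        );
    }
}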

/// Direction of a device state transfer for SET_DEVICE_STATE_FD.
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserTransferDirection {
    Save,
    Load,
}

/// Migration phase of a device state transfer for SET_DEVICE_STATE_FD.
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserMigrationPhase {
    Stopped,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn check_frontend_request_code() {
        FrontendReq::try_from(0).expect_err("invalid value");
        FrontendReq::try_from(46).expect_err("invalid value");
        FrontendReq::try_from(10000).expect_err("invalid value");

        let code = FrontendReq::try_from(FrontendReq::GET_FEATURES as u32).unwrap();
        assert_eq!(code, code.clone());
    }

    #[test]
    fn check_backend_request_code() {
        BackendReq::try_from(0).expect_err("invalid value");
        BackendReq::try_from(14).expect_err("invalid value");
        BackendReq::try_from(10000).expect_err("invalid value");

        let code = BackendReq::try_from(BackendReq::CONFIG_CHANGE_MSG as u32).unwrap();
        assert_eq!(code, code.clone());
    }

    #[test]
    fn msg_header_ops() {
        let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100);
        assert_eq!(hdr.get_code(), Ok(FrontendReq::GET_FEATURES));
        hdr.set_code(FrontendReq::SET_FEATURES);
        assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_FEATURES));

        assert_eq!(hdr.get_version(), 0x1);

        assert!(!hdr.is_reply());
        hdr.set_reply(true);
        assert!(hdr.is_reply());
        hdr.set_reply(false);

        assert!(!hdr.is_need_reply());
        hdr.set_need_reply(true);
        assert!(hdr.is_need_reply());
        hdr.set_need_reply(false);

        assert_eq!(hdr.get_size(), 0x100);
        hdr.set_size(0x200);
        assert_eq!(hdr.get_size(), 0x200);

        assert!(!hdr.is_need_reply());
        assert!(!hdr.is_reply());
        assert_eq!(hdr.get_version(), 0x1);

        // Check version
        hdr.set_version(0x0);
        assert!(!hdr.is_valid());
        hdr.set_version(0x2);
        assert!(!hdr.is_valid());
        hdr.set_version(0x1);
        assert!(hdr.is_valid());

        // Test Debug, Clone, PartialEq traits
        assert_eq!(hdr, hdr.clone());
        assert_eq!(hdr.clone().get_code(), hdr.get_code());
        assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr));
    }

    #[test]
    fn test_vhost_user_message_u64() {
        let val = VhostUserU64::default();
        let val1 = VhostUserU64::new(0);

        let a = val.value;
        let b = val1.value;
        assert_eq!(a, b);
        let a = VhostUserU64::new(1).value;
        assert_eq!(a, 1);
    }

    #[test]
    fn check_user_memory() {
        let mut msg = VhostUserMemory::new(1);
        assert!(msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        assert!(msg.is_valid());

        msg.num_regions += 1;
        assert!(!msg.is_valid());
        msg.num_regions = 0xFFFFFFFF;
        assert!(!msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        msg.padding1 = 1;
        assert!(!msg.is_valid());
    }

    #[test]
    fn check_user_memory_region() {
        let mut msg = VhostUserMemoryRegion {
            guest_phys_addr: 0,
            memory_size: 0x1000,
            user_addr: 0,
            mmap_offset: 0,
        };
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF;
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFF000;
        assert!(!msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFF0000;
        msg.memory_size = 0;
        assert!(!msg.is_valid());
        let a = msg.guest_phys_addr;
        let b = msg.guest_phys_addr;
        assert_eq!(a, b);

        let msg = VhostUserMemoryRegion::default();
        let a = msg.guest_phys_addr;
        assert_eq!(a, 0);
        let a = msg.memory_size;
        assert_eq!(a, 0);
        let a = msg.user_addr;
        assert_eq!(a, 0);
        let a = msg.mmap_offset;
        assert_eq!(a, 0);
    }

    #[test]
    fn test_vhost_user_state() {
        let state = VhostUserVringState::new(5, 8);

        let a = state.index;
        assert_eq!(a, 5);
        let a = state.num;
        assert_eq!(a, 8);
        assert!(state.is_valid());

        let state = VhostUserVringState::default();
        let a = state.index;
        assert_eq!(a, 0);
        let a = state.num;
        assert_eq!(a, 0);
        assert!(state.is_valid());
    }

    #[test]
    fn test_vhost_user_addr() {
        let mut addr = VhostUserVringAddr::new(
            2,
            VhostUserVringAddrFlags::VHOST_VRING_F_LOG,
            0x1000,
            0x2000,
            0x3000,
            0x4000,
        );

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());

        addr.descriptor = 0x1001;
        assert!(!addr.is_valid());
        addr.descriptor = 0x1000;

        addr.available = 0x3001;
        assert!(!addr.is_valid());
        addr.available = 0x3000;

        addr.used = 0x2001;
        assert!(!addr.is_valid());
        addr.used = 0x2000;
        assert!(addr.is_valid());
    }

    #[test]
    fn test_vhost_user_state_from_config() {
        let config = VringConfigData {
            queue_size: 128,
            flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
            desc_table_addr: 0x1000,
            used_ring_addr: 0x2000,
            avail_ring_addr: 0x3000,
            log_addr: Some(0x4000),
        };
        let addr = VhostUserVringAddr::from_config_data(2, &config);

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());
    }

    #[test]
    fn check_user_vring_addr() {
        let mut msg =
            VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0);
        assert!(msg.is_valid());

        msg.descriptor = 1;
        assert!(!msg.is_valid());
        msg.descriptor = 0;

        msg.available = 1;
        assert!(!msg.is_valid());
        msg.available = 0;

        msg.used = 1;
        assert!(!msg.is_valid());
        msg.used = 0;

        msg.flags |= 0x80000000;
        assert!(!msg.is_valid());
        msg.flags &= !0x80000000;
    }

    #[test]
    fn check_user_config_msg() {
        let mut msg =
            VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE);

        assert!(msg.is_valid());
        msg.size = 0;
        assert!(!msg.is_valid());
        msg.size = 1;
        assert!(msg.is_valid());
        msg.offset = u32::MAX;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE - 1;
        assert!(msg.is_valid());
        msg.size = 2;
        assert!(!msg.is_valid());
        msg.size = 1;
        msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits();
        assert!(msg.is_valid());
        msg.flags |= 0x4;
        assert!(!msg.is_valid());
    }
}