// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! virtqueue interface

#![deny(missing_docs)]

use std::ops::Deref;
use std::ops::DerefMut;

pub mod packed_descriptor_chain;
mod packed_queue;
pub mod split_descriptor_chain;
mod split_queue;

use std::num::Wrapping;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::warn;
use base::Event;
use cros_async::AsyncError;
use cros_async::EventAsync;
use futures::channel::oneshot;
use futures::select_biased;
use futures::FutureExt;
use packed_queue::PackedQueue;
use serde::Deserialize;
use serde::Serialize;
use split_queue::SplitQueue;
use virtio_sys::virtio_config::VIRTIO_F_RING_PACKED;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::virtio::DescriptorChain;
use crate::virtio::Interrupt;
use crate::virtio::VIRTIO_MSI_NO_VECTOR;

/// A virtio queue's parameters.
///
/// `QueueConfig` can be converted into a running `Queue` by calling [`QueueConfig::activate()`].
pub struct QueueConfig {
    /// Whether this queue has already been activated.
    activated: bool,

    /// The maximum size in elements offered by the device.
    max_size: u16,

    /// The queue size in elements the driver selected. This is always guaranteed to be a power of
    /// two less than or equal to `max_size`, as required for split virtqueues. These invariants
    /// are enforced by `set_size()`.
    size: u16,

    /// Indicates if the queue is finished with configuration.
    ready: bool,

    /// MSI-X vector for the queue. Don't care for INTx.
    vector: u16,

    /// Ring features (e.g. `VIRTIO_RING_F_EVENT_IDX`, `VIRTIO_F_RING_PACKED`) offered by the
    /// device.
    features: u64,

    /// Device feature bits accepted by the driver.
    acked_features: u64,

    /// Guest physical address of the descriptor table.
    desc_table: GuestAddress,

    /// Guest physical address of the available ring (driver area).
    ///
    /// TODO(b/290657008): update field and accessor names to match the current virtio spec
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring (device area).
    used_ring: GuestAddress,

    /// Initial available ring index when the queue is activated.
    next_avail: Wrapping<u16>,

    /// Initial used ring index when the queue is activated.
    next_used: Wrapping<u16>,
}

#[derive(Serialize, Deserialize)]
struct QueueConfigSnapshot {
    activated: bool,
    max_size: u16,
    size: u16,
    ready: bool,
    vector: u16,
    features: u64,
    acked_features: u64,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
}

impl QueueConfig {
    /// Constructs a virtio queue configuration with the given `max_size`.
    pub fn new(max_size: u16, features: u64) -> Self {
        assert!(max_size > 0);
        assert!(max_size <= Queue::MAX_SIZE);
        QueueConfig {
            activated: false,
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            features,
            acked_features: 0,
            next_used: Wrapping(0),
            next_avail: Wrapping(0),
        }
    }
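
    // Typical lifecycle, as an illustrative sketch (not real device code): the transport
    // creates the config, applies the driver's register writes, and activates the queue once
    // the driver marks it ready. `mem`, `event`, `interrupt`, and `features` are assumed to
    // come from the surrounding device setup.
    //
    //     let mut cfg = QueueConfig::new(256, features);
    //     cfg.set_size(128); // driver-selected size; must be a power of two
    //     cfg.set_desc_table(GuestAddress(0x1000));
    //     cfg.set_avail_ring(GuestAddress(0x2000));
    //     cfg.set_used_ring(GuestAddress(0x3000));
    //     cfg.set_ready(true);
    //     let queue = cfg.activate(&mem, event, interrupt)?;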

    /// Returns the maximum size of this queue.
    pub fn max_size(&self) -> u16 {
        self.max_size
    }

    /// Returns the currently configured size of the queue.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Sets the queue size.
    ///
    /// As documented on the `size` field, the value must be a power of two no larger than
    /// `max_size`; invalid values are ignored with a warning.
    pub fn set_size(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to size on ready queue");
            return;
        }

        if val > self.max_size || !val.is_power_of_two() {
            warn!(
                "ignoring invalid queue size {} (max_size: {})",
                val, self.max_size
            );
            return;
        }

        self.size = val;
    }

    /// Returns the currently configured interrupt vector.
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Sets the interrupt vector for this queue.
    pub fn set_vector(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to vector on ready queue");
            return;
        }

        self.vector = val;
    }

    /// Getter for descriptor area.
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Setter for descriptor area.
    pub fn set_desc_table(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to desc_table on ready queue");
            return;
        }

        self.desc_table = val;
    }

    /// Getter for driver area.
    pub fn avail_ring(&self) -> GuestAddress {
        self.avail_ring
    }

    /// Setter for driver area.
    pub fn set_avail_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to avail_ring on ready queue");
            return;
        }

        self.avail_ring = val;
    }

    /// Getter for device area.
    pub fn used_ring(&self) -> GuestAddress {
        self.used_ring
    }

    /// Setter for device area.
    pub fn set_used_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to used_ring on ready queue");
            return;
        }

        self.used_ring = val;
    }

    /// Getter for next_avail index.
    pub fn next_avail(&self) -> Wrapping<u16> {
        self.next_avail
    }

    /// Setter for next_avail index.
    pub fn set_next_avail(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_avail on ready queue");
            return;
        }

        self.next_avail = val;
    }

    /// Getter for next_used index.
    pub fn next_used(&self) -> Wrapping<u16> {
        self.next_used
    }

    /// Setter for next_used index.
    pub fn set_next_used(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_used on ready queue");
            return;
        }

        self.next_used = val;
    }

    /// Returns the features that have been acknowledged by the driver.
    pub fn acked_features(&self) -> u64 {
        self.acked_features
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    ///
    /// Feature bits that were not offered in `features` at construction time are masked out.
    pub fn ack_features(&mut self, features: u64) {
        self.acked_features |= features & self.features;
    }
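
    // Illustrative sketch: a device offering packed rings constructs the config with that
    // feature bit set, and the transport forwards the driver's acknowledgement here. Bits
    // that were never offered are masked out.
    //
    //     let mut cfg = QueueConfig::new(256, 1u64 << VIRTIO_F_RING_PACKED);
    //     cfg.ack_features(1u64 << VIRTIO_F_RING_PACKED); // accepted: bit was offered
    //     cfg.ack_features(1u64 << 63);                   // ignored: bit was not offered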

    /// Return whether the driver has enabled this queue.
    pub fn ready(&self) -> bool {
        self.ready
    }

    /// Signal that the driver has completed queue configuration.
    pub fn set_ready(&mut self, enable: bool) {
        self.ready = enable;
    }

    /// Convert the queue configuration into an active queue.
    pub fn activate(
        &mut self,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> Result<Queue> {
        if !self.ready {
            bail!("attempted to activate a non-ready queue");
        }

        if self.activated {
            bail!("queue is already activated");
        }

        // If the driver acknowledged the VIRTIO_F_RING_PACKED feature bit, create a packed
        // queue; otherwise, create a split queue.
        let queue: Queue = if ((self.acked_features >> VIRTIO_F_RING_PACKED) & 1) != 0 {
            let pq = PackedQueue::new(self, mem, event, interrupt)
                .context("Failed to create a packed queue.")?;
            Queue::PackedVirtQueue(pq)
        } else {
            let sq = SplitQueue::new(self, mem, event, interrupt)
                .context("Failed to create a split queue.")?;
            Queue::SplitVirtQueue(sq)
        };

        self.activated = true;
        Ok(queue)
    }

    /// Reset the queue to a clean state.
    pub fn reset(&mut self) {
        self.activated = false;
        self.ready = false;
        self.size = self.max_size;
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.acked_features = 0;
    }

    /// Take a snapshot of the queue configuration.
    pub fn snapshot(&self) -> Result<serde_json::Value> {
        serde_json::to_value(QueueConfigSnapshot {
            activated: self.activated,
            max_size: self.max_size,
            size: self.size,
            ready: self.ready,
            vector: self.vector,
            features: self.features,
            acked_features: self.acked_features,
            desc_table: self.desc_table,
            avail_ring: self.avail_ring,
            used_ring: self.used_ring,
            next_avail: self.next_avail,
            next_used: self.next_used,
        })
        .context("error serializing")
    }

    /// Restore the queue configuration from a snapshot.
    pub fn restore(&mut self, data: serde_json::Value) -> Result<()> {
        let snap: QueueConfigSnapshot =
            serde_json::from_value(data).context("error deserializing")?;
        self.activated = snap.activated;
        self.max_size = snap.max_size;
        self.size = snap.size;
        self.ready = snap.ready;
        self.vector = snap.vector;
        self.features = snap.features;
        self.acked_features = snap.acked_features;
        self.desc_table = snap.desc_table;
        self.avail_ring = snap.avail_ring;
        self.used_ring = snap.used_ring;
        self.next_avail = snap.next_avail;
        self.next_used = snap.next_used;
        Ok(())
    }
}

/// Usage: `define_queue_method!(method_name, return_type[, mut][, arg1: arg1_type, arg2:
/// arg2_type, ...])`
///
/// - `method_name`: The name of the method to be defined (as an identifier).
/// - `return_type`: The return type of the method.
/// - `mut` (optional): Include this keyword if the method requires a mutable reference to `self`
///   (`&mut self`).
/// - `arg1: arg1_type, arg2: arg2_type, ...` (optional): Include method parameters as a
///   comma-separated list of `name: type` pairs, if the method takes any arguments.
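///
/// For example, the following invocation (used later in this file) defines a `vector()` method
/// on `Queue` that dispatches to the active queue variant:
///
/// ```ignore
/// define_queue_method!(
///     /// Getter for vector field.
///     vector,
///     u16,
/// );
/// // ...which expands (roughly) to:
/// // pub fn vector(&self) -> u16 {
/// //     match self {
/// //         Queue::SplitVirtQueue(sq) => sq.vector(),
/// //         Queue::PackedVirtQueue(pq) => pq.vector(),
/// //     }
/// // }
/// ```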
macro_rules! define_queue_method {
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, mut, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&mut self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
}

/// Virtqueue interface representing the different virtqueue types.
///
/// Each variant wraps the struct implementing that queue type.
#[derive(Debug)]
pub enum Queue {
    /// Split virtqueue type in the virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-350007>
    SplitVirtQueue(SplitQueue),
    /// Packed virtqueue type in the virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-720008>
    PackedVirtQueue(PackedQueue),
}

impl Queue {
    /// Largest valid number of entries in a virtqueue.
    pub const MAX_SIZE: u16 = 32768;

    /// Asynchronously read the next descriptor chain from the queue.
    ///
    /// Returns a `DescriptorChain` when it is `await`ed.
    pub async fn next_async(
        &mut self,
        eventfd: &mut EventAsync,
    ) -> std::result::Result<DescriptorChain, AsyncError> {
        loop {
            // Check if there are more descriptors available.
            if let Some(chain) = self.pop() {
                return Ok(chain);
            }
            eventfd.next_val().await?;
        }
    }

    /// Get the first available descriptor chain without removing it from the queue.
    ///
    /// Call `pop()` on the returned [`PeekedDescriptorChain`] to remove it from the queue.
    pub fn peek(&mut self) -> Option<PeekedDescriptorChain> {
        let desc_chain = match self {
            Queue::SplitVirtQueue(q) => q.peek(),
            Queue::PackedVirtQueue(q) => q.peek(),
        }?;

        Some(PeekedDescriptorChain::new(self, desc_chain))
    }

    /// If a new `DescriptorChain` is available, returns it and removes it from the queue.
    pub fn pop(&mut self) -> Option<DescriptorChain> {
        self.peek().map(PeekedDescriptorChain::pop)
    }
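
    // Illustrative synchronous device loop using `peek`/`pop` (sketch only; `process` is a
    // hypothetical request handler returning the number of bytes written to the chain):
    //
    //     while let Some(peeked) = queue.peek() {
    //         let len = process(&peeked);
    //         let chain = peeked.pop();
    //         queue.add_used(chain, len);
    //     }
    //     queue.trigger_interrupt();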

    /// Returns `None` if `stop_rx` receives a value; otherwise returns the result of waiting
    /// for the next descriptor.
    pub async fn next_async_interruptable(
        &mut self,
        queue_event: &mut EventAsync,
        mut stop_rx: &mut oneshot::Receiver<()>,
    ) -> std::result::Result<Option<DescriptorChain>, AsyncError> {
        select_biased! {
            avail_desc_res = self.next_async(queue_event).fuse() => {
                Ok(Some(avail_desc_res?))
            }
            _ = stop_rx => Ok(None),
        }
    }

    /// Inject an interrupt into the guest on this queue.
    ///
    /// Returns `true` if an interrupt was injected into the guest for this queue, `false` if it
    /// was not.
    pub fn trigger_interrupt(&mut self) -> bool {
        match self {
            Queue::SplitVirtQueue(sq) => sq.trigger_interrupt(),
            Queue::PackedVirtQueue(pq) => pq.trigger_interrupt(),
        }
    }

    /// Restore a queue from a snapshot.
    pub fn restore(
        queue_config: &QueueConfig,
        queue_value: serde_json::Value,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> anyhow::Result<Queue> {
        if queue_config.acked_features & 1 << VIRTIO_F_RING_PACKED != 0 {
            PackedQueue::restore(queue_value, mem, event, interrupt).map(Queue::PackedVirtQueue)
        } else {
            SplitQueue::restore(queue_value, mem, event, interrupt).map(Queue::SplitVirtQueue)
        }
    }

    /// "Reclaim" a queue that was given to a vhost-user backend and is now being taken back using
    /// VHOST_USER_GET_VRING_BASE.
    ///
    /// The `Queue` will have stale fields if the vhost-user backend fulfilled any virtqueue
    /// requests. This function updates the `Queue` to pick up where the backend left off.
    pub fn vhost_user_reclaim(&mut self, vring_base: u16) {
        match self {
            Queue::SplitVirtQueue(q) => q.vhost_user_reclaim(vring_base),
            Queue::PackedVirtQueue(q) => q.vhost_user_reclaim(vring_base),
        }
    }

    /// Getter for the next index of the available ring that the device will process.
    ///
    /// Not to be confused with the available ring's index field, which is the next index for the
    /// driver to fill.
    pub fn next_avail_to_process(&self) -> u16 {
        match self {
            Queue::SplitVirtQueue(q) => q.next_avail_to_process(),
            Queue::PackedVirtQueue(q) => q.next_avail_to_process(),
        }
    }

    define_queue_method!(
        /// Getter for vector field.
        vector,
        u16,
    );

    define_queue_method!(
        /// Getter for descriptor area.
        desc_table,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for driver area.
        avail_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for device area.
        used_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Return the actual size of the queue, as the driver may not set up a
        /// queue as big as the device allows.
        size,
        u16,
    );

    define_queue_method!(
        /// Get a reference to the queue's event.
        event,
        &Event,
    );

    define_queue_method!(
        /// Get a reference to the queue's interrupt.
        interrupt,
        &Interrupt,
    );

    define_queue_method!(
        /// Puts an available descriptor head into the used ring for use by the guest.
        add_used,
        (),
        mut,
        desc_chain: DescriptorChain,
        len: u32
    );

    define_queue_method!(
        /// Take a snapshot of the queue's current status.
        snapshot,
        Result<serde_json::Value>,
    );
}
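
// Illustrative async worker built on `next_async_interruptable` (sketch only; `handle_chain`
// is a hypothetical handler). The loop exits cleanly when `stop_rx` fires or the queue event
// errors:
//
//     async fn run_queue(
//         mut queue: Queue,
//         mut kick_evt: EventAsync,
//         mut stop_rx: oneshot::Receiver<()>,
//     ) {
//         while let Ok(Some(chain)) = queue
//             .next_async_interruptable(&mut kick_evt, &mut stop_rx)
//             .await
//         {
//             let len = handle_chain(&chain);
//             queue.add_used(chain, len);
//             queue.trigger_interrupt();
//         }
//     }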

/// A `DescriptorChain` that has been peeked from a `Queue` but not popped yet.
///
/// Call [`pop()`](Self::pop) to pop this descriptor chain from the `Queue` and receive the
/// contained `DescriptorChain` object.
///
/// This object holds a mutable reference to the `Queue` to ensure it is not possible to pop or
/// peek another descriptor while a peek is already active. Either call [`pop()`](Self::pop) on
/// this object or drop it before attempting to manipulate the `Queue` again.
pub struct PeekedDescriptorChain<'q> {
    queue: &'q mut Queue,
    desc_chain: DescriptorChain,
}

impl<'q> PeekedDescriptorChain<'q> {
    /// Create a `PeekedDescriptorChain` that holds a mutable reference to its `Queue`.
    ///
    /// Use [`Queue::peek()`] rather than calling this function directly.
    fn new(queue: &'q mut Queue, desc_chain: DescriptorChain) -> Self {
        PeekedDescriptorChain { queue, desc_chain }
    }

    /// Pop this descriptor chain from the queue.
    pub fn pop(self) -> DescriptorChain {
        match self.queue {
            Queue::SplitVirtQueue(q) => q.pop_peeked(&self.desc_chain),
            Queue::PackedVirtQueue(q) => q.pop_peeked(&self.desc_chain),
        }
        self.desc_chain
    }
}

impl Deref for PeekedDescriptorChain<'_> {
    type Target = DescriptorChain;

    fn deref(&self) -> &Self::Target {
        &self.desc_chain
    }
}

impl DerefMut for PeekedDescriptorChain<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.desc_chain
    }
}
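
// Because `PeekedDescriptorChain` derefs to `DescriptorChain`, callers can inspect a chain
// before committing to remove it (illustrative sketch; `can_handle` is a hypothetical
// predicate over `&DescriptorChain`):
//
//     if let Some(peeked) = queue.peek() {
//         if can_handle(&peeked) {
//             let chain = peeked.pop();
//             // ...process `chain`, then `queue.add_used(chain, len)`...
//         }
//         // If `peeked` is dropped without `pop()`, the chain stays in the queue.
//     }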