// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Copyright © 2019 Intel Corporation
//
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::fmt::{self, Debug};
use std::mem::size_of;
use std::ops::Deref;

use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};

use crate::{Descriptor, Error};
use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;

/// A virtio descriptor chain.
#[derive(Clone, Debug)]
pub struct DescriptorChain<M> {
    mem: M,
    desc_table: GuestAddress,
    queue_size: u16,
    head_index: u16,
    next_index: u16,
    ttl: u16,
    yielded_bytes: u32,
    is_indirect: bool,
}

impl<M> DescriptorChain<M>
where
    M: Deref,
    M::Target: GuestMemory,
{
    fn with_ttl(
        mem: M,
        desc_table: GuestAddress,
        queue_size: u16,
        ttl: u16,
        head_index: u16,
    ) -> Self {
        DescriptorChain {
            mem,
            desc_table,
            queue_size,
            head_index,
            next_index: head_index,
            ttl,
            is_indirect: false,
            yielded_bytes: 0,
        }
    }

    /// Create a new `DescriptorChain` instance.
    ///
    /// # Arguments
    /// * `mem` - the `GuestMemory` object that can be used to access the buffers pointed to by the
    ///   descriptor chain.
    /// * `desc_table` - the address of the descriptor table.
    /// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor
    ///   chain.
    /// * `head_index` - the descriptor index of the chain head.
    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
    }

    /// Get the descriptor index of the chain head.
    pub fn head_index(&self) -> u16 {
        self.head_index
    }

    /// Return a `GuestMemory` object that can be used to access the buffers pointed to by the
    /// descriptor chain.
    pub fn memory(&self) -> &M::Target {
        self.mem.deref()
    }

    /// Return an iterator that only yields the readable descriptors in the chain.
    pub fn readable(self) -> DescriptorChainRwIter<M> {
        DescriptorChainRwIter {
            chain: self,
            writable: false,
        }
    }

    /// Return an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> DescriptorChainRwIter<M> {
        DescriptorChainRwIter {
            chain: self,
            writable: true,
        }
    }

    // Alters the internal state of the `DescriptorChain` to switch to iterating over the
    // indirect descriptor table defined by `desc`.
    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
        // Check that the VIRTQ_DESC_F_INDIRECT flag (i.e., `is_indirect`) is not already set:
        // an indirect descriptor must not point to another indirect table.
        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
        if self.is_indirect {
            return Err(Error::InvalidIndirectDescriptor);
        }

        // Alignment requirements for vring elements start from virtio 1.0, but they do not
        // apply to the address of an indirect descriptor table; only its length must be a
        // multiple of the descriptor size.
        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
            return Err(Error::InvalidIndirectDescriptorTable);
        }

        // It is safe to do a plain division since we checked above that desc.len() is a multiple
        // of VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
        if table_len > u32::from(u16::MAX) {
            return Err(Error::InvalidIndirectDescriptorTable);
        }

        self.desc_table = desc.addr();
        // try_from cannot fail as we've checked table_len above.
        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
        self.next_index = 0;
        self.ttl = self.queue_size;
        self.is_indirect = true;

        Ok(())
    }
}
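
/// Iterating a `DescriptorChain` yields the descriptors of a single chain in order, by
/// following the driver-written `next` indices, and transparently switches into an
/// indirect table when a descriptor with VIRTQ_DESC_F_INDIRECT is encountered. Iteration
/// ends after the first descriptor without VIRTQ_DESC_F_NEXT set, when the `ttl` budget
/// (initialized to `queue_size` to protect against loops in the chain) runs out, or when
/// accepting another descriptor would push the chain past the 2^32-byte limit mandated by
/// the VIRTIO spec.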
impl<M> Iterator for DescriptorChain<M>
where
    M: Deref,
    M::Target: GuestMemory,
{
    type Item = Descriptor;

    /// Return the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by
    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
    /// _available_ descriptor chain.
    fn next(&mut self) -> Option<Self::Item> {
        if self.ttl == 0 || self.next_index >= self.queue_size {
            return None;
        }

        let desc_addr = self
            .desc_table
            // The multiplication cannot overflow a u64 since we are multiplying a u16 with a
            // small number.
            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;

        // The guest driver should not touch the descriptor once submitted, so it's safe
        // to use read_obj() here.
        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;

        if desc.refers_to_indirect_table() {
            self.switch_to_indirect_table(desc).ok()?;
            return self.next();
        }

        // Constructing a chain that is longer than 2^32 bytes is illegal, so terminate the
        // iteration if something violated this.
        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
        // longer than 2^32 bytes in total;")
        self.yielded_bytes = self.yielded_bytes.checked_add(desc.len())?;

        if desc.has_next() {
            self.next_index = desc.next();
            // It's ok to decrement `self.ttl` here because we check at the start of the method
            // that it's greater than 0.
            self.ttl -= 1;
        } else {
            self.ttl = 0;
        }

        Some(desc)
    }
}

/// An iterator for readable or writable descriptors.
#[derive(Clone)]
pub struct DescriptorChainRwIter<M> {
    chain: DescriptorChain<M>,
    writable: bool,
}

impl<M> Iterator for DescriptorChainRwIter<M>
where
    M: Deref,
    M::Target: GuestMemory,
{
    type Item = Descriptor;

    /// Return the next readable/writable descriptor (depending on the `writable` value) in this
    /// descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by
    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
    /// _available_ descriptor chain.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.chain.next() {
                Some(v) => {
                    if v.is_write_only() == self.writable {
                        return Some(v);
                    }
                }
                None => return None,
            }
        }
    }
}

// We can't derive Debug, because rustc doesn't generate the `M::Target: Debug` constraint.
impl<M> Debug for DescriptorChainRwIter<M>
where
    M: Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DescriptorChainRwIter")
            .field("chain", &self.chain)
            .field("writable", &self.writable)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mock::{DescriptorTable, MockSplitQueue};
    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
    use vm_memory::GuestMemoryMmap;

    #[test]
    fn test_checked_new_descriptor_chain() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        assert!(vq.end().0 < 0x1000);

        // index >= queue_size
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
                .next()
                .is_none()
        );

        // desc_table address is way off
        assert!(
            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
                .next()
                .is_none()
        );

        {
            // The first descriptor has a normal len, and the next_descriptor flag is set,
            // but the index of the next descriptor is too large.
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_NEXT as u16, 16);
            vq.desc_table().store(0, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
            c.next().unwrap();
            assert!(c.next().is_none());
        }

        // Finally, let's test an ok chain.
        {
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_NEXT as u16, 1);
            vq.desc_table().store(0, desc).unwrap();

            let desc = Descriptor::new(0x2000, 0x1000, 0, 0);
            vq.desc_table().store(1, desc).unwrap();

            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);

            assert_eq!(
                c.memory() as *const GuestMemoryMmap,
                m as *const GuestMemoryMmap
            );

            assert_eq!(c.desc_table, vq.start());
            assert_eq!(c.queue_size, 16);
            assert_eq!(c.ttl, c.queue_size);

            let desc = c.next().unwrap();
            assert_eq!(desc.addr(), GuestAddress(0x1000));
            assert_eq!(desc.len(), 0x1000);
            assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
            assert_eq!(desc.next(), 1);
            assert_eq!(c.ttl, c.queue_size - 1);

            assert!(c.next().is_some());
            // The descriptor above was the last one in the chain, so `ttl` should be 0 now.
            assert_eq!(c.ttl, 0);
            assert!(c.next().is_none());
            assert_eq!(c.ttl, 0);
        }
    }
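
    // The iterator also enforces the spec rule quoted in `next()` that a chain must not grow
    // past 2^32 bytes in total. This is a minimal regression sketch for that guard, assuming
    // the mock `Descriptor::new` performs no validation of `len`: yielding the second
    // descriptor would overflow the u32 running total, so iteration must stop after the
    // first one.
    #[test]
    fn test_chain_length_limit() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        // The first descriptor already claims u32::MAX bytes; the second one would push the
        // total past the limit.
        let desc = Descriptor::new(0x1000, u32::MAX, VRING_DESC_F_NEXT as u16, 1);
        vq.desc_table().store(0, desc).unwrap();
        let desc = Descriptor::new(0x2000, 0x1000, 0, 0);
        vq.desc_table().store(1, desc).unwrap();

        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
        assert!(c.next().is_some());
        // The second descriptor is never yielded; the chain is treated as exhausted.
        assert!(c.next().is_none());
    }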

    #[test]
    fn test_ttl_wrap_around() {
        const QUEUE_SIZE: u16 = 16;

        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x100000)]).unwrap();
        let vq = MockSplitQueue::new(m, QUEUE_SIZE);

        // Populate the entire descriptor table with entries. Only the last one should not have
        // the VIRTQ_DESC_F_NEXT flag set.
        for i in 0..QUEUE_SIZE - 1 {
            let desc = Descriptor::new(
                0x1000 * (i + 1) as u64,
                0x1000,
                VRING_DESC_F_NEXT as u16,
                i + 1,
            );
            vq.desc_table().store(i, desc).unwrap();
        }
        let desc = Descriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0);
        vq.desc_table().store(QUEUE_SIZE - 1, desc).unwrap();

        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
        assert_eq!(c.ttl, c.queue_size);

        // Validate that `ttl` wraps around even when the entire descriptor table is populated.
        for i in 0..QUEUE_SIZE {
            let _desc = c.next().unwrap();
            assert_eq!(c.ttl, c.queue_size - i - 1);
        }
        assert!(c.next().is_none());
    }
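
    // `ttl` is also what stops a malicious or buggy driver from trapping the device in an
    // endless loop. A minimal sketch of that guard, assuming nothing rejects a descriptor
    // that names itself as its own successor: iteration must give up after at most
    // `queue_size` steps.
    #[test]
    fn test_circular_chain_terminates() {
        const QUEUE_SIZE: u16 = 16;

        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, QUEUE_SIZE);

        // Descriptor 0 points back to itself, forming a one-element cycle.
        let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_NEXT as u16, 0);
        vq.desc_table().store(0, desc).unwrap();

        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
        // Each step decrements `ttl`, so the cycle is broken after `queue_size` descriptors.
        for _ in 0..QUEUE_SIZE {
            assert!(c.next().is_some());
        }
        assert!(c.next().is_none());
    }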

    #[test]
    fn test_new_from_indirect_descriptor() {
        // This is testing that chaining an indirect table works as expected. It is also a
        // negative test for the following requirement from the spec:
        // `A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.`
        // If the driver sets both of these flags, we check that the device doesn't panic.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with one normal descriptor and one pointing to an indirect table.
        let desc = Descriptor::new(0x6000, 0x1000, VRING_DESC_F_NEXT as u16, 1);
        dtable.store(0, desc).unwrap();
        // The spec forbids setting both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.
        // We do not currently enforce this rule; we just ignore the VIRTQ_DESC_F_NEXT flag.
        let desc = Descriptor::new(
            0x7000,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        );
        dtable.store(1, desc).unwrap();
        let desc = Descriptor::new(0x8000, 0x1000, 0, 0);
        dtable.store(2, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // Create an indirect table with 4 chained descriptors.
        let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
        for i in 0..4u16 {
            let desc: Descriptor = if i < 3 {
                Descriptor::new(0x1000 * i as u64, 0x1000, VRING_DESC_F_NEXT as u16, i + 1)
            } else {
                Descriptor::new(0x1000 * i as u64, 0x1000, 0, 0)
            };
            idtable.store(i, desc).unwrap();
        }

        assert_eq!(c.head_index(), 0);
        // Consume the first descriptor.
        c.next().unwrap();

        // The chain logic hasn't parsed the indirect descriptor yet.
        assert!(!c.is_indirect);

        // Try to iterate through the indirect descriptor chain.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
        // Even though we added a new descriptor after the one that is pointing to the indirect
        // table, this descriptor won't be available when parsing the chain.
        assert!(c.next().is_none());
    }

    #[test]
    fn test_indirect_descriptor_address_noaligned() {
        // Alignment requirements for vring elements start from virtio 1.0, but they do not
        // apply to the address of an indirect descriptor table.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let dtable = vq.desc_table();

        // Create a chain with a descriptor pointing to an indirect table with an unaligned
        // address.
        let desc = Descriptor::new(
            0x7001,
            0x1000,
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
            2,
        );
        dtable.store(0, desc).unwrap();

        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);

        // Create an indirect table with 4 chained descriptors.
        let idtable = DescriptorTable::new(m, GuestAddress(0x7001), 4);
        for i in 0..4u16 {
            let desc: Descriptor = if i < 3 {
                Descriptor::new(0x1000 * i as u64, 0x1000, VRING_DESC_F_NEXT as u16, i + 1)
            } else {
                Descriptor::new(0x1000 * i as u64, 0x1000, 0, 0)
            };
            idtable.store(i, desc).unwrap();
        }

        // Try to iterate through the indirect descriptor chain.
        for i in 0..4 {
            let desc = c.next().unwrap();
            assert!(c.is_indirect);
            if i < 3 {
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
                assert_eq!(desc.next(), i + 1);
            }
        }
    }
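
    // `readable()` and `writable()` are filters over the plain iterator, keyed off the
    // VIRTQ_DESC_F_WRITE flag via `Descriptor::is_write_only`. A small sketch of the split,
    // assuming `VRING_DESC_F_WRITE` from `virtio_bindings` carries the spec semantics; the
    // chain is cloned because each filtering iterator consumes it.
    #[test]
    fn test_readable_writable_split() {
        use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;

        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        // One device-readable descriptor followed by two device-writable ones.
        let flags = [
            VRING_DESC_F_NEXT as u16,
            (VRING_DESC_F_NEXT | VRING_DESC_F_WRITE) as u16,
            VRING_DESC_F_WRITE as u16,
        ];
        for (i, f) in flags.iter().enumerate() {
            let i = i as u16;
            let next = if i < 2 { i + 1 } else { 0 };
            let desc = Descriptor::new(0x1000 * (i + 1) as u64, 0x1000, *f, next);
            vq.desc_table().store(i, desc).unwrap();
        }

        let c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
        assert_eq!(c.clone().readable().count(), 1);
        assert_eq!(c.writable().count(), 2);
    }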

    #[test]
    fn test_indirect_descriptor_err() {
        // We are testing here different misconfigurations of the indirect table. For these
        // error cases, the iterator over the descriptor chain won't return a new descriptor.
        {
            let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: len not a
            // multiple of the descriptor size.
            let desc = Descriptor::new(0x1000, 0x1001, VRING_DESC_F_INDIRECT as u16, 0);
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an invalid indirect table: table len
            // > u16::MAX.
            let desc = Descriptor::new(
                0x1000,
                (u16::MAX as u32 + 1) * VRING_DESC_ALIGN_SIZE,
                VRING_DESC_F_INDIRECT as u16,
                0,
            );
            vq.desc_table().store(0, desc).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }

        {
            let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
            let vq = MockSplitQueue::new(m, 16);

            // Create a chain with a descriptor pointing to an indirect table.
            let desc = Descriptor::new(0x1000, 0x1000, VRING_DESC_F_INDIRECT as u16, 0);
            vq.desc_table().store(0, desc).unwrap();
            // It's ok for an indirect descriptor to have flags = 0.
            let desc = Descriptor::new(0x3000, 0x1000, 0, 0);
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);
            assert!(c.next().is_some());

            // But it's not allowed to have an indirect descriptor that points to another
            // indirect table.
            let desc = Descriptor::new(0x3000, 0x1000, VRING_DESC_F_INDIRECT as u16, 0);
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();

            let mut c: DescriptorChain<&GuestMemoryMmap> =
                DescriptorChain::new(m, vq.start(), 16, 0);

            assert!(c.next().is_none());
        }
    }
}