// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::Bound;

use crate::AddressRange;
use crate::Alloc;
use crate::Error;
use crate::Result;

/// Manages allocating address ranges.
/// Use `AddressAllocator` whenever an address range needs to be allocated to different users.
/// Allocations must be uniquely tagged with an `Alloc` enum, which can be used for lookup.
/// A human-readable tag `String` must also be provided for debugging / reference.
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
    /// The list of pools from which addresses are allocated. The union
    /// of all regions from |allocs| and |regions| equals the pools.
    pools: Vec<AddressRange>,
    min_align: u64,
    preferred_align: u64,
    /// The regions that are allocated.
    allocs: HashMap<Alloc, (AddressRange, String)>,
    /// The regions that are not allocated yet.
    regions: BTreeSet<AddressRange>,
}

impl AddressAllocator {
    /// Creates a new `AddressAllocator` for managing a range of addresses.
    /// Can return an error if the alignment isn't a power of two.
    ///
    /// * `pool` - The address range to allocate from.
    /// * `min_align` - The minimum size of an address region to align to, defaults to four.
    /// * `preferred_align` - The preferred alignment of an address region, used if possible.
    ///
    /// If an allocation cannot be satisfied with the preferred alignment, the minimum alignment
    /// will be used instead.
    pub fn new(
        pool: AddressRange,
        min_align: Option<u64>,
        preferred_align: Option<u64>,
    ) -> Result<Self> {
        Self::new_from_list(vec![pool], min_align, preferred_align)
    }

    /// Creates a new `AddressAllocator` for managing a list of address ranges.
    /// Empty pools are filtered out; can return an error if the alignment isn't a power of two.
    ///
    /// * `pools` - The list of pools to initialize the allocator with.
    /// * `min_align` - The minimum size of an address region to align to, defaults to four.
    /// * `preferred_align` - The preferred alignment of an address region, used if possible.
    ///
    /// If an allocation cannot be satisfied with the preferred alignment, the minimum alignment
    /// will be used instead.
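    ///
    /// # Example
    ///
    /// A minimal sketch of building an allocator over two disjoint pools
    /// (the `use resources::…` path assumes this crate's name; `Alloc::Anon`
    /// is used purely for brevity, as in the tests below):
    ///
    /// ```
    /// use resources::{AddressAllocator, AddressRange, Alloc}; // assumes crate name `resources`
    ///
    /// let mut allocator = AddressAllocator::new_from_list(
    ///     vec![
    ///         AddressRange { start: 0x1000, end: 0x1fff },
    ///         AddressRange { start: 0x3000, end: 0x3fff },
    ///     ],
    ///     None, // min_align: defaults to 4
    ///     None, // preferred_align: defaults to min_align
    /// )
    /// .unwrap();
    /// assert_eq!(
    ///     allocator.allocate(0x1000, Alloc::Anon(0), "a".to_string()),
    ///     Ok(0x1000)
    /// );
    /// // The first pool is now exhausted, so this comes from the second pool.
    /// assert_eq!(
    ///     allocator.allocate(0x1000, Alloc::Anon(1), "b".to_string()),
    ///     Ok(0x3000)
    /// );
    /// ```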
    pub fn new_from_list<T>(
        pools: T,
        min_align: Option<u64>,
        preferred_align: Option<u64>,
    ) -> Result<Self>
    where
        T: IntoIterator<Item = AddressRange>,
    {
        let pools: Vec<AddressRange> = pools.into_iter().filter(|p| !p.is_empty()).collect();

        let min_align = min_align.unwrap_or(4);
        if !min_align.is_power_of_two() || min_align == 0 {
            return Err(Error::BadAlignment);
        }

        let preferred_align = preferred_align.unwrap_or(min_align);
        if !preferred_align.is_power_of_two() || preferred_align < min_align {
            return Err(Error::BadAlignment);
        }

        let mut regions = BTreeSet::new();
        for r in pools.iter() {
            regions.insert(*r);
        }
        Ok(AddressAllocator {
            pools,
            min_align,
            preferred_align,
            allocs: HashMap::new(),
            regions,
        })
    }

    /// Gets the regions managed by the allocator.
    ///
    /// This returns the (non-empty) pools provided when the allocator was created.
    pub fn pools(&self) -> &[AddressRange] {
        &self.pools
    }

    fn internal_allocate_from_slot(
        &mut self,
        slot: AddressRange,
        range: AddressRange,
        alloc: Alloc,
        tag: String,
    ) -> Result<u64> {
        let slot_was_present = self.regions.remove(&slot);
        assert!(slot_was_present);

        let (before, after) = slot.non_overlapping_ranges(range);

        if !before.is_empty() {
            self.regions.insert(before);
        }
        if !after.is_empty() {
            self.regions.insert(after);
        }

        self.allocs.insert(alloc, (range, tag));
        Ok(range.start)
    }

    fn internal_allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
        reverse: bool,
    ) -> Result<u64> {
        let alignment = cmp::max(self.min_align, alignment);

        if self.allocs.contains_key(&alloc) {
            return Err(Error::ExistingAlloc(alloc));
        }
        if size == 0 {
            return Err(Error::AllocSizeZero);
        }
        if !alignment.is_power_of_two() {
            return Err(Error::BadAlignment);
        }

        let region = if !reverse {
            // Finds the first region matching alignment and size.
            self.regions
                .iter()
                .find(|range| {
                    match range.start % alignment {
                        0 => range.start.checked_add(size - 1),
                        r => range.start.checked_add(size - 1 + alignment - r),
                    }
                    .map_or(false, |end| end <= range.end)
                })
                .cloned()
        } else {
            // Finds the last region matching alignment and size.
            self.regions
                .iter()
                .rev()
                .find(|range| {
                    range
                        .end
                        .checked_sub(size - 1)
                        .map_or(false, |start| start & !(alignment - 1) >= range.start)
                })
                .cloned()
        };

        match region {
            Some(slot) => {
                let start = if !reverse {
                    match slot.start % alignment {
                        0 => slot.start,
                        r => slot.start + alignment - r,
                    }
                } else {
                    (slot.end - (size - 1)) & !(alignment - 1)
                };
                let end = start + size - 1;
                let range = AddressRange { start, end };

                self.internal_allocate_from_slot(slot, range, alloc, tag)
            }
            None => Err(Error::OutOfSpace),
        }
    }

    /// Allocates a range of addresses from the managed region, starting from the
    /// highest available address, with a tag and minimal alignment. Returns the
    /// allocated address; the range and tag can be retrieved through the `get` method.
    pub fn reverse_allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        self.internal_allocate_with_align(size, alloc, tag, alignment, true)
    }

    /// Allocates a range of addresses, preferring to allocate from high rather than low addresses.
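    ///
    /// # Example
    ///
    /// A minimal sketch of top-down allocation, under the same crate-name
    /// assumption as the example on `new_from_list`:
    ///
    /// ```
    /// use resources::{AddressAllocator, AddressRange, Alloc}; // assumed crate name
    ///
    /// let mut allocator = AddressAllocator::new(
    ///     AddressRange { start: 0x1000, end: 0x1fff },
    ///     None,
    ///     None,
    /// )
    /// .unwrap();
    /// // The highest suitably aligned 0x100-byte range is handed out first.
    /// assert_eq!(
    ///     allocator.reverse_allocate(0x100, Alloc::Anon(0), "top".to_string()),
    ///     Ok(0x1f00)
    /// );
    /// ```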
    pub fn reverse_allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
        if let Ok(pref_alloc) =
            self.reverse_allocate_with_align(size, alloc, tag.clone(), self.preferred_align)
        {
            return Ok(pref_alloc);
        }
        self.reverse_allocate_with_align(size, alloc, tag, self.min_align)
    }

    /// Allocates a range of addresses from the managed region with a tag
    /// and minimal alignment. Returns the allocated address; the range and tag
    /// can be retrieved through the `get` method.
    pub fn allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        self.internal_allocate_with_align(size, alloc, tag, alignment, false)
    }

    /// Allocates a range of addresses with the preferred alignment, falling back
    /// to the minimum alignment if no region can satisfy it.
    pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
        if let Ok(pref_alloc) =
            self.allocate_with_align(size, alloc, tag.clone(), self.preferred_align)
        {
            return Ok(pref_alloc);
        }
        self.allocate_with_align(size, alloc, tag, self.min_align)
    }

    /// Allocates a range of addresses from the managed region with a tag
    /// and required location. Allocation alignment is not enforced.
    /// Returns `OutOfSpace` if the requested range is not available or `ExistingAlloc`
    /// if the requested range overlaps an existing allocation.
    pub fn allocate_at(&mut self, range: AddressRange, alloc: Alloc, tag: String) -> Result<()> {
        if self.allocs.contains_key(&alloc) {
            return Err(Error::ExistingAlloc(alloc));
        }

        if range.is_empty() {
            return Err(Error::AllocSizeZero);
        }

        match self
            .regions
            .iter()
            .find(|avail_range| avail_range.contains_range(range))
        {
            Some(&slot) => {
                let _address = self.internal_allocate_from_slot(slot, range, alloc, tag)?;
                Ok(())
            }
            None => {
                if let Some(existing_alloc) = self.find_overlapping(range) {
                    Err(Error::ExistingAlloc(existing_alloc))
                } else {
                    Err(Error::OutOfSpace)
                }
            }
        }
    }

    /// Releases an existing allocation back to the free pool and returns the range
    /// that was released.
    pub fn release(&mut self, alloc: Alloc) -> Result<AddressRange> {
        if let Some((range, _tag)) = self.allocs.remove(&alloc) {
            self.insert_at(range)?;
            Ok(range)
        } else {
            Err(Error::BadAlloc(alloc))
        }
    }

    /// Releases the allocation containing the given value and returns its range.
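    ///
    /// # Example
    ///
    /// A minimal sketch: releasing via any address inside an allocation frees
    /// the whole range (same crate-name assumption as the earlier examples):
    ///
    /// ```
    /// use resources::{AddressAllocator, AddressRange, Alloc}; // assumed crate name
    ///
    /// let mut allocator = AddressAllocator::new(
    ///     AddressRange { start: 0x1000, end: 0x1fff },
    ///     None,
    ///     None,
    /// )
    /// .unwrap();
    /// allocator.allocate(0x100, Alloc::Anon(0), "a".to_string()).unwrap();
    /// // 0x1050 falls inside the 0x1000..=0x10ff allocation above.
    /// assert_eq!(
    ///     allocator.release_containing(0x1050),
    ///     Ok(AddressRange { start: 0x1000, end: 0x10ff })
    /// );
    /// ```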
    pub fn release_containing(&mut self, value: u64) -> Result<AddressRange> {
        if let Some(alloc) = self.find_overlapping(AddressRange {
            start: value,
            end: value,
        }) {
            self.release(alloc)
        } else {
            Err(Error::OutOfSpace)
        }
    }

    // Finds an existing allocation that overlaps the region defined by `range`. If more
    // than one allocation overlaps the given region, any of them may be returned, since the
    // HashMap iterator is not ordered in any particular way.
    fn find_overlapping(&self, range: AddressRange) -> Option<Alloc> {
        if range.is_empty() {
            return None;
        }

        self.allocs
            .iter()
            .find(|(_, &(alloc_range, _))| alloc_range.overlaps(range))
            .map(|(&alloc, _)| alloc)
    }

    // Returns the highest address among the regions that are not yet allocated.
    pub fn get_max_addr(&self) -> u64 {
        self.regions.iter().fold(0, |x, range| x.max(range.end))
    }

    /// Returns the allocation associated with `alloc`, or `None` if no such allocation exists.
    pub fn get(&self, alloc: &Alloc) -> Option<&(AddressRange, String)> {
        self.allocs.get(alloc)
    }

    /// Inserts a range of addresses into the pool, coalescing neighboring regions.
    fn insert_at(&mut self, mut slot: AddressRange) -> Result<()> {
        if slot.is_empty() {
            return Err(Error::AllocSizeZero);
        }

        // Find the region with the highest starting address that is at most
        // |slot.start|. Check if it overlaps with |slot|, or if it is adjacent to
        // (and thus can be coalesced with) |slot|.
        let mut smaller_merge = None;
        if let Some(smaller) = self
            .regions
            .range((Bound::Unbounded, Bound::Included(slot)))
            .max()
        {
            // If there is overflow, then |smaller| covers up through u64::MAX
            let next_addr = smaller
                .end
                .checked_add(1)
                .ok_or(Error::RegionOverlap(slot))?;
            match next_addr.cmp(&slot.start) {
                cmp::Ordering::Less => (),
                cmp::Ordering::Equal => smaller_merge = Some(*smaller),
                cmp::Ordering::Greater => return Err(Error::RegionOverlap(slot)),
            }
        }

        // Find the region with the smallest starting address that is greater than
        // |slot.start|. Check if it overlaps with |slot|, or if it is adjacent to
        // (and thus can be coalesced with) |slot|.
        let mut larger_merge = None;
        if let Some(larger) = self
            .regions
            .range((Bound::Excluded(slot), Bound::Unbounded))
            .min()
        {
            // If there is underflow, then |larger| covers down through 0
            let prev_addr = larger
                .start
                .checked_sub(1)
                .ok_or(Error::RegionOverlap(slot))?;
            match slot.end.cmp(&prev_addr) {
                cmp::Ordering::Less => (),
                cmp::Ordering::Equal => larger_merge = Some(*larger),
                cmp::Ordering::Greater => return Err(Error::RegionOverlap(slot)),
            }
        }

        if let Some(smaller) = smaller_merge {
            self.regions.remove(&smaller);
            slot.start = smaller.start;
        }
        if let Some(larger) = larger_merge {
            self.regions.remove(&larger);
            slot.end = larger.end;
        }
        self.regions.insert(slot);

        Ok(())
    }

    /// Returns an address from the associated PCI `alloc` given an allocation offset and size.
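    ///
    /// # Example
    ///
    /// A minimal sketch of translating a BAR-relative offset back to an
    /// absolute address. The bus/device numbering here is arbitrary, and the
    /// `use resources::…` path is the same assumption as in earlier examples:
    ///
    /// ```
    /// use resources::{AddressAllocator, AddressRange, Alloc}; // assumed crate name
    ///
    /// let mut allocator = AddressAllocator::new(
    ///     AddressRange { start: 0x1000, end: 0xffff },
    ///     None,
    ///     None,
    /// )
    /// .unwrap();
    /// let bar = Alloc::PciBar { bus: 0, dev: 1, func: 0, bar: 0 };
    /// allocator.allocate(0x800, bar, "bar0".to_string()).unwrap();
    /// // The BAR was placed at 0x1000, so offset 0x600 maps to 0x1600.
    /// assert_eq!(allocator.address_from_pci_offset(bar, 0x600, 0x100), Ok(0x1600));
    /// ```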
    pub fn address_from_pci_offset(&self, alloc: Alloc, offset: u64, size: u64) -> Result<u64> {
        match alloc {
            Alloc::PciBar { .. } => (),
            _ => return Err(Error::InvalidAlloc(alloc)),
        };

        match self.allocs.get(&alloc) {
            Some((pci_bar_range, _)) => {
                let address = pci_bar_range
                    .start
                    .checked_add(offset)
                    .ok_or(Error::OutOfBounds)?;
                let offset_range =
                    AddressRange::from_start_and_size(address, size).ok_or(Error::OutOfBounds)?;
                if pci_bar_range.contains_range(offset_range) {
                    Ok(address)
                } else {
                    Err(Error::OutOfBounds)
                }
            }
            None => Err(Error::InvalidAlloc(alloc)),
        }
    }
}

/// Contains a set of `AddressAllocator`s for allocating address ranges.
/// When attempting an allocation, each allocator will be tried in order until
/// the allocation is successful.
/// See `AddressAllocator` for function documentation.
pub struct AddressAllocatorSet<'a> {
    allocators: &'a mut [AddressAllocator],
}

impl<'a> AddressAllocatorSet<'a> {
    pub fn new(allocators: &'a mut [AddressAllocator]) -> Self {
        AddressAllocatorSet { allocators }
    }

    pub fn allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate_with_align(size, alloc, tag.clone(), alignment);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate(size, alloc, tag.clone());
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn allocate_at(&mut self, range: AddressRange, alloc: Alloc, tag: String) -> Result<()> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate_at(range, alloc, tag.clone());
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn release(&mut self, alloc: Alloc) -> Result<AddressRange> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.release(alloc);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn get(&self, alloc: &Alloc) -> Option<&(AddressRange, String)> {
        for allocator in self.allocators.iter() {
            let opt = allocator.get(alloc);
            if opt.is_some() {
                return opt;
            }
        }
        None
    }

    pub fn address_from_pci_offset(&self, alloc: Alloc, offset: u64, size: u64) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter() {
            last_res = allocator.address_from_pci_offset(alloc, offset, size);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }
}

#[cfg(test)]
mod tests {
    use super::*;
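
    // A small sketch illustrating the fallback documented on `new`: when
    // `preferred_align` cannot be satisfied, `allocate` retries with
    // `min_align`. (Alloc::Anon is used for brevity, as elsewhere.)
    #[test]
    fn allocate_prefers_then_falls_back_to_min_align() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1fff,
            },
            Some(0x100),
            Some(0x1000),
        )
        .unwrap();
        // The first allocation gets the preferred 0x1000 alignment.
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        // No 0x1000-aligned start remains with enough room, so `allocate`
        // falls back to the 0x100 minimum alignment.
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1800)
        );
    }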

    #[test]
    fn example() {
        // Anon is used for brevity. Don't manually instantiate Anon allocs!
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), "caps".to_string()),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), "cache".to_string()),
            Ok(0x1200)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(2), "etc".to_string()),
            Ok(0x1300)
        );
        assert_eq!(
            pool.get(&Alloc::Anon(1)),
            Some(&(
                AddressRange {
                    start: 0x1200,
                    end: 0x12FF
                },
                "cache".to_string()
            ))
        );
    }

    #[test]
    fn empty_allocator() {
        let mut pool = AddressAllocator::new_from_list(Vec::new(), None, None).unwrap();
        assert_eq!(pool.pools(), &[]);
        assert_eq!(
            pool.allocate(1, Alloc::Anon(0), "test".to_string()),
            Err(Error::OutOfSpace)
        );
    }

    #[test]
    fn new_fails_alignment_zero() {
        assert!(AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF
            },
            Some(0),
            None
        )
        .is_err());
    }

    #[test]
    fn new_fails_alignment_non_power_of_two() {
        assert!(AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF
            },
            Some(200),
            None
        )
        .is_err());
    }

    #[test]
    fn allocate_fails_existing_alloc() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Err(Error::ExistingAlloc(Alloc::Anon(0)))
        );
    }

    #[test]
    fn allocate_fails_not_enough_space() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x900, Alloc::Anon(1), String::from("bar1")),
            Err(Error::OutOfSpace)
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(2), String::from("bar2")),
            Ok(0x1800)
        );
    }

    #[test]
    fn allocate_with_special_alignment() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x10, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x1200,
                    end: 0x13ff,
                },
                Alloc::Anon(1),
                String::from("bar1")
            ),
            Ok(())
        );
        assert_eq!(
            pool.allocate_with_align(0x800, Alloc::Anon(2), String::from("bar2"), 0x800),
            Ok(0x1800)
        );
    }
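
    // A small sketch of `reverse_allocate`: ranges are carved from the top
    // of the pool downward.
    #[test]
    fn reverse_allocate_takes_highest_range() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1fff,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.reverse_allocate(0x100, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1f00)
        );
        assert_eq!(
            pool.reverse_allocate(0x100, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1e00)
        );
    }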

    #[test]
    fn allocate_and_split_allocate_at() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1fff,
            },
            Some(1),
            None,
        )
        .unwrap();
        // 0x1200..0x1a00
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x1200,
                    end: 0x19ff,
                },
                Alloc::Anon(0),
                String::from("bar0")
            ),
            Ok(())
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
            Err(Error::OutOfSpace)
        );
        // 0x1a00..0x2000
        assert_eq!(
            pool.allocate(0x600, Alloc::Anon(2), String::from("bar2")),
            Ok(0x1a00)
        );
        // 0x1000..0x1200
        assert_eq!(
            pool.allocate(0x200, Alloc::Anon(3), String::from("bar3")),
            Ok(0x1000)
        );
        // 0x1b00..0x1c00 (overlaps with 0x1a00..0x2000)
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x1b00,
                    end: 0x1bff,
                },
                Alloc::Anon(4),
                String::from("bar4")
            ),
            Err(Error::ExistingAlloc(Alloc::Anon(2)))
        );
        // 0x1fff..0x2000 (overlaps with 0x1a00..0x2000)
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x1fff,
                    end: 0x1fff,
                },
                Alloc::Anon(5),
                String::from("bar5")
            ),
            Err(Error::ExistingAlloc(Alloc::Anon(2)))
        );
        // 0x1200..0x1201 (overlaps with 0x1200..0x1a00)
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x1200,
                    end: 0x1200,
                },
                Alloc::Anon(6),
                String::from("bar6")
            ),
            Err(Error::ExistingAlloc(Alloc::Anon(0)))
        );
        // 0x11ff..0x1200 (overlaps with 0x1000..0x1200)
        assert_eq!(
            pool.allocate_at(
                AddressRange {
                    start: 0x11ff,
                    end: 0x11ff,
                },
                Alloc::Anon(7),
                String::from("bar7")
            ),
            Err(Error::ExistingAlloc(Alloc::Anon(3)))
        );
        // 0x1100..0x1300 (overlaps with 0x1000..0x1200 and 0x1200..0x1a00)
        match pool.allocate_at(
            AddressRange {
                start: 0x1100,
                end: 0x12ff,
            },
            Alloc::Anon(8),
            String::from("bar8"),
        ) {
            Err(Error::ExistingAlloc(Alloc::Anon(0) | Alloc::Anon(3))) => {}
            x => panic!("unexpected result {:?}", x),
        }
    }

    #[test]
    fn allocate_alignment() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_retrieve_alloc() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.get(&Alloc::Anon(0)),
            Some(&(
                AddressRange {
                    start: 0x1000,
                    end: 0x110f,
                },
                String::from("bar0")
            ))
        );
    }

    #[test]
    fn allocate_with_alignment_allocator_alignment() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            Some(0x100),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x1),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x1),
            Ok(0x1200)
        );
    }
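
    // A small sketch exercising `release_containing` followed by reuse of
    // the coalesced free range.
    #[test]
    fn release_containing_frees_whole_range() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1fff,
            },
            None,
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        // Releasing via an address inside the allocation frees all of it.
        assert_eq!(
            pool.release_containing(0x1400),
            Ok(AddressRange {
                start: 0x1000,
                end: 0x17ff,
            })
        );
        // The released range is immediately reusable.
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1000)
        );
    }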

    #[test]
    fn allocate_with_alignment_custom_alignment() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            Some(0x4),
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x100),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_with_alignment_no_allocator_alignment() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            None,
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x100),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_with_alignment_alignment_non_power_of_two() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            None,
            None,
        )
        .unwrap();
        assert!(pool
            .allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 200)
            .is_err());
    }

    #[test]
    fn allocate_with_release() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            None,
            None,
        )
        .unwrap();
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert!(pool.release(Alloc::Anon(0)).is_ok());
        assert_eq!(
            pool.allocate_with_align(0x1000, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
    }

    #[test]
    fn coalescing_and_overlap() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            None,
            None,
        )
        .unwrap();
        assert!(pool
            .insert_at(AddressRange {
                start: 0x3000,
                end: 0x3fff,
            })
            .is_ok());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x1fff,
                end: 0x201e,
            })
            .is_err());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2ff1,
                end: 0x3000,
            })
            .is_err());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x1800,
                end: 0x27ff,
            })
            .is_err());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2000,
                end: 0x2fff,
            })
            .is_ok());
        assert_eq!(
            pool.allocate(0x3000, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
    }

    #[test]
    fn coalescing_single_addresses() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0x1FFF,
            },
            None,
            None,
        )
        .unwrap();
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2001,
                end: 0x2001,
            })
            .is_ok());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2003,
                end: 0x2003,
            })
            .is_ok());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2000,
                end: 0x2000,
            })
            .is_ok());
        assert!(pool
            .insert_at(AddressRange {
                start: 0x2002,
                end: 0x2002,
            })
            .is_ok());
        assert_eq!(
            pool.allocate(0x1004, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
    }
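
    // A small sketch of `AddressAllocatorSet`: each allocator in the slice
    // is tried in order until one can satisfy the request.
    #[test]
    fn allocator_set_tries_allocators_in_order() {
        let mut allocators = [
            AddressAllocator::new(
                AddressRange {
                    start: 0x1000,
                    end: 0x10ff,
                },
                None,
                None,
            )
            .unwrap(),
            AddressAllocator::new(
                AddressRange {
                    start: 0x2000,
                    end: 0x2fff,
                },
                None,
                None,
            )
            .unwrap(),
        ];
        let mut set = AddressAllocatorSet::new(&mut allocators);
        // Fits in the first allocator.
        assert_eq!(
            set.allocate(0x100, Alloc::Anon(0), String::from("a")),
            Ok(0x1000)
        );
        // Too large for what remains of the first allocator, so the second
        // allocator satisfies it.
        assert_eq!(
            set.allocate(0x800, Alloc::Anon(1), String::from("b")),
            Ok(0x2000)
        );
    }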

    #[test]
    fn coalescing_u64_limits() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0,
                end: u64::MAX - 1,
            },
            None,
            None,
        )
        .unwrap();
        assert!(pool
            .insert_at(AddressRange {
                start: u64::MAX,
                end: u64::MAX,
            })
            .is_ok());
        assert!(pool
            .insert_at(AddressRange {
                start: u64::MAX,
                end: u64::MAX,
            })
            .is_err());
        assert_eq!(
            pool.allocate(u64::MAX, Alloc::Anon(0), String::from("bar0")),
            Ok(0)
        );

        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 1,
                end: u64::MAX,
            },
            None,
            None,
        )
        .unwrap();
        assert!(pool.insert_at(AddressRange { start: 0, end: 0 }).is_ok());
        assert!(pool.insert_at(AddressRange { start: 0, end: 0 }).is_err());
        assert_eq!(
            pool.allocate(u64::MAX, Alloc::Anon(0), String::from("bar0")),
            Ok(0)
        );
    }

    #[test]
    fn allocate_and_verify_pci_offset() {
        let mut pool = AddressAllocator::new(
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            None,
            None,
        )
        .unwrap();
        let pci_bar0 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 0,
        };
        let pci_bar1 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 1,
        };
        let pci_bar2 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 2,
        };
        let anon = Alloc::Anon(1);

        assert_eq!(
            pool.allocate(0x800, pci_bar0, String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x800, pci_bar1, String::from("bar1")),
            Ok(0x1800)
        );
        assert_eq!(pool.allocate(0x800, anon, String::from("anon")), Ok(0x2000));

        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x600, 0x100),
            Ok(0x1600)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar1, 0x600, 0x100),
            Ok(0x1E00)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x7FE, 0x001),
            Ok(0x17FE)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x7FF, 0x001),
            Ok(0x17FF)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x800, 0x001),
            Err(Error::OutOfBounds)
        );

        assert_eq!(
            pool.address_from_pci_offset(pci_bar2, 0x7FF, 0x001),
            Err(Error::InvalidAlloc(pci_bar2))
        );

        assert_eq!(
            pool.address_from_pci_offset(anon, 0x600, 0x100),
            Err(Error::InvalidAlloc(anon))
        );
    }

    #[test]
    fn get_max_address_of_ranges() {
        let ranges = vec![
            AddressRange {
                start: 0x1000,
                end: 0xFFFF,
            },
            AddressRange {
                start: 0x20000,
                end: 0xFFFFF,
            },
        ];
        let pool = AddressAllocator::new_from_list(ranges, None, None).unwrap();

        assert_eq!(pool.get_max_addr(), 0xFFFFF);
    }
}