// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::btree_map;
use std::collections::BTreeMap;

use base::pagesize;

use crate::address_allocator::AddressAllocator;
use crate::address_allocator::AddressAllocatorSet;
use crate::AddressRange;
use crate::Alloc;
use crate::Error;
use crate::Result;

/// Manages allocating system resources such as address space and interrupt numbers.

/// MMIO address type.
///    Low: address allocated from low_address_space
///    High: address allocated from high_address_space
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum MmioType {
    Low,
    High,
}

/// Memory allocation options.
#[derive(Copy, Clone, Debug)]
pub struct AllocOptions {
    prefetchable: bool,
    max_address: u64,
    alignment: Option<u64>,
    top_down: bool,
}

impl Default for AllocOptions {
    fn default() -> Self {
        AllocOptions::new()
    }
}

impl AllocOptions {
    pub fn new() -> Self {
        AllocOptions {
            prefetchable: false,
            max_address: u64::MAX,
            alignment: None,
            top_down: false,
        }
    }

    /// If `true`, memory may be allocated in a prefetchable/cacheable region.
    /// If `false`, memory must be allocated within a non-prefetchable region, appropriate for
    /// device registers.
    /// Default: `false`
    pub fn prefetchable(&mut self, prefetchable: bool) -> &mut Self {
        self.prefetchable = prefetchable;
        self
    }

    /// Largest valid address for the end of the allocated region.
    /// For example, `u32::MAX` may be used to allocate a region that is addressable with a 32-bit
    /// pointer.
    /// Default: `u64::MAX`
    pub fn max_address(&mut self, max_address: u64) -> &mut Self {
        self.max_address = max_address;
        self
    }

    /// Minimum alignment of the allocated address.
    /// Default: `None` (allocation preference of the address allocator pool will be used)
    pub fn align(&mut self, alignment: u64) -> &mut Self {
        self.alignment = Some(alignment);
        self
    }

    /// If `true`, prefer allocating from the upper end of the region rather than the low end.
    /// Default: `false`
    pub fn top_down(&mut self, top_down: bool) -> &mut Self {
        self.top_down = top_down;
        self
    }
}
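// Illustrative builder usage (a sketch, not part of this module's API surface;
// assumes an existing `SystemAllocator` named `allocator` and an `Alloc` value
// named `alloc`):
//
//     let addr = allocator.allocate_mmio(
//         0x1000,
//         alloc,
//         "bar0".to_string(),
//         AllocOptions::new().prefetchable(true).align(0x1000),
//     )?;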
pub struct SystemAllocatorConfig {
    /// IO ports. Only for x86_64.
    pub io: Option<AddressRange>,
    /// Low (<=4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::Low allocator. However, no new
    /// regions will be added.
    pub low_mmio: AddressRange,
    /// High (>4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::High allocator. However, no new
    /// regions will be added.
    pub high_mmio: AddressRange,
    /// Platform MMIO space. Only for ARM.
    pub platform_mmio: Option<AddressRange>,
    /// The first IRQ number to give out.
    pub first_irq: u32,
}

#[derive(Debug)]
pub struct SystemAllocator {
    io_address_space: Option<AddressAllocator>,

    // Indexed by MmioType::Low and MmioType::High.
    mmio_address_spaces: [AddressAllocator; 2],
    mmio_platform_address_spaces: Option<AddressAllocator>,

    reserved_region: Option<AddressRange>,

    // Each bus number has an AddressAllocator.
    pci_allocator: BTreeMap<u8, AddressAllocator>,
    irq_allocator: AddressAllocator,
    gpe_allocator: AddressAllocator,
    next_anon_id: usize,
}

impl SystemAllocator {
    /// Creates a new `SystemAllocator` for managing addresses and irq numbers.
    /// Will return an error if `base` + `size` overflows u64 (or the allowed
    /// maximum for the specific type), or if the alignment isn't a power of two.
    ///
    /// If `reserve_region_size` is not None, then a region is reserved from
    /// the start of `config.high_mmio` before the mmio allocator is created.
    ///
    /// If `mmio_address_ranges` is not empty, then `config.low_mmio` and
    /// `config.high_mmio` are intersected with the ranges specified.
    pub fn new(
        config: SystemAllocatorConfig,
        reserve_region_size: Option<u64>,
        mmio_address_ranges: &[AddressRange],
    ) -> Result<Self> {
        let page_size = pagesize() as u64;

        let (high_mmio, reserved_region) = match reserve_region_size {
            Some(reserved_len) => {
                let high_mmio_len = config.high_mmio.len().ok_or(Error::OutOfBounds)?;
                if reserved_len > high_mmio_len {
                    return Err(Error::OutOfSpace);
                }
                let reserved_start = config.high_mmio.start;
                let reserved_end = reserved_start + reserved_len - 1;
                let high_mmio_start = reserved_end + 1;
                let high_mmio_end = config.high_mmio.end;
                (
                    AddressRange {
                        start: high_mmio_start,
                        end: high_mmio_end,
                    },
                    Some(AddressRange {
                        start: reserved_start,
                        end: reserved_end,
                    }),
                )
            }
            None => (config.high_mmio, None),
        };

        let intersect_mmio_range = |src_range: AddressRange| -> Result<Vec<AddressRange>> {
            Ok(if mmio_address_ranges.is_empty() {
                vec![src_range]
            } else {
                mmio_address_ranges
                    .iter()
                    .map(|r| r.intersect(src_range))
                    .collect()
            })
        };

        Ok(SystemAllocator {
            io_address_space: if let Some(io) = config.io {
                // TODO make sure we don't overlap with existing well known
                // ports such as 0xcf8 (the PCI CONFIG_ADDRESS port).
                if io.end > 0xffff {
                    return Err(Error::IOPortOutOfRange(io));
                }
                Some(AddressAllocator::new(io, Some(0x400), None)?)
            } else {
                None
            },
            mmio_address_spaces: [
                // MmioType::Low
                AddressAllocator::new_from_list(
                    intersect_mmio_range(config.low_mmio)?,
                    Some(page_size),
                    None,
                )?,
                // MmioType::High
                AddressAllocator::new_from_list(
                    intersect_mmio_range(high_mmio)?,
                    Some(page_size),
                    None,
                )?,
            ],

            pci_allocator: BTreeMap::new(),

            mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
                Some(AddressAllocator::new(platform, Some(page_size), None)?)
            } else {
                None
            },

            reserved_region,

            irq_allocator: AddressAllocator::new(
                AddressRange {
                    start: config.first_irq as u64,
                    end: 1023,
                },
                Some(1),
                None,
            )?,

            // The GPE range depends on ACPIPM_RESOURCE_GPE0_BLK_LEN, which is used to determine
            // ACPIPM_GPE_MAX. The AddressRange should be kept in sync with ACPIPM_GPE_MAX. A
            // hard-coded value is used here because the devices lib (where the ACPIPM_* consts
            // are defined) depends on the resources lib, so using those consts here would
            // introduce a cyclic dependency.
            gpe_allocator: AddressAllocator::new(
                AddressRange { start: 0, end: 255 },
                Some(1),
                None,
            )?,
            next_anon_id: 0,
        })
    }
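    // Worked example of the `reserve_region_size` split in `new` (hypothetical
    // values): with `config.high_mmio` = 0x1_0000_0000..=0x1_ffff_ffff and
    // `reserve_region_size` = Some(0x10000), the reserved region becomes
    // 0x1_0000_0000..=0x1_0000_ffff and the high MMIO allocator covers
    // 0x1_0001_0000..=0x1_ffff_ffff.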
    /// Reserves the next available system irq number.
    pub fn allocate_irq(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate(1, id, "irq-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    /// Releases an irq back to the system irq number pool.
    pub fn release_irq(&mut self, irq: u32) {
        let _ = self.irq_allocator.release_containing(irq.into());
    }

    /// Reserves the specified system irq number.
    pub fn reserve_irq(&mut self, irq: u32) -> bool {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate_at(
                AddressRange {
                    start: irq.into(),
                    end: irq.into(),
                },
                id,
                "irq-fixed".to_string(),
            )
            .is_ok()
    }

    /// Reserves the next available system GPE number.
    pub fn allocate_gpe(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.gpe_allocator
            .allocate(1, id, "gpe-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    fn get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator> {
        match self.pci_allocator.entry(bus) {
            btree_map::Entry::Occupied(entry) => Some(entry.into_mut()),
            btree_map::Entry::Vacant(entry) => {
                // The pci root is 00:00.0, so on bus 0 the next free device is 00:01.0 with
                // mandatory function number zero.
                let base = if bus == 0 { 8 } else { 0 };

                // Each bus supports up to 32 (devices) x 8 (functions).
                // Prefer allocating at device granularity (preferred_align = 8), but fall back to
                // allocating individual functions (min_align = 1) when we run out of devices.
                let pci_alloc = AddressAllocator::new(
                    AddressRange {
                        start: base,
                        end: (32 * 8) - 1,
                    },
                    Some(1),
                    Some(8),
                )
                .ok()?;

                Some(entry.insert(pci_alloc))
            }
        }
    }

    // Returns true if no devices have been allocated on the specified bus.
    pub fn pci_bus_empty(&self, bus: u8) -> bool {
        !self.pci_allocator.contains_key(&bus)
    }

    /// Allocates a PCI slot location.
    pub fn allocate_pci(&mut self, bus: u8, tag: String) -> Option<Alloc> {
        let id = self.get_anon_alloc();
        let allocator = match self.get_pci_allocator_mut(bus) {
            Some(v) => v,
            None => return None,
        };
        allocator
            .allocate(1, id, tag)
            .map(|v| Alloc::PciBar {
                bus,
                dev: (v >> 3) as u8,
                func: (v & 7) as u8,
                bar: 0,
            })
            .ok()
    }
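    // The per-bus allocator hands out indices that pack device and function as
    // `index = dev * 8 + func`, so `dev = index >> 3` and `func = index & 7`.
    // For example (hypothetical), index 0x0b decodes to device 1, function 3.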
    /// Reserves a PCI slot location.
    pub fn reserve_pci(&mut self, alloc: Alloc, tag: String) -> bool {
        let id = self.get_anon_alloc();
        match alloc {
            Alloc::PciBar {
                bus,
                dev,
                func,
                bar: _,
            } => {
                let allocator = match self.get_pci_allocator_mut(bus) {
                    Some(v) => v,
                    None => return false,
                };
                let df = ((dev as u64) << 3) | (func as u64);
                allocator
                    .allocate_at(AddressRange { start: df, end: df }, id, tag)
                    .is_ok()
            }
            _ => false,
        }
    }

    /// Releases a PCI slot location.
    pub fn release_pci(&mut self, bus: u8, dev: u8, func: u8) -> bool {
        let allocator = match self.get_pci_allocator_mut(bus) {
            Some(v) => v,
            None => return false,
        };
        let df = ((dev as u64) << 3) | (func as u64);
        allocator.release_containing(df).is_ok()
    }

    /// Allocates a memory-mapped I/O region with the properties requested in `opts`.
    pub fn allocate_mmio(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
    ) -> Result<u64> {
        // For now, there is no way to ensure allocations fit in less than 32 bits.
        // This can be removed once AddressAllocator accepts AllocOptions.
        if opts.max_address < u32::MAX as u64 {
            return Err(Error::OutOfSpace);
        }

        let mut mmio_type = MmioType::High;
        if opts.max_address < u64::MAX || !opts.prefetchable {
            mmio_type = MmioType::Low;
        }

        let res = self.allocate_mmio_internal(size, alloc, tag.clone(), opts, mmio_type);
        // If a high allocation failed, retry in low. The reverse is not valid, since the address
        // may be out of range and/or prefetchable memory may not be appropriate.
        if mmio_type == MmioType::High && matches!(res, Err(Error::OutOfSpace)) {
            self.allocate_mmio_internal(size, alloc, tag, opts, MmioType::Low)
        } else {
            res
        }
    }

    fn allocate_mmio_internal(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
        mmio_type: MmioType,
    ) -> Result<u64> {
        let allocator = &mut self.mmio_address_spaces[mmio_type as usize];
        match (opts.alignment, opts.top_down) {
            (Some(align), true) => allocator.reverse_allocate_with_align(size, alloc, tag, align),
            (Some(align), false) => allocator.allocate_with_align(size, alloc, tag, align),
            (None, true) => allocator.reverse_allocate(size, alloc, tag),
            (None, false) => allocator.allocate(size, alloc, tag),
        }
    }
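    // Illustrative routing for `allocate_mmio` above: a prefetchable request with
    // the default `max_address` (u64::MAX) is tried in the high allocator first
    // and falls back to low on OutOfSpace; a non-prefetchable request, or one with
    // a capped `max_address`, goes straight to the low allocator.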
    /// Reserves the specified range from the pci mmio pools: computes the overlap of the
    /// range with each mmio pool and excludes that overlap from the mmio allocator.
    ///
    /// Returns an error if any part of the specified range has already been allocated.
    pub fn reserve_mmio(&mut self, range: AddressRange) -> Result<()> {
        let mut pools = Vec::new();
        for pool in self.mmio_pools() {
            pools.push(*pool);
        }
        pools.sort_by(|a, b| a.start.cmp(&b.start));
        for pool in &pools {
            if pool.start > range.end {
                break;
            }

            let overlap = pool.intersect(range);
            if !overlap.is_empty() {
                let id = self.get_anon_alloc();
                self.mmio_allocator_any().allocate_at(
                    overlap,
                    id,
                    "pci mmio reserve".to_string(),
                )?;
            }
        }

        Ok(())
    }
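    // Illustrative walk-through (hypothetical values): with a low pool of
    // 0x3000_0000..=0x3000_ffff, calling `reserve_mmio` on 0x3000_2000..=0x3000_5fff
    // allocates exactly that overlap out of the pool, so a later `allocate_at`
    // inside the reserved range fails (see the `example()` test below).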
    /// Gets an allocator to be used for platform device MMIO allocation.
    pub fn mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.mmio_platform_address_spaces.as_mut()
    }

    /// Gets an allocator to be used for IO memory.
    pub fn io_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.io_address_space.as_mut()
    }

    /// Gets an allocator to be used for MMIO allocation.
    ///    MmioType::Low: low mmio allocator
    ///    MmioType::High: high mmio allocator
    pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
        &mut self.mmio_address_spaces[mmio_type as usize]
    }

    /// Gets a set of allocators to be used for MMIO allocation.
    /// The set of allocators will try the low and high MMIO allocators, in that order.
    pub fn mmio_allocator_any(&mut self) -> AddressAllocatorSet {
        AddressAllocatorSet::new(&mut self.mmio_address_spaces)
    }

    /// Gets the pools of all mmio allocators.
    pub fn mmio_pools(&self) -> Vec<&AddressRange> {
        self.mmio_address_spaces
            .iter()
            .flat_map(|mmio_as| mmio_as.pools())
            .collect()
    }

    /// Gets the reserved address space region.
    pub fn reserved_region(&self) -> Option<AddressRange> {
        self.reserved_region
    }

    /// Gets a unique anonymous allocation.
    pub fn get_anon_alloc(&mut self) -> Alloc {
        self.next_anon_id += 1;
        Alloc::Anon(self.next_anon_id)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: Some(AddressRange {
                    start: 0x1000,
                    end: 0xffff,
                }),
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1000_0000,
                    end: 0x1fff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        assert_eq!(a.allocate_irq(), Some(5));
        assert_eq!(a.allocate_irq(), Some(6));
        assert_eq!(a.allocate_gpe(), Some(0));
        assert_eq!(a.allocate_gpe(), Some(1));
        assert_eq!(
            a.mmio_allocator(MmioType::High).allocate(
                0x100,
                Alloc::PciBar {
                    bus: 0,
                    dev: 0,
                    func: 0,
                    bar: 0
                },
                "bar0".to_string()
            ),
            Ok(0x1000_0000)
        );
        assert_eq!(
            a.mmio_allocator(MmioType::High).get(&Alloc::PciBar {
                bus: 0,
                dev: 0,
                func: 0,
                bar: 0
            }),
            Some(&(
                AddressRange {
                    start: 0x1000_0000,
                    end: 0x1000_00ff
                },
                "bar0".to_string()
            ))
        );

        let id = a.get_anon_alloc();
        assert_eq!(
            a.mmio_allocator(MmioType::Low).allocate_at(
                AddressRange {
                    start: 0x3000_5000,
                    end: 0x3000_9fff
                },
                id,
                "Test".to_string()
            ),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low).release(id),
            Ok(AddressRange {
                start: 0x3000_5000,
                end: 0x3000_9fff
            })
        );
        assert_eq!(
            a.reserve_mmio(AddressRange {
                start: 0x3000_2000,
                end: 0x3000_5fff
            }),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low)
                .allocate_at(
                    AddressRange {
                        start: 0x3000_5000,
                        end: 0x3000_9fff
                    },
                    id,
                    "Test".to_string()
                )
                .is_err(),
            true
        );
    }
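
    // A minimal additional check of the `AllocOptions` path through `allocate_mmio`
    // (an illustrative sketch, not part of the original test suite; the pool bounds
    // mirror `example()` above, with the high pool shifted so it is actually >4GB).
    #[test]
    fn alloc_options_low_mmio() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: None,
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1_0000_0000,
                    end: 0x1_1fff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        let id = a.get_anon_alloc();
        // A non-prefetchable (default), page-aligned request must be served from
        // the low (<=4GB) pool.
        let addr = a
            .allocate_mmio(
                0x1000,
                id,
                "regs".to_string(),
                AllocOptions::new().align(0x1000),
            )
            .unwrap();
        assert!((0x3000_0000..=0x3000_ffff).contains(&addr));
    }
}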