// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
#[cfg(target_arch = "x86_64")]
use std::time::Instant;

#[cfg(target_arch = "x86_64")]
use base::error;
use base::Event;
#[cfg(target_arch = "x86_64")]
use metrics::log_metric;
#[cfg(target_arch = "x86_64")]
use metrics::MetricEventType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;

use super::INTERRUPT_STATUS_CONFIG_CHANGED;
use super::INTERRUPT_STATUS_USED_RING;
use super::VIRTIO_MSI_NO_VECTOR;
#[cfg(target_arch = "x86_64")]
use crate::acpi::PmWakeupEvent;
use crate::irq_event::IrqEdgeEvent;
use crate::irq_event::IrqLevelEvent;
use crate::pci::MsixConfig;

struct TransportPci {
    irq_evt_lvl: IrqLevelEvent,
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
    config_msix_vector: u16,
}

enum Transport {
    Pci {
        pci: TransportPci,
    },
    Mmio {
        irq_evt_edge: IrqEdgeEvent,
    },
    VhostUser {
        call_evt: Event,
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    },
}

struct InterruptInner {
    interrupt_status: AtomicUsize,
    transport: Transport,
    async_intr_status: bool,
    pm_state: Arc<Mutex<PmState>>,
}

impl InterruptInner {
    /// Add `interrupt_status_mask` to any existing interrupt status.
    ///
    /// Returns `true` if the interrupt should be triggered after this update.
    fn update_interrupt_status(&self, interrupt_status_mask: u32) -> bool {
        // Set the bit in the ISR and inject the interrupt if it was not already pending.
        // There is no need to inject another interrupt while the guest has not yet processed
        // the previous one. In hypervisors where interrupt_status is updated asynchronously,
        // inject the interrupt even if the previous interrupt appears to be already pending.
        self.interrupt_status
            .fetch_or(interrupt_status_mask as usize, Ordering::SeqCst)
            == 0
            || self.async_intr_status
    }
}

#[derive(Clone)]
pub struct Interrupt {
    inner: Arc<InterruptInner>,
}

impl fmt::Debug for Interrupt {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Interrupt")
    }
}

#[derive(Serialize, Deserialize)]
pub struct InterruptSnapshot {
    interrupt_status: usize,
}

impl Interrupt {
    /// Writes to the irqfd to the VMM to deliver a virtual interrupt to the guest.
    ///
    /// If MSI-X is enabled in this device, the MSI-X interrupt is preferred.
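    ///
    /// For the non-MSI-X (INTx and MMIO) paths, the bits in `interrupt_status_mask` are also
    /// recorded in the ISR so the driver can see why it was interrupted.
    ///
    /// Illustrative sketch only (not from this file); assumes an `Interrupt` already wired up
    /// for a device and a queue's MSI-X `vector`:
    ///
    /// ```ignore
    /// // Deliver a "used buffer" notification; equivalent to signal_used_queue(vector).
    /// interrupt.signal(vector, INTERRUPT_STATUS_USED_RING);
    /// ```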
    pub fn signal(&self, vector: u16, interrupt_status_mask: u32) {
        if self
            .inner
            .pm_state
            .lock()
            .handle_interrupt(vector, interrupt_status_mask)
        {
            return;
        }

        match &self.inner.transport {
            Transport::Pci { pci } => {
                // No need to set the ISR for MSI-X interrupts.
                if let Some(msix_config) = &pci.msix_config {
                    let mut msix_config = msix_config.lock();
                    if msix_config.enabled() {
                        if vector != VIRTIO_MSI_NO_VECTOR {
                            msix_config.trigger(vector);
                        }
                        return;
                    }
                }

                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    pci.irq_evt_lvl.trigger().unwrap();
                }
            }
            Transport::Mmio { irq_evt_edge } => {
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    irq_evt_edge.trigger().unwrap();
                }
            }
            Transport::VhostUser { call_evt, .. } => {
                // TODO(b/187487351): To avoid sending unnecessary events, we might want to
                // support interrupt status. For this purpose, we need a mechanism to share
                // interrupt status between the vmm and the device process.
                call_evt.signal().unwrap();
            }
        }
    }

    /// Notify the driver that buffers have been placed in the used queue.
    pub fn signal_used_queue(&self, vector: u16) {
        self.signal(vector, INTERRUPT_STATUS_USED_RING)
    }

    /// Notify the driver that the device configuration has changed.
    pub fn signal_config_changed(&self) {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => {
                self.signal(pci.config_msix_vector, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::Mmio { .. } => {
                self.signal(VIRTIO_MSI_NO_VECTOR, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::VhostUser {
                signal_config_changed_fn,
                ..
            } => signal_config_changed_fn(),
        }
    }

    /// Get the event to signal that resampling is needed, if it exists.
    pub fn get_resample_evt(&self) -> Option<&Event> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => Some(pci.irq_evt_lvl.get_resample()),
            _ => None,
        }
    }

    /// Reads the status and writes to the interrupt event. Does not read the resample event;
    /// it assumes a resample has already been requested.
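    ///
    /// Callers that wait on the resample event themselves normally use
    /// [`Interrupt::interrupt_resample`], which clears the event before delegating here.
    /// A hedged sketch of such an event-loop arm (the surrounding loop and error handling are
    /// hypothetical):
    ///
    /// ```ignore
    /// if let Some(resample_evt) = interrupt.get_resample_evt() {
    ///     resample_evt.wait().unwrap();   // the irqchip asked us to resample
    ///     interrupt.interrupt_resample(); // re-asserts the level IRQ if the ISR is non-zero
    /// }
    /// ```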
    pub fn do_interrupt_resample(&self) {
        if self.inner.interrupt_status.load(Ordering::SeqCst) != 0 {
            match &self.inner.as_ref().transport {
                Transport::Pci { pci } => pci.irq_evt_lvl.trigger().unwrap(),
                _ => panic!("do_interrupt_resample() not supported"),
            }
        }
    }
}

impl Interrupt {
    pub fn new(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }

    /// Create a new `Interrupt`, restoring internal state to match `snapshot`.
    ///
    /// The other arguments are assumed to be snapshotted and restored elsewhere.
    pub fn new_from_snapshot(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        snapshot: InterruptSnapshot,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(snapshot.interrupt_status),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }

    pub fn new_mmio(irq_evt_edge: IrqEdgeEvent, async_intr_status: bool) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::Mmio { irq_evt_edge },
                async_intr_status,
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }

    /// Create an `Interrupt` wrapping a vhost-user vring call event and a function that sends a
    /// VHOST_USER_BACKEND_CONFIG_CHANGE_MSG to the frontend.
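    ///
    /// Illustrative sketch only; the call event and the closure that forwards the config-change
    /// message to the frontend (`send_config_change_to_frontend`) are hypothetical stand-ins:
    ///
    /// ```ignore
    /// let call_evt = Event::new().unwrap();
    /// let interrupt = Interrupt::new_vhost_user(
    ///     call_evt,
    ///     Box::new(move || send_config_change_to_frontend()),
    /// );
    /// ```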
    pub fn new_vhost_user(
        call_evt: Event,
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::VhostUser {
                    call_evt,
                    signal_config_changed_fn,
                },
                async_intr_status: false,
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }

    #[cfg(test)]
    pub fn new_for_test() -> Interrupt {
        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            None,
            VIRTIO_MSI_NO_VECTOR,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }

    #[cfg(test)]
    pub fn new_for_test_with_msix() -> Interrupt {
        let (_, unused_config_tube) = base::Tube::pair().unwrap();
        let msix_vectors = 2;
        let msix_cfg = MsixConfig::new(
            msix_vectors,
            unused_config_tube,
            0,
            "test_device".to_owned(),
        );

        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            Some(Arc::new(Mutex::new(msix_cfg))),
            msix_vectors,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }

    /// Get a reference to the interrupt event.
    pub fn get_interrupt_evt(&self) -> &Event {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => pci.irq_evt_lvl.get_trigger(),
            Transport::Mmio { irq_evt_edge } => irq_evt_edge.get_trigger(),
            Transport::VhostUser { call_evt, .. } => call_evt,
        }
    }

    /// Handle an interrupt resampling event, reading the value from the event and doing the
    /// resample.
    pub fn interrupt_resample(&self) {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => {
                pci.irq_evt_lvl.clear_resample();
                self.do_interrupt_resample();
            }
            _ => panic!("interrupt_resample() not supported"),
        }
    }

    /// Get a reference to the MSI-X configuration.
    pub fn get_msix_config(&self) -> &Option<Arc<Mutex<MsixConfig>>> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => &pci.msix_config,
            _ => &None,
        }
    }

    /// Reads the current value of the interrupt status.
    pub fn read_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.load(Ordering::SeqCst) as u8
    }

    /// Reads the current value of the interrupt status and resets it to 0.
    pub fn read_and_reset_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.swap(0, Ordering::SeqCst) as u8
    }

    /// Clear the bits set in `mask` in the interrupt status.
    pub fn clear_interrupt_status_bits(&self, mask: u8) {
        self.inner
            .interrupt_status
            .fetch_and(!(mask as usize), Ordering::SeqCst);
    }

    /// Snapshot internal state. Can be restored with `Interrupt::new_from_snapshot`.
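    ///
    /// Sketch of a snapshot/restore round trip (the irq and MSI-X resources passed to
    /// `new_from_snapshot` are hypothetical locals here and, as noted on that constructor,
    /// are assumed to be restored elsewhere):
    ///
    /// ```ignore
    /// let snap = interrupt.snapshot();
    /// // ... later, after the transport resources have been re-created ...
    /// let restored = Interrupt::new_from_snapshot(
    ///     irq_evt_lvl,
    ///     msix_config,
    ///     config_msix_vector,
    ///     snap,
    ///     #[cfg(target_arch = "x86_64")]
    ///     None,
    /// );
    /// ```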
    pub fn snapshot(&self) -> InterruptSnapshot {
        InterruptSnapshot {
            interrupt_status: self.inner.interrupt_status.load(Ordering::SeqCst),
        }
    }

    pub fn set_suspended(&self, suspended: bool) {
        let retrigger_evts = self.inner.pm_state.lock().set_suspended(suspended);
        for (vector, interrupt_status_mask) in retrigger_evts.into_iter() {
            self.signal(vector, interrupt_status_mask);
        }
    }

    #[cfg(target_arch = "x86_64")]
    pub fn set_wakeup_event_active(&self, active: bool) {
        self.inner.pm_state.lock().set_wakeup_event_active(active);
    }
}

#[cfg(target_arch = "x86_64")]
struct WakeupState {
    wakeup_event: PmWakeupEvent,
    wakeup_enabled: bool,
    armed_time: Instant,
    metrics_event: MetricEventType,
    wakeup_clear_evt: Option<Event>,
}

#[cfg(target_arch = "x86_64")]
impl WakeupState {
    fn new(wakeup_event: Option<(PmWakeupEvent, MetricEventType)>) -> Option<Self> {
        wakeup_event.map(|(wakeup_event, metrics_event)| Self {
            wakeup_event,
            wakeup_enabled: false,
            // Not actually armed, but simpler than wrapping with an Option.
            armed_time: Instant::now(),
            metrics_event,
            wakeup_clear_evt: None,
        })
    }

    fn trigger_wakeup(&mut self) {
        if self.wakeup_clear_evt.is_some() {
            return;
        }

        let elapsed = self.armed_time.elapsed().as_millis();
        log_metric(
            self.metrics_event.clone(),
            elapsed.try_into().unwrap_or(i64::MAX),
        );

        match self.wakeup_event.trigger_wakeup() {
            Ok(clear_evt) => self.wakeup_clear_evt = clear_evt,
            Err(err) => error!("Wakeup trigger failed {:?}", err),
        }
    }
}

// Power management state of the interrupt.
struct PmState {
    // Whether or not the virtio device that owns this interrupt is suspended. A
    // suspended virtio device MUST NOT send notifications (i.e. interrupts) to the
    // driver.
    suspended: bool,
    // The queue of interrupts that the virtio device has generated while suspended.
    // These are deferred and sent in order when the device is un-suspended.
    pending_signals: Vec<(u16, u32)>,
    #[cfg(target_arch = "x86_64")]
    wakeup_state: Option<WakeupState>,
}

impl PmState {
    fn new(
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Arc<Mutex<Self>> {
        Arc::new(Mutex::new(Self {
            suspended: false,
            pending_signals: Vec::new(),
            #[cfg(target_arch = "x86_64")]
            wakeup_state: WakeupState::new(wakeup_event),
        }))
    }

    fn handle_interrupt(&mut self, vector: u16, mask: u32) -> bool {
        if self.suspended {
            self.pending_signals.push((vector, mask));
            #[cfg(target_arch = "x86_64")]
            if let Some(wakeup_state) = self.wakeup_state.as_mut() {
                if wakeup_state.wakeup_enabled {
                    wakeup_state.trigger_wakeup();
                }
            }
        }
        self.suspended
    }

    fn set_suspended(&mut self, suspended: bool) -> Vec<(u16, u32)> {
        self.suspended = suspended;
        std::mem::take(&mut self.pending_signals)
    }

    #[cfg(target_arch = "x86_64")]
    fn set_wakeup_event_active(&mut self, active: bool) {
        let Some(wakeup_state) = self.wakeup_state.as_mut() else {
            return;
        };

        wakeup_state.wakeup_enabled = active;
        if active {
            wakeup_state.armed_time = Instant::now();
            if !self.pending_signals.is_empty() {
                wakeup_state.trigger_wakeup();
            }
        } else if let Some(clear_evt) = wakeup_state.wakeup_clear_evt.take() {
            if let Err(e) = clear_evt.signal() {
                error!("failed to signal clear event {}", e);
            }
        }
    }
}
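
// The sketch below is illustrative and not part of the original file: it exercises the ISR
// set/read-and-reset behavior through the existing `new_for_test` constructor and the public
// status accessors defined above. It assumes it would live alongside this crate's other unit
// tests.
#[cfg(test)]
mod interrupt_status_example {
    use super::*;

    #[test]
    fn isr_set_and_reset() {
        let interrupt = Interrupt::new_for_test();

        // Signaling the used queue sets INTERRUPT_STATUS_USED_RING in the ISR.
        interrupt.signal_used_queue(VIRTIO_MSI_NO_VECTOR);
        assert_eq!(
            interrupt.read_interrupt_status() as u32,
            INTERRUPT_STATUS_USED_RING
        );

        // A driver-style read returns the pending bits and clears the ISR back to zero.
        assert_eq!(
            interrupt.read_and_reset_interrupt_status() as u32,
            INTERRUPT_STATUS_USED_RING
        );
        assert_eq!(interrupt.read_interrupt_status(), 0);
    }
}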