xref: /aosp_15_r20/external/crosvm/devices/src/acpi.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::collections::BTreeMap;
6 use std::str::FromStr;
7 use std::sync::Arc;
8 use std::time::Duration;
9 
10 use acpi_tables::aml;
11 use acpi_tables::aml::Aml;
12 use anyhow::bail;
13 use anyhow::Context;
14 use base::custom_serde::serialize_arc_mutex;
15 use base::error;
16 use base::warn;
17 use base::Error as SysError;
18 use base::Event;
19 use base::EventToken;
20 use base::EventWaitResult;
21 use base::SendTube;
22 use base::Tube;
23 use base::VmEventType;
24 use base::WaitContext;
25 use base::WorkerThread;
26 use serde::Deserialize;
27 use serde::Serialize;
28 use sync::Mutex;
29 use thiserror::Error;
30 use vm_control::GpeNotify;
31 use vm_control::PmResource;
32 use vm_control::PmeNotify;
33 use vm_control::VmRequest;
34 use vm_control::VmResponse;
35 
36 use crate::ac_adapter::AcAdapter;
37 use crate::pci::pm::PmConfig;
38 use crate::pci::CrosvmDeviceId;
39 use crate::BusAccessInfo;
40 use crate::BusDevice;
41 use crate::BusResumeDevice;
42 use crate::DeviceId;
43 use crate::IrqLevelEvent;
44 use crate::Suspendable;
45 
/// Errors that can occur while setting up or running the ACPI PM worker.
#[derive(Error, Debug)]
pub enum ACPIPMError {
    /// Creating WaitContext failed.
    #[error("failed to create wait context: {0}")]
    CreateWaitContext(SysError),
    /// Error while waiting for events.
    #[error("failed to wait for events: {0}")]
    WaitError(SysError),
    /// The netlink multicast group id for "acpi_mc_group" could not be resolved.
    #[error("Did not find group_id corresponding to acpi_mc_group")]
    AcpiMcGroupError,
    /// Creating or binding the NETLINK_GENERIC socket failed.
    #[error("Failed to create and bind NETLINK_GENERIC socket for acpi_mc_group: {0}")]
    AcpiEventSockError(base::Error),
    /// The GPE number does not fit in the GPE0 status/enable register range.
    #[error("GPE {0} is out of bound")]
    GpeOutOfBound(u32),
}
61 
/// ACPI fixed-hardware events signaled through the PM1 status/enable
/// registers (ACPI spec section 4.8.4.1).
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum ACPIPMFixedEvent {
    GlobalLock,
    PowerButton,
    SleepButton,
    RTC,
}
69 
/// State backing the PM1 event/control register block.
#[derive(Serialize)]
pub(crate) struct Pm1Resource {
    // PM1 status register (PM1_STS). Guest writes clear bits (write-one-to-clear,
    // see the BusDevice write handler).
    pub(crate) status: u16,
    // PM1 enable register (PM1_EN); gates which status bits assert SCI.
    enable: u16,
    // PM1 control register (PM1_CNT).
    control: u16,
    // Notifies the main loop of a wakeup (sends `false`) whenever SCI fires.
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
    // If present, checked on resample: while this event is still un-signaled,
    // the RTC status bit is re-asserted.
    #[serde(skip_serializing)]
    rtc_clear_evt: Option<Event>,
}
80 
/// Snapshot form of `Pm1Resource`: only the register values are restored.
#[derive(Deserialize)]
struct Pm1ResourceSerializable {
    status: u16,
    enable: u16,
    control: u16,
}
87 
/// State backing the GPE0 (General-Purpose Event) register block.
#[derive(Serialize)]
pub(crate) struct GpeResource {
    // GPE0 status bytes (GPE0_STS); one bit per GPE number.
    pub(crate) status: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    // GPE0 enable bytes (GPE0_EN).
    enable: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    // Devices to notify when a given GPE number fires.
    #[serde(skip_serializing)]
    pub(crate) gpe_notify: BTreeMap<u32, Vec<Arc<Mutex<dyn GpeNotify>>>>,
    // For each triggered GPE, a vector of events to check when resampling
    // sci_evt. If any events are un-signaled, then sci_evt should be re-asserted.
    #[serde(skip_serializing)]
    pending_clear_evts: BTreeMap<u32, Vec<Event>>,
    // Notifies the main loop of a wakeup (sends `false`) whenever SCI fires.
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
}
101 
/// Snapshot form of `GpeResource`: only the register values are restored.
#[derive(Deserialize)]
struct GpeResourceSerializable {
    status: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    enable: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
}
107 
/// PCI PME (power management event) routing state.
#[derive(Serialize, Deserialize, Clone)]
pub(crate) struct PciResource {
    // Root ports to notify, keyed by PCI bus number, when a PME is raised.
    #[serde(skip_serializing, skip_deserializing)]
    pub(crate) pme_notify: BTreeMap<u8, Vec<Arc<Mutex<dyn PmeNotify>>>>,
}
113 
/// ACPI PM resource for handling OS suspend/resume request
#[allow(dead_code)]
#[derive(Serialize)]
pub struct ACPIPMResource {
    // This is SCI interrupt that will be raised in the VM.
    #[serde(skip_serializing)]
    sci_evt: IrqLevelEvent,
    // Background worker started by `start()`; forwards host ACPI events into
    // the guest and handles SCI resample.
    #[serde(skip_serializing)]
    worker_thread: Option<WorkerThread<()>>,
    // Notifies the main loop of suspend (`true`) / wakeup (`false`) requests.
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
    // Used to request VM exit when the guest enters the S5 sleep state.
    #[serde(skip_serializing)]
    exit_evt_wrtube: SendTube,
    // PM1 event/control register state.
    #[serde(serialize_with = "serialize_arc_mutex")]
    pm1: Arc<Mutex<Pm1Resource>>,
    // GPE0 register state and notification lists.
    #[serde(serialize_with = "serialize_arc_mutex")]
    gpe0: Arc<Mutex<GpeResource>>,
    // PCI PME routing state.
    #[serde(serialize_with = "serialize_arc_mutex")]
    pci: Arc<Mutex<PciResource>>,
    // Optional AC adapter device, updated from host ACPI events.
    #[serde(skip_serializing)]
    acdc: Option<Arc<Mutex<AcAdapter>>>,
}
136 
/// Snapshot form of `ACPIPMResource`: only PM1 and GPE0 register state is
/// restored; tubes, events and notify lists are rebuilt at construction.
// NOTE(review): the name contains a typo ("Resrource"); kept as-is because
// it is private and referenced by `Suspendable::restore`.
#[derive(Deserialize)]
struct ACPIPMResrourceSerializable {
    pm1: Pm1ResourceSerializable,
    gpe0: GpeResourceSerializable,
}
142 
143 impl ACPIPMResource {
144     /// Constructs ACPI Power Management Resouce.
145     #[allow(dead_code)]
new( sci_evt: IrqLevelEvent, suspend_tube: Arc<Mutex<SendTube>>, exit_evt_wrtube: SendTube, acdc: Option<Arc<Mutex<AcAdapter>>>, ) -> ACPIPMResource146     pub fn new(
147         sci_evt: IrqLevelEvent,
148         suspend_tube: Arc<Mutex<SendTube>>,
149         exit_evt_wrtube: SendTube,
150         acdc: Option<Arc<Mutex<AcAdapter>>>,
151     ) -> ACPIPMResource {
152         let pm1 = Pm1Resource {
153             status: 0,
154             enable: 0,
155             control: 0,
156             suspend_tube: suspend_tube.clone(),
157             rtc_clear_evt: None,
158         };
159         let gpe0 = GpeResource {
160             status: Default::default(),
161             enable: Default::default(),
162             gpe_notify: BTreeMap::new(),
163             pending_clear_evts: BTreeMap::new(),
164             suspend_tube: suspend_tube.clone(),
165         };
166         let pci = PciResource {
167             pme_notify: BTreeMap::new(),
168         };
169 
170         ACPIPMResource {
171             sci_evt,
172             worker_thread: None,
173             suspend_tube,
174             exit_evt_wrtube,
175             pm1: Arc::new(Mutex::new(pm1)),
176             gpe0: Arc::new(Mutex::new(gpe0)),
177             pci: Arc::new(Mutex::new(pci)),
178             acdc,
179         }
180     }
181 
start(&mut self)182     pub fn start(&mut self) {
183         let sci_evt = self.sci_evt.try_clone().expect("failed to clone event");
184         let pm1 = self.pm1.clone();
185         let gpe0 = self.gpe0.clone();
186         let acdc = self.acdc.clone();
187 
188         let acpi_event_ignored_gpe = Vec::new();
189 
190         self.worker_thread = Some(WorkerThread::start("ACPI PM worker", move |kill_evt| {
191             if let Err(e) = run_worker(sci_evt, kill_evt, pm1, gpe0, acpi_event_ignored_gpe, acdc) {
192                 error!("{}", e);
193             }
194         }));
195     }
196 }
197 
198 impl Suspendable for ACPIPMResource {
snapshot(&mut self) -> anyhow::Result<serde_json::Value>199     fn snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
200         if !self.gpe0.lock().pending_clear_evts.is_empty() {
201             bail!("ACPIPMResource is busy");
202         }
203         serde_json::to_value(&self)
204             .with_context(|| format!("error serializing {}", self.debug_label()))
205     }
206 
restore(&mut self, data: serde_json::Value) -> anyhow::Result<()>207     fn restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
208         let acpi_snapshot: ACPIPMResrourceSerializable = serde_json::from_value(data)
209             .with_context(|| format!("error deserializing {}", self.debug_label()))?;
210         {
211             let mut pm1 = self.pm1.lock();
212             pm1.status = acpi_snapshot.pm1.status;
213             pm1.enable = acpi_snapshot.pm1.enable;
214             pm1.control = acpi_snapshot.pm1.control;
215         }
216         {
217             let mut gpe0 = self.gpe0.lock();
218             gpe0.status = acpi_snapshot.gpe0.status;
219             gpe0.enable = acpi_snapshot.gpe0.enable;
220         }
221         Ok(())
222     }
223 
sleep(&mut self) -> anyhow::Result<()>224     fn sleep(&mut self) -> anyhow::Result<()> {
225         if let Some(worker_thread) = self.worker_thread.take() {
226             worker_thread.stop();
227         }
228         Ok(())
229     }
230 
wake(&mut self) -> anyhow::Result<()>231     fn wake(&mut self) -> anyhow::Result<()> {
232         self.start();
233         Ok(())
234     }
235 }
236 
/// Worker loop for the ACPI PM device.
///
/// Forwards host ACPI events (when an acpi_mc_group netlink socket is
/// available on this platform) into the guest, re-asserts the SCI level
/// interrupt on resample while PM1/GPE0 status bits remain pending, and
/// exits when `kill_evt` is signaled.
fn run_worker(
    sci_evt: IrqLevelEvent,
    kill_evt: Event,
    pm1: Arc<Mutex<Pm1Resource>>,
    gpe0: Arc<Mutex<GpeResource>>,
    acpi_event_ignored_gpe: Vec<u32>,
    arced_ac_adapter: Option<Arc<Mutex<AcAdapter>>>,
) -> Result<(), ACPIPMError> {
    // May be None on platforms without ACPI netlink event support.
    let acpi_event_sock = crate::sys::get_acpi_event_sock()?;
    #[derive(EventToken)]
    enum Token {
        AcpiEvent,
        InterruptResample,
        Kill,
    }

    let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
        (sci_evt.get_resample(), Token::InterruptResample),
        (&kill_evt, Token::Kill),
    ])
    .map_err(ACPIPMError::CreateWaitContext)?;
    if let Some(acpi_event_sock) = &acpi_event_sock {
        wait_ctx
            .add(acpi_event_sock, Token::AcpiEvent)
            .map_err(ACPIPMError::CreateWaitContext)?;
    }

    loop {
        let events = wait_ctx.wait().map_err(ACPIPMError::WaitError)?;
        for event in events.iter().filter(|e| e.is_readable) {
            match event.token {
                Token::AcpiEvent => {
                    // Translate a host ACPI event into guest GPE / AC adapter
                    // state (platform-specific).
                    crate::sys::acpi_event_run(
                        &sci_evt,
                        &acpi_event_sock,
                        &gpe0,
                        &acpi_event_ignored_gpe,
                        &arced_ac_adapter,
                    );
                }
                Token::InterruptResample => {
                    sci_evt.clear_resample();

                    // Re-trigger SCI if PM1 or GPE status is still not cleared.
                    pm1.lock().resample_clear_evts_and_trigger(&sci_evt);
                    gpe0.lock().resample_clear_evts_and_trigger(&sci_evt);
                }
                Token::Kill => return Ok(()),
            }
        }
    }
}
289 
290 impl Pm1Resource {
trigger_sci(&self, sci_evt: &IrqLevelEvent)291     fn trigger_sci(&self, sci_evt: &IrqLevelEvent) {
292         if self.status & self.enable & ACPIPMFixedEvent::bitmask_all() != 0 {
293             if let Err(e) = sci_evt.trigger() {
294                 error!("ACPIPM: failed to trigger sci event for pm1: {}", e);
295             }
296             if let Err(e) = self.suspend_tube.lock().send(&false) {
297                 error!("ACPIPM: failed to trigger wake event: {}", e);
298             }
299         }
300     }
301 
resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent)302     fn resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent) {
303         if let Some(clear_evt) = self.rtc_clear_evt.take() {
304             if clear_evt.wait_timeout(Duration::ZERO) == Ok(EventWaitResult::TimedOut) {
305                 self.rtc_clear_evt = Some(clear_evt);
306                 self.status |= ACPIPMFixedEvent::RTC.bitmask();
307             }
308         }
309         self.trigger_sci(sci_evt);
310     }
311 }
312 
313 impl GpeResource {
trigger_sci(&self, sci_evt: &IrqLevelEvent)314     pub fn trigger_sci(&self, sci_evt: &IrqLevelEvent) {
315         if (0..self.status.len()).any(|i| self.status[i] & self.enable[i] != 0) {
316             if let Err(e) = sci_evt.trigger() {
317                 error!("ACPIPM: failed to trigger sci event for gpe: {}", e);
318             }
319             if let Err(e) = self.suspend_tube.lock().send(&false) {
320                 error!("ACPIPM: failed to trigger wake event: {}", e);
321             }
322         }
323     }
324 
set_active(&mut self, gpe: u32) -> Result<(), ACPIPMError>325     pub fn set_active(&mut self, gpe: u32) -> Result<(), ACPIPMError> {
326         if let Some(status_byte) = self.status.get_mut(gpe as usize / 8) {
327             *status_byte |= 1 << (gpe % 8);
328         } else {
329             return Err(ACPIPMError::GpeOutOfBound(gpe));
330         }
331         Ok(())
332     }
333 
resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent)334     pub fn resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent) {
335         let mut retained = Vec::new();
336         self.pending_clear_evts.retain(|gpe, clear_evts| {
337             clear_evts.retain(|clear_evt| {
338                 clear_evt.wait_timeout(Duration::ZERO) == Ok(EventWaitResult::TimedOut)
339             });
340             if !clear_evts.is_empty() {
341                 retained.push(*gpe);
342             }
343             !clear_evts.is_empty()
344         });
345         for gpe in retained.into_iter() {
346             self.set_active(gpe).expect("bad gpe index");
347         }
348 
349         self.trigger_sci(sci_evt);
350     }
351 }
352 
/// the ACPI PM register length.
pub const ACPIPM_RESOURCE_EVENTBLK_LEN: u8 = 4;
pub const ACPIPM_RESOURCE_CONTROLBLK_LEN: u8 = 2;
pub const ACPIPM_RESOURCE_GPE0_BLK_LEN: u8 = 64;
// Event block + 4 bytes (control block plus padding so the GPE0 block is
// aligned; see GPE0_STATUS below) + GPE0 block.
pub const ACPIPM_RESOURCE_LEN: u8 = ACPIPM_RESOURCE_EVENTBLK_LEN + 4 + ACPIPM_RESOURCE_GPE0_BLK_LEN;

// Should be in sync with gpe_allocator range
// (half of the GPE0 block is the status bank; each byte covers 8 GPEs).
pub const ACPIPM_GPE_MAX: u16 = ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2 * 8 - 1;

/// ACPI PM register value definitions

/// Section 4.8.4.1.1 PM1 Status Registers, ACPI Spec Version 6.4
/// Register Location: <PM1a_EVT_BLK / PM1b_EVT_BLK> System I/O or Memory Space (defined in FADT)
/// Size: PM1_EVT_LEN / 2 (defined in FADT)
const PM1_STATUS: u16 = 0;

/// Section 4.8.4.1.2 PM1Enable Registers, ACPI Spec Version 6.4
/// Register Location: <<PM1a_EVT_BLK / PM1b_EVT_BLK> + PM1_EVT_LEN / 2 System I/O or Memory Space
/// (defined in FADT)
/// Size: PM1_EVT_LEN / 2 (defined in FADT)
const PM1_ENABLE: u16 = PM1_STATUS + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2);

/// Section 4.8.4.2.1 PM1 Control Registers, ACPI Spec Version 6.4
/// Register Location: <PM1a_CNT_BLK / PM1b_CNT_BLK> System I/O or Memory Space (defined in FADT)
/// Size: PM1_CNT_LEN (defined in FADT)
const PM1_CONTROL: u16 = PM1_STATUS + ACPIPM_RESOURCE_EVENTBLK_LEN as u16;

/// Section 4.8.5.1 General-Purpose Event Register Blocks, ACPI Spec Version 6.4
/// - Each register block contains two registers: an enable and a status register.
/// - Each register block is 32-bit aligned.
/// - Each register in the block is accessed as a byte.
///
/// Section 4.8.5.1.1 General-Purpose Event 0 Register Block, ACPI Spec Version 6.4
/// This register block consists of two registers: The GPE0_STS and the GPE0_EN registers. Each
/// register’s length is defined to be half the length of the GPE0 register block, and is described
/// in the ACPI FADT’s GPE0_BLK and GPE0_BLK_LEN operators.
///
/// Section 4.8.5.1.1.1 General-Purpose Event 0 Status Register, ACPI Spec Version 6.4
/// Register Location: <GPE0_STS> System I/O or System Memory Space (defined in FADT)
/// Size: GPE0_BLK_LEN/2 (defined in FADT)
const GPE0_STATUS: u16 = PM1_STATUS + ACPIPM_RESOURCE_EVENTBLK_LEN as u16 + 4; // ensure alignment

/// Section 4.8.5.1.1.2 General-Purpose Event 0 Enable Register, ACPI Spec Version 6.4
/// Register Location: <GPE0_EN> System I/O or System Memory Space (defined in FADT)
/// Size: GPE0_BLK_LEN/2 (defined in FADT)
const GPE0_ENABLE: u16 = GPE0_STATUS + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2);

/// Section 4.8.4.1.1, 4.8.4.1.2 Fixed event bits in both PM1 Status and PM1 Enable registers.
const BITSHIFT_PM1_GBL: u16 = 5;
const BITSHIFT_PM1_PWRBTN: u16 = 8;
const BITSHIFT_PM1_SLPBTN: u16 = 9;
const BITSHIFT_PM1_RTC: u16 = 10;

// PM1 control register bits (Section 4.8.4.2.1): SLP_EN (bit 13, write-only)
// and WAK_STS (bit 15).
const BITMASK_PM1CNT_SLEEP_ENABLE: u16 = 0x2000;
const BITMASK_PM1CNT_WAKE_STATUS: u16 = 0x8000;

// SLP_TYP field (bits 10-12); the values must match the _S1_/_S5_ packages
// emitted by the Aml implementation for ACPIPMResource.
const BITMASK_PM1CNT_SLEEP_TYPE: u16 = 0x1C00;
const SLEEP_TYPE_S1: u16 = 1 << 10;
const SLEEP_TYPE_S5: u16 = 0 << 10;
412 
413 impl ACPIPMFixedEvent {
bitshift(self) -> u16414     fn bitshift(self) -> u16 {
415         match self {
416             ACPIPMFixedEvent::GlobalLock => BITSHIFT_PM1_GBL,
417             ACPIPMFixedEvent::PowerButton => BITSHIFT_PM1_PWRBTN,
418             ACPIPMFixedEvent::SleepButton => BITSHIFT_PM1_SLPBTN,
419             ACPIPMFixedEvent::RTC => BITSHIFT_PM1_RTC,
420         }
421     }
422 
bitmask(self) -> u16423     pub(crate) fn bitmask(self) -> u16 {
424         1 << self.bitshift()
425     }
426 
bitmask_all() -> u16427     fn bitmask_all() -> u16 {
428         (1 << BITSHIFT_PM1_GBL)
429             | (1 << BITSHIFT_PM1_PWRBTN)
430             | (1 << BITSHIFT_PM1_SLPBTN)
431             | (1 << BITSHIFT_PM1_RTC)
432     }
433 }
434 
impl FromStr for ACPIPMFixedEvent {
    type Err = &'static str;

    /// Parses a command-line event name into a fixed-event variant.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "gbllock" => Ok(ACPIPMFixedEvent::GlobalLock),
            "powerbtn" => Ok(ACPIPMFixedEvent::PowerButton),
            "sleepbtn" => Ok(ACPIPMFixedEvent::SleepButton),
            "rtc" => Ok(ACPIPMFixedEvent::RTC),
            _ => Err("unknown event, must be: gbllock|powerbtn|sleepbtn|rtc"),
        }
    }
}
448 
449 impl PmResource for ACPIPMResource {
pwrbtn_evt(&mut self)450     fn pwrbtn_evt(&mut self) {
451         let mut pm1 = self.pm1.lock();
452 
453         pm1.status |= ACPIPMFixedEvent::PowerButton.bitmask();
454         pm1.trigger_sci(&self.sci_evt);
455     }
456 
slpbtn_evt(&mut self)457     fn slpbtn_evt(&mut self) {
458         let mut pm1 = self.pm1.lock();
459 
460         pm1.status |= ACPIPMFixedEvent::SleepButton.bitmask();
461         pm1.trigger_sci(&self.sci_evt);
462     }
463 
rtc_evt(&mut self, clear_evt: Event)464     fn rtc_evt(&mut self, clear_evt: Event) {
465         let mut pm1 = self.pm1.lock();
466 
467         pm1.rtc_clear_evt = Some(clear_evt);
468         pm1.status |= ACPIPMFixedEvent::RTC.bitmask();
469         pm1.trigger_sci(&self.sci_evt);
470     }
471 
gpe_evt(&mut self, gpe: u32, clear_evt: Option<Event>)472     fn gpe_evt(&mut self, gpe: u32, clear_evt: Option<Event>) {
473         let mut gpe0 = self.gpe0.lock();
474         match gpe0.set_active(gpe) {
475             Ok(_) => {
476                 if let Some(clear_evt) = clear_evt {
477                     gpe0.pending_clear_evts
478                         .entry(gpe)
479                         .or_default()
480                         .push(clear_evt);
481                 }
482                 gpe0.trigger_sci(&self.sci_evt)
483             }
484             Err(e) => error!("{}", e),
485         }
486     }
487 
pme_evt(&mut self, requester_id: u16)488     fn pme_evt(&mut self, requester_id: u16) {
489         let bus = ((requester_id >> 8) & 0xFF) as u8;
490         let mut pci = self.pci.lock();
491         if let Some(root_ports) = pci.pme_notify.get_mut(&bus) {
492             for root_port in root_ports {
493                 root_port.lock().notify(requester_id);
494             }
495         }
496     }
497 
register_gpe_notify_dev(&mut self, gpe: u32, notify_dev: Arc<Mutex<dyn GpeNotify>>)498     fn register_gpe_notify_dev(&mut self, gpe: u32, notify_dev: Arc<Mutex<dyn GpeNotify>>) {
499         let mut gpe0 = self.gpe0.lock();
500         match gpe0.gpe_notify.get_mut(&gpe) {
501             Some(v) => v.push(notify_dev),
502             None => {
503                 gpe0.gpe_notify.insert(gpe, vec![notify_dev]);
504             }
505         }
506     }
507 
register_pme_notify_dev(&mut self, bus: u8, notify_dev: Arc<Mutex<dyn PmeNotify>>)508     fn register_pme_notify_dev(&mut self, bus: u8, notify_dev: Arc<Mutex<dyn PmeNotify>>) {
509         let mut pci = self.pci.lock();
510         match pci.pme_notify.get_mut(&bus) {
511             Some(v) => v.push(notify_dev),
512             None => {
513                 pci.pme_notify.insert(bus, vec![notify_dev]);
514             }
515         }
516     }
517 }
518 
// Inclusive upper offsets of each register region, used by the range patterns
// in the BusDevice read/write handlers.
const PM1_STATUS_LAST: u16 = PM1_STATUS + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2) - 1;
const PM1_ENABLE_LAST: u16 = PM1_ENABLE + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2) - 1;
const PM1_CONTROL_LAST: u16 = PM1_CONTROL + ACPIPM_RESOURCE_CONTROLBLK_LEN as u16 - 1;
const GPE0_STATUS_LAST: u16 = GPE0_STATUS + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2) - 1;
const GPE0_ENABLE_LAST: u16 = GPE0_ENABLE + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2) - 1;
524 
impl BusDevice for ACPIPMResource {
    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::ACPIPMResource.into()
    }

    fn debug_label(&self) -> String {
        "ACPIPMResource".to_owned()
    }

    /// Handles guest reads of the PM1 and GPE0 register blocks.
    ///
    /// PM1 registers accept byte or word reads anywhere inside the register;
    /// GPE registers are byte-accessed only. Oversized or out-of-range
    /// accesses are logged and ignored.
    // NOTE(review): the GPE branches index data[0]; a zero-length access
    // would panic — presumably the bus never issues one; confirm upstream.
    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        match info.offset as u16 {
            // Accesses to the PM1 registers are done through byte or word accesses
            PM1_STATUS..=PM1_STATUS_LAST => {
                // Reject reads wider than a word or crossing the register end.
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_STATUS as u64) as usize;

                let v = self.pm1.lock().status.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    data[i] = v[j];
                }
            }
            PM1_ENABLE..=PM1_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_ENABLE as u64) as usize;

                let v = self.pm1.lock().enable.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    data[i] = v[j];
                }
            }
            PM1_CONTROL..=PM1_CONTROL_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_CONTROL_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_CONTROL as u64) as usize;
                data.copy_from_slice(
                    &self.pm1.lock().control.to_ne_bytes()[offset..offset + data.len()],
                );
            }
            // OSPM accesses GPE registers through byte accesses (regardless of their length)
            GPE0_STATUS..=GPE0_STATUS_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_STATUS as u64) as usize;
                data[0] = self.gpe0.lock().status[offset];
            }
            GPE0_ENABLE..=GPE0_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_ENABLE as u64) as usize;
                data[0] = self.gpe0.lock().enable[offset];
            }
            _ => {
                warn!("ACPIPM: Bad read from {}", info);
            }
        }
    }

    /// Handles guest writes to the PM1 and GPE0 register blocks.
    ///
    /// Status registers are write-one-to-clear; enable registers take the
    /// written value and immediately re-evaluate SCI; writing SLP_EN in the
    /// control register triggers an S1 suspend or S5 shutdown.
    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        match info.offset as u16 {
            // Accesses to the PM1 registers are done through byte or word accesses
            PM1_STATUS..=PM1_STATUS_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad write size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_STATUS as u64) as usize;

                let mut pm1 = self.pm1.lock();
                let mut v = pm1.status.to_ne_bytes();
                // Write-one-to-clear: each set bit in the written value
                // clears the corresponding status bit.
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    v[j] &= !data[i];
                }
                pm1.status = u16::from_ne_bytes(v);
            }
            PM1_ENABLE..=PM1_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad write size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_ENABLE as u64) as usize;

                let mut pm1 = self.pm1.lock();
                let mut v = pm1.enable.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    v[j] = data[i];
                }
                pm1.enable = u16::from_ne_bytes(v);
                // Newly enabled bits may make an already-pending status bit
                // eligible to assert SCI.
                pm1.resample_clear_evts_and_trigger(&self.sci_evt);
            }
            PM1_CONTROL..=PM1_CONTROL_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_CONTROL_LAST + 1).into()
                {
                    warn!("ACPIPM: bad write size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_CONTROL as u64) as usize;

                let mut pm1 = self.pm1.lock();

                let mut v = pm1.control.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    v[j] = data[i];
                }
                let val = u16::from_ne_bytes(v);

                // SLP_EN is a write-only bit and reads to it always return a zero
                if (val & BITMASK_PM1CNT_SLEEP_ENABLE) != 0 {
                    match val & BITMASK_PM1CNT_SLEEP_TYPE {
                        // S1: ask the main loop to suspend the VM.
                        SLEEP_TYPE_S1 => {
                            if let Err(e) = self.suspend_tube.lock().send(&true) {
                                error!("ACPIPM: failed to trigger suspend event: {}", e);
                            }
                        }
                        // S5: request VM shutdown.
                        SLEEP_TYPE_S5 => {
                            if let Err(e) =
                                self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit)
                            {
                                error!("ACPIPM: failed to trigger exit event: {}", e);
                            }
                        }
                        _ => error!(
                            "ACPIPM: unknown SLP_TYP written: {}",
                            (val & BITMASK_PM1CNT_SLEEP_TYPE) >> 10
                        ),
                    }
                }
                pm1.control = val & !BITMASK_PM1CNT_SLEEP_ENABLE;

                // Re-trigger PM & GPEs in case there is a pending wakeup that should
                // override us just having gone to sleep.
                pm1.resample_clear_evts_and_trigger(&self.sci_evt);
                self.gpe0
                    .lock()
                    .resample_clear_evts_and_trigger(&self.sci_evt);
            }
            // OSPM accesses GPE registers through byte accesses (regardless of their length)
            GPE0_STATUS..=GPE0_STATUS_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad write size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_STATUS as u64) as usize;
                // Write-one-to-clear, as for PM1 status.
                self.gpe0.lock().status[offset] &= !data[0];
            }
            GPE0_ENABLE..=GPE0_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad write size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_ENABLE as u64) as usize;
                let mut gpe = self.gpe0.lock();
                // Only resample on an actual change to avoid redundant SCI work.
                if gpe.enable[offset] != data[0] {
                    gpe.enable[offset] = data[0];
                    gpe.resample_clear_evts_and_trigger(&self.sci_evt);
                }
            }
            _ => {
                warn!("ACPIPM: Bad write to {}", info);
            }
        };
    }
}
718 
impl BusResumeDevice for ACPIPMResource {
    /// Called just before the VM resumes: sets the wake status bit so the
    /// guest can observe that it woke up.
    // NOTE(review): the const is named PM1CNT but is OR'd into the status
    // register here; per ACPI 6.4 section 4.8.4.1.1 WAK_STS (bit 15) lives in
    // PM1_STS, so the behavior looks right and only the name is misleading —
    // confirm before renaming.
    fn resume_imminent(&mut self) {
        self.pm1.lock().status |= BITMASK_PM1CNT_WAKE_STATUS;
    }
}
724 
impl Aml for ACPIPMResource {
    /// Emits the \_S1_ and \_S5_ sleep-state packages into the DSDT bytes.
    /// The first two package elements are the SLP_TYP values for PM1a/PM1b
    /// and must match SLEEP_TYPE_S1 / SLEEP_TYPE_S5 decoded by `write`.
    fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
        // S1
        aml::Name::new(
            "_S1_".into(),
            &aml::Package::new(vec![&aml::ONE, &aml::ONE, &aml::ZERO, &aml::ZERO]),
        )
        .to_aml_bytes(bytes);

        // S5
        aml::Name::new(
            "_S5_".into(),
            &aml::Package::new(vec![&aml::ZERO, &aml::ZERO, &aml::ZERO, &aml::ZERO]),
        )
        .to_aml_bytes(bytes);
    }
}
742 
/// GPE number used for PM wakeup events.
pub const PM_WAKEUP_GPIO: u32 = 0;

/// Raises a wakeup GPE through the VM control tube when a device's PM
/// configuration calls for a PME.
pub struct PmWakeupEvent {
    // Tube for sending VmRequest::Gpe and receiving the VmResponse.
    vm_control_tube: Arc<Mutex<Tube>>,
    // Consulted to decide whether a PME should be triggered at all.
    pm_config: Arc<Mutex<PmConfig>>,
}
749 
750 impl PmWakeupEvent {
new(vm_control_tube: Arc<Mutex<Tube>>, pm_config: Arc<Mutex<PmConfig>>) -> Self751     pub fn new(vm_control_tube: Arc<Mutex<Tube>>, pm_config: Arc<Mutex<PmConfig>>) -> Self {
752         Self {
753             vm_control_tube,
754             pm_config,
755         }
756     }
757 
trigger_wakeup(&self) -> anyhow::Result<Option<Event>>758     pub fn trigger_wakeup(&self) -> anyhow::Result<Option<Event>> {
759         if self.pm_config.lock().should_trigger_pme() {
760             let event = Event::new().context("failed to create clear event")?;
761             let tube = self.vm_control_tube.lock();
762             tube.send(&VmRequest::Gpe {
763                 gpe: PM_WAKEUP_GPIO,
764                 clear_evt: Some(event.try_clone().context("failed to clone clear event")?),
765             })
766             .context("failed to send pme")?;
767             match tube.recv::<VmResponse>() {
768                 Ok(VmResponse::Ok) => Ok(Some(event)),
769                 e => bail!("pme failure {:?}", e),
770             }
771         } else {
772             Ok(None)
773         }
774     }
775 }
776 
#[cfg(test)]
mod tests {
    use base::Tube;

    use super::*;
    use crate::suspendable_tests;

    // Returns the send half of a directional tube pair; the receive half is
    // dropped, which is fine here since nothing reads from it.
    fn get_send_tube() -> SendTube {
        Tube::directional_pair().unwrap().0
    }

    fn get_irq_evt() -> IrqLevelEvent {
        match crate::IrqLevelEvent::new() {
            Ok(evt) => evt,
            Err(e) => panic!(
                "failed to create irqlevelevt: {} - panic. Can't test ACPI",
                e
            ),
        }
    }

    // Mutates serialized device state so the snapshot/restore round-trip
    // test can detect a failure to restore.
    fn modify_device(acpi: &mut ACPIPMResource) {
        {
            let mut pm1 = acpi.pm1.lock();
            pm1.enable += 1;
        }
    }

    suspendable_tests!(
        acpi,
        ACPIPMResource::new(
            get_irq_evt(),
            Arc::new(Mutex::new(get_send_tube())),
            get_send_tube(),
            None,
        ),
        modify_device
    );
}
816