//! MMIO transport for VirtIO.

use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    align_up,
    queue::Descriptor,
    volatile::{volread, volwrite, ReadOnly, Volatile, WriteOnly},
    Error, PhysAddr, PAGE_SIZE,
};
use core::{
    convert::{TryFrom, TryInto},
    fmt::{self, Display, Formatter},
    mem::{align_of, size_of},
    ptr::NonNull,
};

const MAGIC_VALUE: u32 = 0x7472_6976;
pub(crate) const LEGACY_VERSION: u32 = 1;
pub(crate) const MODERN_VERSION: u32 = 2;
const CONFIG_SPACE_OFFSET: usize = 0x100;

/// The version of the VirtIO MMIO transport supported by a device.
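///
/// # Example
///
/// A small sketch of how the raw `version` register value maps to this enum; any value other
/// than 1 or 2 is rejected:
///
/// ```rust,ignore
/// use core::convert::TryFrom;
///
/// assert_eq!(MmioVersion::try_from(1), Ok(MmioVersion::Legacy));
/// assert_eq!(MmioVersion::try_from(2), Ok(MmioVersion::Modern));
/// assert_eq!(
///     MmioVersion::try_from(3),
///     Err(MmioError::UnsupportedVersion(3))
/// );
/// ```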
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport with page-based addressing.
    Legacy = LEGACY_VERSION,
    /// Modern MMIO transport.
    Modern = MODERN_VERSION,
}

impl TryFrom<u32> for MmioVersion {
    type Error = MmioError;

    fn try_from(version: u32) -> Result<Self, Self::Error> {
        match version {
            LEGACY_VERSION => Ok(Self::Legacy),
            MODERN_VERSION => Ok(Self::Modern),
            _ => Err(MmioError::UnsupportedVersion(version)),
        }
    }
}

impl From<MmioVersion> for u32 {
    fn from(version: MmioVersion) -> Self {
        match version {
            MmioVersion::Legacy => LEGACY_VERSION,
            MmioVersion::Modern => MODERN_VERSION,
        }
    }
}

/// An error encountered initialising a VirtIO MMIO transport.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MmioError {
    /// The header doesn't start with the expected magic value 0x74726976.
    BadMagic(u32),
    /// The header reports a version number that is neither 1 (legacy) nor 2 (modern).
    UnsupportedVersion(u32),
    /// The header reports a device ID of 0.
    ZeroDeviceId,
}

impl Display for MmioError {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Self::BadMagic(magic) => write!(
                f,
                "Invalid magic value {:#010x} (expected 0x74726976).",
                magic
            ),
            Self::UnsupportedVersion(version) => {
                write!(f, "Unsupported Virtio MMIO version {}.", version)
            }
            Self::ZeroDeviceId => write!(f, "Device ID was zero."),
        }
    }
}

/// MMIO Device Register Interface, both legacy and modern.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
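///
/// The register block is 0x100 bytes long; the device-specific configuration space starts
/// immediately after it (see `CONFIG_SPACE_OFFSET`). A quick sanity check, assuming
/// `DeviceStatus` is a 32-bit value as it is in this crate:
///
/// ```rust,ignore
/// use core::mem::size_of;
///
/// assert_eq!(size_of::<VirtIOHeader>(), 0x100);
/// ```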
#[repr(C)]
pub struct VirtIOHeader {
    /// Magic value
    magic: ReadOnly<u32>,

    /// Device version number
    ///
    /// Legacy device returns value 0x1.
    version: ReadOnly<u32>,

    /// Virtio Subsystem Device ID
    device_id: ReadOnly<u32>,

    /// Virtio Subsystem Vendor ID
    vendor_id: ReadOnly<u32>,

    /// Flags representing features the device supports
    device_features: ReadOnly<u32>,

    /// Device (host) features word selection
    device_features_sel: WriteOnly<u32>,

    /// Reserved
    __r1: [ReadOnly<u32>; 2],

    /// Flags representing device features understood and activated by the driver
    driver_features: WriteOnly<u32>,

    /// Activated (guest) features word selection
    driver_features_sel: WriteOnly<u32>,

    /// Guest page size
    ///
    /// The driver writes the guest page size in bytes to the register during
    /// initialization, before any queues are used. This value should be a
    /// power of 2 and is used by the device to calculate the Guest address
    /// of the first queue page (see QueuePFN).
    legacy_guest_page_size: WriteOnly<u32>,

    /// Reserved
    __r2: ReadOnly<u32>,

    /// Virtual queue index
    ///
    /// Writing to this register selects the virtual queue that the following
    /// operations on the QueueNumMax, QueueNum, QueueAlign and QueuePFN
    /// registers apply to. The index number of the first queue is zero (0x0).
    queue_sel: WriteOnly<u32>,

    /// Maximum virtual queue size
    ///
    /// Reading from the register returns the maximum size of the queue the
    /// device is ready to process or zero (0x0) if the queue is not available.
    /// This applies to the queue selected by writing to QueueSel and is
    /// allowed only when QueuePFN is set to zero (0x0), so when the queue is
    /// not actively used.
    queue_num_max: ReadOnly<u32>,

    /// Virtual queue size
    ///
    /// Queue size is the number of elements in the queue. Writing to this
    /// register notifies the device what size of the queue the driver will use.
    /// This applies to the queue selected by writing to QueueSel.
    queue_num: WriteOnly<u32>,

    /// Used Ring alignment in the virtual queue
    ///
    /// Writing to this register notifies the device about alignment boundary
    /// of the Used Ring in bytes. This value should be a power of 2 and
    /// applies to the queue selected by writing to QueueSel.
    legacy_queue_align: WriteOnly<u32>,

    /// Guest physical page number of the virtual queue
    ///
    /// Writing to this register notifies the device about location of the
    /// virtual queue in the Guest’s physical address space. This value is
    /// the index number of a page starting with the queue Descriptor Table.
    /// Value zero (0x0) means physical address zero (0x00000000) and is illegal.
    /// When the driver stops using the queue it writes zero (0x0) to this
    /// register. Reading from this register returns the currently used page
    /// number of the queue, therefore a value other than zero (0x0) means that
    /// the queue is in use. Both read and write accesses apply to the queue
    /// selected by writing to QueueSel.
    legacy_queue_pfn: Volatile<u32>,

    /// Queue ready (only used by the modern interface)
    queue_ready: Volatile<u32>,

    /// Reserved
    __r3: [ReadOnly<u32>; 2],

    /// Queue notifier
    queue_notify: WriteOnly<u32>,

    /// Reserved
    __r4: [ReadOnly<u32>; 3],

    /// Interrupt status
    interrupt_status: ReadOnly<u32>,

    /// Interrupt acknowledge
    interrupt_ack: WriteOnly<u32>,

    /// Reserved
    __r5: [ReadOnly<u32>; 2],

    /// Device status
    ///
    /// Reading from this register returns the current device status flags.
    /// Writing non-zero values to this register sets the status flags,
    /// indicating the OS/driver progress. Writing zero (0x0) to this register
    /// triggers a device reset. The device sets QueuePFN to zero (0x0) for
    /// all queues in the device. Also see 3.1 Device Initialization.
    status: Volatile<DeviceStatus>,

    /// Reserved
    __r6: [ReadOnly<u32>; 3],

    // The registers from here on are only used by the modern interface.
    queue_desc_low: WriteOnly<u32>,
    queue_desc_high: WriteOnly<u32>,

    /// Reserved
    __r7: [ReadOnly<u32>; 2],

    queue_driver_low: WriteOnly<u32>,
    queue_driver_high: WriteOnly<u32>,

    /// Reserved
    __r8: [ReadOnly<u32>; 2],

    queue_device_low: WriteOnly<u32>,
    queue_device_high: WriteOnly<u32>,

    /// Reserved
    __r9: [ReadOnly<u32>; 21],

    config_generation: ReadOnly<u32>,
}

impl VirtIOHeader {
    /// Constructs a fake VirtIO header for use in unit tests.
    #[cfg(test)]
    pub fn make_fake_header(
        version: u32,
        device_id: u32,
        vendor_id: u32,
        device_features: u32,
        queue_num_max: u32,
    ) -> Self {
        Self {
            magic: ReadOnly::new(MAGIC_VALUE),
            version: ReadOnly::new(version),
            device_id: ReadOnly::new(device_id),
            vendor_id: ReadOnly::new(vendor_id),
            device_features: ReadOnly::new(device_features),
            device_features_sel: WriteOnly::default(),
            __r1: Default::default(),
            driver_features: Default::default(),
            driver_features_sel: Default::default(),
            legacy_guest_page_size: Default::default(),
            __r2: Default::default(),
            queue_sel: Default::default(),
            queue_num_max: ReadOnly::new(queue_num_max),
            queue_num: Default::default(),
            legacy_queue_align: Default::default(),
            legacy_queue_pfn: Default::default(),
            queue_ready: Default::default(),
            __r3: Default::default(),
            queue_notify: Default::default(),
            __r4: Default::default(),
            interrupt_status: Default::default(),
            interrupt_ack: Default::default(),
            __r5: Default::default(),
            status: Volatile::new(DeviceStatus::empty()),
            __r6: Default::default(),
            queue_desc_low: Default::default(),
            queue_desc_high: Default::default(),
            __r7: Default::default(),
            queue_driver_low: Default::default(),
            queue_driver_high: Default::default(),
            __r8: Default::default(),
            queue_device_low: Default::default(),
            queue_device_high: Default::default(),
            __r9: Default::default(),
            config_generation: Default::default(),
        }
    }
}

/// A VirtIO transport over the MMIO Device Register Interface, both legacy and modern.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
#[derive(Debug)]
pub struct MmioTransport {
    header: NonNull<VirtIOHeader>,
    version: MmioVersion,
}

impl MmioTransport {
    /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
    /// unsupported version.
    ///
    /// # Safety
    /// `header` must point to a properly aligned valid VirtIO MMIO region, which must remain valid
    /// for the lifetime of the transport that is returned.
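    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the device's MMIO region has already been mapped and
    /// `MMIO_BASE` (a made-up placeholder) is its base address:
    ///
    /// ```rust,ignore
    /// const MMIO_BASE: usize = 0x1000_1000;
    ///
    /// let header = NonNull::new(MMIO_BASE as *mut VirtIOHeader).unwrap();
    /// // SAFETY: MMIO_BASE points to a mapped VirtIO MMIO region which outlives the transport.
    /// match unsafe { MmioTransport::new(header) } {
    ///     Ok(transport) => {
    ///         // Hand the transport to a device driver based on transport.device_type().
    ///     }
    ///     Err(_) => {
    ///         // Not a VirtIO MMIO device, or an unsupported version.
    ///     }
    /// }
    /// ```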
    pub unsafe fn new(header: NonNull<VirtIOHeader>) -> Result<Self, MmioError> {
        let magic = volread!(header, magic);
        if magic != MAGIC_VALUE {
            return Err(MmioError::BadMagic(magic));
        }
        if volread!(header, device_id) == 0 {
            return Err(MmioError::ZeroDeviceId);
        }
        let version = volread!(header, version).try_into()?;
        Ok(Self { header, version })
    }

    /// Gets the version of the VirtIO MMIO transport.
    pub fn version(&self) -> MmioVersion {
        self.version
    }

    /// Gets the vendor ID.
    pub fn vendor_id(&self) -> u32 {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe { volread!(self.header, vendor_id) }
    }
}

// SAFETY: `header` is only used for MMIO, which can happen from any thread or CPU core.
unsafe impl Send for MmioTransport {}

// SAFETY: `&MmioTransport` only allows MMIO reads or getting the config space, both of which are
// fine to happen concurrently on different CPU cores.
unsafe impl Sync for MmioTransport {}

impl Transport for MmioTransport {
    fn device_type(&self) -> DeviceType {
        // Safe because self.header points to a valid VirtIO MMIO region.
        let device_id = unsafe { volread!(self.header, device_id) };
        device_id.into()
    }

    fn read_device_features(&mut self) -> u64 {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, device_features_sel, 0); // device features [0, 32)
            let mut device_features_bits = volread!(self.header, device_features).into();
            volwrite!(self.header, device_features_sel, 1); // device features [32, 64)
            device_features_bits += (volread!(self.header, device_features) as u64) << 32;
            device_features_bits
        }
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, driver_features_sel, 0); // driver features [0, 32)
            volwrite!(self.header, driver_features, driver_features as u32);
            volwrite!(self.header, driver_features_sel, 1); // driver features [32, 64)
            volwrite!(self.header, driver_features, (driver_features >> 32) as u32);
        }
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, queue_sel, queue.into());
            volread!(self.header, queue_num_max)
        }
    }

    fn notify(&mut self, queue: u16) {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, queue_notify, queue.into());
        }
    }

    fn get_status(&self) -> DeviceStatus {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe { volread!(self.header, status) }
    }

    fn set_status(&mut self, status: DeviceStatus) {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, status, status);
        }
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        match self.version {
            MmioVersion::Legacy => {
                // Safe because self.header points to a valid VirtIO MMIO region.
                unsafe {
                    volwrite!(self.header, legacy_guest_page_size, guest_page_size);
                }
            }
            MmioVersion::Modern => {
                // No-op, modern devices don't care.
            }
        }
    }

    fn requires_legacy_layout(&self) -> bool {
        match self.version {
            MmioVersion::Legacy => true,
            MmioVersion::Modern => false,
        }
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        match self.version {
            MmioVersion::Legacy => {
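                // The legacy interface expects the whole virtqueue in one physically
                // contiguous allocation: the descriptor table, immediately followed by the
                // available ring (flags, idx, `size` ring entries and used_event), with the
                // used ring starting at the next `legacy_queue_align` boundary. Check that
                // the caller's addresses match that layout, since only a single page frame
                // number is reported to the device.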
                assert_eq!(
                    driver_area - descriptors,
                    size_of::<Descriptor>() * size as usize
                );
                assert_eq!(
                    device_area - descriptors,
                    align_up(
                        size_of::<Descriptor>() * size as usize
                            + size_of::<u16>() * (size as usize + 3)
                    )
                );
                let align = PAGE_SIZE as u32;
                let pfn = (descriptors / PAGE_SIZE) as u32;
                assert_eq!(pfn as usize * PAGE_SIZE, descriptors);
                // Safe because self.header points to a valid VirtIO MMIO region.
                unsafe {
                    volwrite!(self.header, queue_sel, queue.into());
                    volwrite!(self.header, queue_num, size);
                    volwrite!(self.header, legacy_queue_align, align);
                    volwrite!(self.header, legacy_queue_pfn, pfn);
                }
            }
            MmioVersion::Modern => {
                // Safe because self.header points to a valid VirtIO MMIO region.
                unsafe {
                    volwrite!(self.header, queue_sel, queue.into());
                    volwrite!(self.header, queue_num, size);
                    volwrite!(self.header, queue_desc_low, descriptors as u32);
                    volwrite!(self.header, queue_desc_high, (descriptors >> 32) as u32);
                    volwrite!(self.header, queue_driver_low, driver_area as u32);
                    volwrite!(self.header, queue_driver_high, (driver_area >> 32) as u32);
                    volwrite!(self.header, queue_device_low, device_area as u32);
                    volwrite!(self.header, queue_device_high, (device_area >> 32) as u32);
                    volwrite!(self.header, queue_ready, 1);
                }
            }
        }
    }

    fn queue_unset(&mut self, queue: u16) {
        match self.version {
            MmioVersion::Legacy => {
                // Safe because self.header points to a valid VirtIO MMIO region.
                unsafe {
                    volwrite!(self.header, queue_sel, queue.into());
                    volwrite!(self.header, queue_num, 0);
                    volwrite!(self.header, legacy_queue_align, 0);
                    volwrite!(self.header, legacy_queue_pfn, 0);
                }
            }
            MmioVersion::Modern => {
                // Safe because self.header points to a valid VirtIO MMIO region.
                unsafe {
                    volwrite!(self.header, queue_sel, queue.into());

                    volwrite!(self.header, queue_ready, 0);
                    // Wait until we read the same value back, to ensure synchronisation (see 4.2.2.2).
                    while volread!(self.header, queue_ready) != 0 {}

                    volwrite!(self.header, queue_num, 0);
                    volwrite!(self.header, queue_desc_low, 0);
                    volwrite!(self.header, queue_desc_high, 0);
                    volwrite!(self.header, queue_driver_low, 0);
                    volwrite!(self.header, queue_driver_high, 0);
                    volwrite!(self.header, queue_device_low, 0);
                    volwrite!(self.header, queue_device_high, 0);
                }
            }
        }
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            volwrite!(self.header, queue_sel, queue.into());
            match self.version {
                MmioVersion::Legacy => volread!(self.header, legacy_queue_pfn) != 0,
                MmioVersion::Modern => volread!(self.header, queue_ready) != 0,
            }
        }
    }

    fn ack_interrupt(&mut self) -> bool {
        // Safe because self.header points to a valid VirtIO MMIO region.
        unsafe {
            let interrupt = volread!(self.header, interrupt_status);
            if interrupt != 0 {
                volwrite!(self.header, interrupt_ack, interrupt);
                true
            } else {
                false
            }
        }
    }

    fn config_space<T>(&self) -> Result<NonNull<T>, Error> {
        if align_of::<T>() > 4 {
            // Panic as this should only happen if the driver is written incorrectly.
            panic!(
                "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
                align_of::<T>()
            );
        }
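        // The device-specific configuration space starts at offset 0x100 from the start of
        // the MMIO register block (see 4.2.2 MMIO Device Register Layout).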
        Ok(NonNull::new((self.header.as_ptr() as usize + CONFIG_SPACE_OFFSET) as _).unwrap())
    }
}

impl Drop for MmioTransport {
    fn drop(&mut self) {
        // Reset the device when the transport is dropped.
        self.set_status(DeviceStatus::empty())
    }
}