xref: /aosp_15_r20/external/crosvm/devices/src/virtio/queue/split_queue.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::num::Wrapping;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::Event;
use data_model::Le32;
use serde::Deserialize;
use serde::Serialize;
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::virtio::DescriptorChain;
use crate::virtio::Interrupt;
use crate::virtio::QueueConfig;
use crate::virtio::SplitDescriptorChain;

#[allow(dead_code)]
const VIRTQ_USED_F_NO_NOTIFY: u16 = 0x1;
#[allow(dead_code)]
const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 0x1;

/// An activated virtio queue with split queue layout.
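///
/// A minimal sketch of the device-side processing loop, assuming a hypothetical
/// `process` handler that consumes a chain and returns the number of bytes
/// written to it:
///
/// ```ignore
/// while let Some(chain) = queue.peek() {
///     queue.pop_peeked(&chain);
///     let len = process(&chain);
///     queue.add_used(chain, len);
///     queue.trigger_interrupt();
/// }
/// ```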
#[derive(Debug)]
pub struct SplitQueue {
    mem: GuestMemory,

    event: Event,
    interrupt: Interrupt,

    /// The queue size in elements the driver selected. This is always guaranteed to be a power of
    /// two, as required for split virtqueues.
    size: u16,

    /// MSI-X vector for the queue. Ignored when INTx is used.
    vector: u16,

    /// Guest physical address of the descriptor table.
    desc_table: GuestAddress,

    /// Guest physical address of the available ring.
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring.
    used_ring: GuestAddress,

    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,

    /// Device feature bits accepted by the driver.
    features: u64,
    last_used: Wrapping<u16>,
}

#[derive(Serialize, Deserialize)]
pub struct SplitQueueSnapshot {
    size: u16,
    vector: u16,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
    features: u64,
    last_used: Wrapping<u16>,
}

#[repr(C)]
#[derive(AsBytes, FromZeroes, FromBytes)]
struct virtq_used_elem {
    id: Le32,
    len: Le32,
}

impl SplitQueue {
    /// Constructs an activated split virtio queue with the given configuration.
    pub fn new(
        config: &QueueConfig,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> Result<SplitQueue> {
        let size = config.size();
        if !size.is_power_of_two() {
            bail!("split queue size {size} is not a power of 2");
        }

        let desc_table = config.desc_table();
        let avail_ring = config.avail_ring();
        let used_ring = config.used_ring();

        // Validate addresses and queue size to ensure that address calculation won't overflow.
        let ring_sizes = Self::ring_sizes(size, desc_table, avail_ring, used_ring);
        let rings = ring_sizes
            .iter()
            .zip(vec!["descriptor table", "available ring", "used ring"]);

        for ((addr, size), name) in rings {
            if addr.checked_add(*size as u64).is_none() {
                bail!(
                    "virtio queue {} goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    name,
                    addr.offset(),
                    size,
                );
            }
        }

        Ok(SplitQueue {
            mem: mem.clone(),
            event,
            interrupt,
            size,
            vector: config.vector(),
            desc_table: config.desc_table(),
            avail_ring: config.avail_ring(),
            used_ring: config.used_ring(),
            features: config.acked_features(),
            next_avail: config.next_avail(),
            next_used: config.next_used(),
            last_used: config.next_used(),
        })
    }

    pub fn vhost_user_reclaim(&mut self, vring_base: u16) {
        self.next_avail = Wrapping(vring_base);
        // The vhost-user spec says:
        //
        //     For the Used Ring, the device only needs the next descriptor index at which to put
        //     new descriptors, which is the value in the vring structure in memory, so this value
        //     is not covered by this message.
        //
        // So, we read the value from guest memory.
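        // (In the split-ring layout, the used ring's `idx` field is a u16 at offset 2,
        // just after the u16 `flags` field.)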
        let used_index_addr = self.used_ring.unchecked_add(2);
        self.next_used = self
            .mem
            .read_obj_from_addr_volatile(used_index_addr)
            .unwrap();
        // We assume the vhost-user backend sent interrupts for any descriptors it marked used
        // before it stopped processing the queue, so `last_used == next_used`.
        self.last_used = self.next_used;
    }

    pub fn next_avail_to_process(&self) -> u16 {
        self.next_avail.0
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Getter for vector field
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Getter for descriptor area
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Getter for driver area
    pub fn avail_ring(&self) -> GuestAddress {
        self.avail_ring
    }

    /// Getter for device area
    pub fn used_ring(&self) -> GuestAddress {
        self.used_ring
    }

    /// Get a reference to the queue's "kick event"
    pub fn event(&self) -> &Event {
        &self.event
    }

    /// Get a reference to the queue's interrupt
    pub fn interrupt(&self) -> &Interrupt {
        &self.interrupt
    }

    // Return `index` modulo the currently configured queue size.
    fn wrap_queue_index(&self, index: Wrapping<u16>) -> u16 {
        // We know that `self.size` is a power of two (enforced by `new()`), so the modulus can
        // be calculated with a bitmask rather than actual division.
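        // For example, with size = 8 the mask is 0b111, so index 9 wraps to 1 (9 % 8).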
        debug_assert!(self.size.is_power_of_two());
        index.0 & self.size.wrapping_sub(1)
    }

    fn ring_sizes(
        queue_size: u16,
        desc_table: GuestAddress,
        avail_ring: GuestAddress,
        used_ring: GuestAddress,
    ) -> Vec<(GuestAddress, usize)> {
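        // Region sizes from the virtio split virtqueue layout:
        //   descriptor table: 16 bytes per descriptor (addr u64, len u32, flags u16, next u16)
        //   available ring: flags u16 + idx u16 + one u16 entry per descriptor + used_event u16
        //   used ring: flags u16 + idx u16 + one 8-byte entry per descriptor + avail_event u16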
        let queue_size = queue_size as usize;
        vec![
            (desc_table, 16 * queue_size),
            (avail_ring, 6 + 2 * queue_size),
            (used_ring, 6 + 8 * queue_size),
        ]
    }

    // Get the index of the first available descriptor chain in the available ring
    // (the next one that the driver will fill).
    //
    // All available ring entries between `self.next_avail` and `get_avail_index()` are available
    // to be processed by the device.
    fn get_avail_index(&self) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

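        // The avail ring's `idx` field is a u16 at offset 2, after the u16 `flags` field.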
        let avail_index_addr = self.avail_ring.unchecked_add(2);
        let avail_index: u16 = self
            .mem
            .read_obj_from_addr_volatile(avail_index_addr)
            .unwrap();

        Wrapping(avail_index)
    }

    // Set the `avail_event` field in the used ring.
    //
    // This allows the device to inform the driver that driver-to-device notification
    // (kicking the ring) is not necessary until the driver reaches the `avail_index` descriptor.
    //
    // This value is only used if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn set_avail_event(&mut self, avail_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

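        // `avail_event` trails the used ring: flags (2 bytes) + idx (2 bytes) + `size`
        // 8-byte ring entries.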
        let avail_event_addr = self.used_ring.unchecked_add(4 + 8 * u64::from(self.size));
        self.mem
            .write_obj_at_addr_volatile(avail_index.0, avail_event_addr)
            .unwrap();
    }

    // Query the value of a single-bit flag in the available ring.
    //
    // Returns `true` if `flag` is currently set (by the driver) in the available ring flags.
    fn get_avail_flag(&self, flag: u16) -> bool {
        fence(Ordering::SeqCst);

        let avail_flags: u16 = self
            .mem
            .read_obj_from_addr_volatile(self.avail_ring)
            .unwrap();

        avail_flags & flag == flag
    }

    // Get the `used_event` field in the available ring.
    //
    // The returned value is the index of the next descriptor chain entry for which the driver
    // needs to be notified upon use.  Entries before this index may be used without notifying
    // the driver.
    //
    // This value is only valid if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn get_used_event(&self) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

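        // `used_event` trails the avail ring: flags (2 bytes) + idx (2 bytes) + `size`
        // u16 ring entries.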
        let used_event_addr = self.avail_ring.unchecked_add(4 + 2 * u64::from(self.size));
        let used_event: u16 = self
            .mem
            .read_obj_from_addr_volatile(used_event_addr)
            .unwrap();

        Wrapping(used_event)
    }

    // Set the `idx` field in the used ring.
    //
    // This indicates to the driver that all entries up to (but not including) `used_index` have
    // been used by the device and may be processed by the driver.
    fn set_used_index(&mut self, used_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

        let used_index_addr = self.used_ring.unchecked_add(2);
        self.mem
            .write_obj_at_addr_volatile(used_index.0, used_index_addr)
            .unwrap();
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop_peeked` to remove the returned descriptor chain from the queue.
    pub fn peek(&mut self) -> Option<DescriptorChain> {
        let avail_index = self.get_avail_index();
        if self.next_avail == avail_index {
            return None;
        }

        // This fence ensures that subsequent reads from the descriptor do not
        // get reordered and happen only after fetching the available_index and
        // checking that there is a slot available.
        fence(Ordering::SeqCst);

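        // Available-ring entries (u16 descriptor indices) start at offset 4, after the
        // flags and idx fields.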
        let desc_idx_addr_offset = 4 + (u64::from(self.wrap_queue_index(self.next_avail)) * 2);
        let desc_idx_addr = self.avail_ring.checked_add(desc_idx_addr_offset)?;

        // This index is validated by `DescriptorChain::new` below.
        let descriptor_index: u16 = self.mem.read_obj_from_addr_volatile(desc_idx_addr).unwrap();

        let chain =
            SplitDescriptorChain::new(&self.mem, self.desc_table, self.size, descriptor_index);
        DescriptorChain::new(chain, &self.mem, descriptor_index)
            .map_err(|e| {
                error!("{:#}", e);
                e
            })
            .ok()
    }

    /// Remove the first available descriptor chain from the queue.
    /// This function should only be called immediately following `peek` and must be passed a
    /// reference to the same `DescriptorChain` returned by the most recent `peek`.
    pub(super) fn pop_peeked(&mut self, _descriptor_chain: &DescriptorChain) {
        self.next_avail += Wrapping(1);
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.set_avail_event(self.next_avail);
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, desc_chain: DescriptorChain, len: u32) {
        let desc_index = desc_chain.index();
        debug_assert!(desc_index < self.size);

        let used_ring = self.used_ring;
        let next_used = self.wrap_queue_index(self.next_used) as usize;
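        // Each used-ring element is 8 bytes (id: u32, len: u32); the element array starts
        // at offset 4, after the flags and idx fields.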
        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);

        let elem = virtq_used_elem {
            id: Le32::from(u32::from(desc_index)),
            len: Le32::from(len),
        };

        // This write can't fail as we are guaranteed to be within the used ring.
        self.mem
            .write_obj_at_addr_volatile(elem, used_elem)
            .unwrap();

        self.next_used += Wrapping(1);
        self.set_used_index(self.next_used);
    }

    /// Returns whether the queue should have an interrupt sent based on its state.
    ///
    /// This function implements `VIRTIO_RING_F_EVENT_IDX`, otherwise known as
    /// interrupt suppression. The virtio spec provides the driver with a field,
    /// `used_event`, which says that once we write that descriptor (or several
    /// in the case of a flurry of `add_used` calls), we should send a
    /// notification. Because the values involved wrap around `u16::MAX`, and to
    /// avoid checking the condition on every `add_used` call, the math is a
    /// little complicated.
    ///
    /// The critical inequality is:
    /// ```text
    ///      (next_used - 1) - used_event < next_used - last_used
    /// ```
    ///
    /// For illustration purposes, we label it as `A < B`, where
    /// `A = (next_used - 1) - used_event`, and `B = next_used - last_used`.
    ///
    /// `A` and `B` represent two distances, measured in a wrapping ring of size
    /// `u16::MAX`. In the "send intr" case, the inequality is true. In the
    /// "don't send intr" case, the inequality is false. We must be very careful
    /// in assigning a direction to the ring, so that when we
    /// graph the subtraction operations, we are measuring the right distance
    /// (similar to how DC circuits are analyzed).
    ///
    /// The two distances are as follows:
    ///  * `A` is the distance between the driver's requested notification point, and the current
    ///    position in the ring.
    ///
    ///  * `B` is the distance between the last time we notified the guest, and the current position
    ///    in the ring.
    ///
    /// If we graph these distances for the situation where we want to notify
    /// the guest, and when we don't want to notify the guest, we see that
    /// `A < B` becomes true the moment `next_used - 1` passes `used_event`. See
    /// the graphs at the bottom of this comment block for a more visual
    /// explanation.
    ///
    /// Once an interrupt is sent, we have a final useful property: `last_used`
    /// moves up to `next_used`, which causes the inequality to be false. Thus, we
    /// won't send notifications again until `used_event` is moved forward by
    /// the driver.
    ///
    /// Finally, let's talk about a couple of ways to write this inequality
    /// that don't work, and critically, explain *why*.
    ///
    /// First, a naive reading of the virtio spec might lead us to ask: why not
    /// just use the following inequality:
    /// ```text
    ///      next_used - 1 >= used_event
    /// ```
    ///
    /// because that's much simpler, right? The trouble is that the ring wraps,
    /// so it could be that a smaller index is actually ahead of a larger one.
    /// That's why we have to use distances in the ring instead.
    ///
    /// Second, one might look at the correct inequality:
    /// ```text
    ///      (next_used - 1) - used_event < next_used - last_used
    /// ```
    ///
    /// And try to simplify it to:
    /// ```text
    ///      last_used - 1 < used_event
    /// ```
    ///
    /// Functionally, this won't work because `next_used` isn't present at all
    /// anymore. (Notifications will never be sent.) But why is that? The algebra
    /// here *appears* to work out, but all semantic meaning is lost. There are
    /// two explanations for why this happens:
    /// * The intuitive one: the terms in the inequality are not actually separable; in other words,
    ///   (next_used - last_used) is an inseparable term, so subtracting next_used from both sides
    ///   of the original inequality and zeroing them out is semantically invalid. But why aren't
    ///   they separable? See below.
    /// * The theoretical one: canceling like terms relies on a vector space law: a + x = b + x =>
    ///   a = b (cancellation law). For congruences / equality under modulo, this law is satisfied,
    ///   but for inequalities under mod, it is not; therefore, we cannot cancel like terms.
    ///
    /// ```text
    /// ┌──────────────────────────────────┐
    /// │                                  │
    /// │                                  │
    /// │                                  │
    /// │           ┌────────────  next_used - 1
    /// │           │A                   x
    /// │           │       ┌────────────x────────────┐
    /// │           │       │            x            │
    /// │           │       │                         │
    /// │           │       │               │         │
    /// │           │       │               │         │
    /// │     used_event  xxxx        + ◄───┘       xxxxx last_used
    /// │                   │                         │      │
    /// │                   │        Send intr        │      │
    /// │                   │                         │      │
    /// │                   └─────────────────────────┘      │
    /// │                                                    │
    /// │ B                                                  │
    /// └────────────────────────────────────────────────────┘
    ///
    ///             ┌───────────────────────────────────────────────────┐
    ///             │                                                 A │
    ///             │       ┌────────────────────────┐                  │
    ///             │       │                        │                  │
    ///             │       │                        │                  │
    ///             │       │              │         │                  │
    ///             │       │              │         │                  │
    ///       used_event  xxxx             │       xxxxx last_used      │
    ///                     │        + ◄───┘         │       │          │
    ///                     │                        │       │          │
    ///                     │     Don't send intr    │       │          │
    ///                     │                        │       │          │
    ///                     └───────────x────────────┘       │          │
    ///                                 x                    │          │
    ///                              next_used - 1           │          │
    ///                              │  │                  B │          │
    ///                              │  └────────────────────┘          │
    ///                              │                                  │
    ///                              └──────────────────────────────────┘
    /// ```
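    ///
    /// As a concrete check of the inequality (all arithmetic wrapping on `u16`): with
    /// `next_used = 5`, `last_used = 3`, and `used_event = 3`, we get `A = (5 - 1) - 3 = 1`
    /// and `B = 5 - 3 = 2`, so `A < B` holds and an interrupt is sent. If the driver had
    /// instead set `used_event = 5`, then `A = (5 - 1) - 5` wraps to `u16::MAX`, the
    /// inequality fails, and the interrupt is suppressed.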
    fn queue_wants_interrupt(&self) -> bool {
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            let used_event = self.get_used_event();
            self.next_used - used_event - Wrapping(1) < self.next_used - self.last_used
        } else {
            !self.get_avail_flag(VIRTQ_AVAIL_F_NO_INTERRUPT)
        }
    }

    /// Inject an interrupt into the guest on this queue.
    /// Returns `true` if an interrupt was injected into the guest for this queue,
    /// and `false` if it wasn't.
    pub fn trigger_interrupt(&mut self) -> bool {
        if self.queue_wants_interrupt() {
            self.last_used = self.next_used;
            self.interrupt.signal_used_queue(self.vector);
            true
        } else {
            false
        }
    }

    pub fn snapshot(&self) -> anyhow::Result<serde_json::Value> {
        serde_json::to_value(SplitQueueSnapshot {
            size: self.size,
            vector: self.vector,
            desc_table: self.desc_table,
            avail_ring: self.avail_ring,
            used_ring: self.used_ring,
            next_avail: self.next_avail,
            next_used: self.next_used,
            features: self.features,
            last_used: self.last_used,
        })
        .context("failed to serialize SplitQueueSnapshot")
    }

    pub fn restore(
        queue_value: serde_json::Value,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> anyhow::Result<SplitQueue> {
        let s: SplitQueueSnapshot = serde_json::from_value(queue_value)?;
        let queue = SplitQueue {
            mem: mem.clone(),
            event,
            interrupt,
            size: s.size,
            vector: s.vector,
            desc_table: s.desc_table,
            avail_ring: s.avail_ring,
            used_ring: s.used_ring,
            next_avail: s.next_avail,
            next_used: s.next_used,
            features: s.features,
            last_used: s.last_used,
        };
        Ok(queue)
    }
}

#[cfg(test)]
mod tests {
    use std::convert::TryInto;
    use std::mem::offset_of;

    use data_model::Le16;
    use data_model::Le32;
    use data_model::Le64;
    use zerocopy::AsBytes;
    use zerocopy::FromBytes;

    use super::*;
    use crate::virtio::create_descriptor_chain;
    use crate::virtio::Desc;
    use crate::virtio::Interrupt;
    use crate::virtio::Queue;

    const GUEST_MEMORY_SIZE: u64 = 0x10000;
    const DESC_OFFSET: u64 = 0;
    const AVAIL_OFFSET: u64 = 0x200;
    const USED_OFFSET: u64 = 0x400;
    const QUEUE_SIZE: usize = 0x10;
    const BUFFER_OFFSET: u64 = 0x8000;
    const BUFFER_LEN: u32 = 0x400;

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C)]
    struct Avail {
        flags: Le16,
        idx: Le16,
        ring: [Le16; QUEUE_SIZE],
        used_event: Le16,
    }

    impl Default for Avail {
        fn default() -> Self {
            Avail {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                ring: [Le16::from(0u16); QUEUE_SIZE],
                used_event: Le16::from(0u16),
            }
        }
    }

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C)]
    struct UsedElem {
        id: Le32,
        len: Le32,
    }

    impl Default for UsedElem {
        fn default() -> Self {
            UsedElem {
                id: Le32::from(0u32),
                len: Le32::from(0u32),
            }
        }
    }

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C, packed)]
    struct Used {
        flags: Le16,
        idx: Le16,
        used_elem_ring: [UsedElem; QUEUE_SIZE],
        avail_event: Le16,
    }

    impl Default for Used {
        fn default() -> Self {
            Used {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                used_elem_ring: [UsedElem::default(); QUEUE_SIZE],
                avail_event: Le16::from(0u16),
            }
        }
    }

    fn setup_vq(queue: &mut QueueConfig, mem: &GuestMemory) -> Queue {
        let desc = Desc {
            addr: Le64::from(BUFFER_OFFSET),
            len: Le32::from(BUFFER_LEN),
            flags: Le16::from(0u16),
            next: Le16::from(1u16),
        };
        let _ = mem.write_obj_at_addr(desc, GuestAddress(DESC_OFFSET));

        let avail = Avail::default();
        let _ = mem.write_obj_at_addr(avail, GuestAddress(AVAIL_OFFSET));

        let used = Used::default();
        let _ = mem.write_obj_at_addr(used, GuestAddress(USED_OFFSET));

        queue.set_desc_table(GuestAddress(DESC_OFFSET));
        queue.set_avail_ring(GuestAddress(AVAIL_OFFSET));
        queue.set_used_ring(GuestAddress(USED_OFFSET));
        queue.ack_features((1u64) << VIRTIO_RING_F_EVENT_IDX);
        queue.set_ready(true);

        queue
            .activate(mem, Event::new().unwrap(), Interrupt::new_for_test())
            .expect("QueueConfig::activate failed")
    }

    fn fake_desc_chain(mem: &GuestMemory) -> DescriptorChain {
        create_descriptor_chain(mem, GuestAddress(0), GuestAddress(0), Vec::new(), 0)
            .expect("failed to create descriptor chain")
    }

    #[test]
    fn queue_event_id_guest_fast() {
        let mut queue =
            QueueConfig::new(QUEUE_SIZE.try_into().unwrap(), 1 << VIRTIO_RING_F_EVENT_IDX);
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        let mut queue = setup_vq(&mut queue, &mem);

        // Offset of used_event within the Avail structure.
        let used_event_offset = offset_of!(Avail, used_event) as u64;
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests and the device has handled them all,
        // so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so the
        // device should inject an interrupt.
        assert!(queue.trigger_interrupt());

        // The driver handles all the interrupts and updates avail.used_event to 0x100.
        let mut driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // Assume the driver submits another u16::MAX - 0x100 requests and the device
        // has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::MAX {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }
        device_generate = Wrapping(u16::MAX);

        // At this moment the driver has only handled 0x100 interrupts, so the
        // device should inject an interrupt.
        assert!(queue.trigger_interrupt());

        // The driver handles all the interrupts and updates avail.used_event to u16::MAX.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used wraps to 0.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has handled all the previous interrupts, so the
        // device should inject an interrupt again.
        assert!(queue.trigger_interrupt());

        // The driver handles that interrupt and updates avail.used_event to 0.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert!(!queue.trigger_interrupt());
    }

    #[test]
    fn queue_event_id_guest_slow() {
        let mut queue =
            QueueConfig::new(QUEUE_SIZE.try_into().unwrap(), 1 << VIRTIO_RING_F_EVENT_IDX);
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        let mut queue = setup_vq(&mut queue, &mem);

        // Offset of used_event within the Avail structure.
        let used_event_offset = offset_of!(Avail, used_event) as u64;
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests and the device has handled them all,
        // so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so the
        // device should inject an interrupt.
        assert!(queue.trigger_interrupt());

        // The driver handles part of the interrupts and updates avail.used_event to 0x80.
        let mut driver_handled = Wrapping(0x80);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is incremented.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // Assume the driver submits another u16::MAX - 0x101 requests and the device
        // has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::MAX {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }
        device_generate = Wrapping(u16::MAX);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // The driver handles most of the interrupts and updates avail.used_event to
        // u16::MAX - 1.
        driver_handled = device_generate - Wrapping(1);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used wraps to 0.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has already finished the last interrupt (0x100),
        // and the device has serviced other requests, so a new interrupt is needed.
        assert!(queue.trigger_interrupt());

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is incremented to 1.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt
        // (Wrapping(0)) yet, so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // The driver handles all the remaining interrupts and wraps avail.used_event
        // to 0x1.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert!(!queue.trigger_interrupt());

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is incremented.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has finished all the previous interrupts, so the
        // device should inject an interrupt again.
        assert!(queue.trigger_interrupt());
    }
}