use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    queue::{fake_read_write_queue, Descriptor},
    PhysAddr, Result,
};
use alloc::{sync::Arc, vec::Vec};
use core::{
    any::TypeId,
    ptr::NonNull,
    sync::atomic::{AtomicBool, Ordering},
    time::Duration,
};
use std::{sync::Mutex, thread};

/// A fake implementation of [`Transport`] for unit tests.
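///
/// # Example
///
/// A minimal sketch of how a test might construct one. The `DeviceType::Block`
/// variant, the single queue, and the sizes here are illustrative assumptions,
/// not requirements:
///
/// ```ignore
/// let mut config_space = 0u32; // stand-in for a real device config struct
/// let state = Arc::new(Mutex::new(State {
///     queues: vec![QueueStatus::default()], // one queue, index 0
///     ..Default::default()
/// }));
/// let transport = FakeTransport {
///     device_type: DeviceType::Block,
///     max_queue_size: 4,
///     device_features: 0,
///     config_space: NonNull::from(&mut config_space),
///     state: state.clone(),
/// };
/// ```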
#[derive(Debug)]
pub struct FakeTransport<C: 'static> {
    pub device_type: DeviceType,
    pub max_queue_size: u32,
    pub device_features: u64,
    pub config_space: NonNull<C>,
    pub state: Arc<Mutex<State>>,
}

impl<C> Transport for FakeTransport<C> {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        self.device_features
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        self.state.lock().unwrap().driver_features = driver_features;
    }

    fn max_queue_size(&mut self, _queue: u16) -> u32 {
        self.max_queue_size
    }

    fn notify(&mut self, queue: u16) {
        self.state.lock().unwrap().queues[queue as usize]
            .notified
            .store(true, Ordering::SeqCst);
    }

    fn get_status(&self) -> DeviceStatus {
        self.state.lock().unwrap().status
    }

    fn set_status(&mut self, status: DeviceStatus) {
        self.state.lock().unwrap().status = status;
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        self.state.lock().unwrap().guest_page_size = guest_page_size;
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = size;
        state.queues[queue as usize].descriptors = descriptors;
        state.queues[queue as usize].driver_area = driver_area;
        state.queues[queue as usize].device_area = device_area;
    }

    fn queue_unset(&mut self, queue: u16) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = 0;
        state.queues[queue as usize].descriptors = 0;
        state.queues[queue as usize].driver_area = 0;
        state.queues[queue as usize].device_area = 0;
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        self.state.lock().unwrap().queues[queue as usize].descriptors != 0
    }

    fn ack_interrupt(&mut self) -> bool {
        let mut state = self.state.lock().unwrap();
        let pending = state.interrupt_pending;
        if pending {
            state.interrupt_pending = false;
        }
        pending
    }

    fn config_space<T: 'static>(&self) -> Result<NonNull<T>> {
        if TypeId::of::<T>() == TypeId::of::<C>() {
            Ok(self.config_space.cast())
        } else {
            panic!("Unexpected config space type.");
        }
    }
}

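/// The mutable state of a [`FakeTransport`], shared between the transport and the
/// test code that plays the part of the device.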
#[derive(Debug, Default)]
pub struct State {
    pub status: DeviceStatus,
    pub driver_features: u64,
    pub guest_page_size: u32,
    pub interrupt_pending: bool,
    pub queues: Vec<QueueStatus>,
}

impl State {
    /// Simulates the device writing to the given queue.
    ///
    /// The fake device always uses descriptors in order.
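    ///
    /// A sketch of typical usage; the queue index, queue size, and payload are
    /// illustrative:
    ///
    /// ```ignore
    /// // Pretend the device wrote 4 bytes to queue 0 of a 4-descriptor queue.
    /// state.lock().unwrap().write_to_queue::<4>(0, &[1, 2, 3, 4]);
    /// ```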
    pub fn write_to_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16, data: &[u8]) {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                // The device is only writing, so the driver should not have sent any data.
                assert_eq!(input, Vec::new());
                data.to_owned()
            },
        );
    }

    /// Simulates the device reading from the given queue.
    ///
    /// Returns the data that the driver made available in the queue.
    ///
    /// The fake device always uses descriptors in order.
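    ///
    /// A sketch of typical usage; the queue index and queue size are illustrative:
    ///
    /// ```ignore
    /// // Take whatever the driver sent on queue 0 of a 4-descriptor queue.
    /// let request: Vec<u8> = state.lock().unwrap().read_from_queue::<4>(0);
    /// ```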
    pub fn read_from_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16) -> Vec<u8> {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);

        let mut ret = None;

        // Read data from the queue but don't write any response.
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                ret = Some(input);
                Vec::new()
            },
        );

        ret.unwrap()
    }

    /// Simulates the device reading data from the given queue and then writing a response back.
    ///
    /// The fake device always uses descriptors in order.
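    ///
    /// A sketch of typical usage; the queue index, queue size, and the way the
    /// request is handled are illustrative:
    ///
    /// ```ignore
    /// // Read the driver's request from queue 0 and reply with a single status byte.
    /// state.lock().unwrap().read_write_queue::<4>(0, |request| {
    ///     assert!(!request.is_empty());
    ///     vec![0]
    /// });
    /// ```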
    pub fn read_write_queue<const QUEUE_SIZE: usize>(
        &mut self,
        queue_index: u16,
        handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
    ) {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            handler,
        )
    }

    /// Waits until the given queue is notified.
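    ///
    /// A sketch of the usual pattern, where a background thread plays the device
    /// while the test thread drives the driver. The `transport` value, queue
    /// index, and response are illustrative:
    ///
    /// ```ignore
    /// let state = transport.state.clone();
    /// thread::spawn(move || {
    ///     State::wait_until_queue_notified(&state, 0);
    ///     state.lock().unwrap().write_to_queue::<4>(0, &[42]);
    /// });
    /// ```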
    pub fn wait_until_queue_notified(state: &Mutex<Self>, queue_index: u16) {
        while !state.lock().unwrap().queues[usize::from(queue_index)]
            .notified
            .swap(false, Ordering::SeqCst)
        {
            thread::sleep(Duration::from_millis(10));
        }
    }
}

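/// The state of a single virtqueue, as seen by the fake transport.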
#[derive(Debug, Default)]
pub struct QueueStatus {
    pub size: u32,
    pub descriptors: PhysAddr,
    pub driver_area: PhysAddr,
    pub device_area: PhysAddr,
    pub notified: AtomicBool,
}