use super::{Config, EthernetAddress, Features, VirtioNetHdr};
use super::{MIN_BUFFER_LEN, NET_HDR_SIZE, QUEUE_RECEIVE, QUEUE_TRANSMIT, SUPPORTED_FEATURES};
use crate::hal::Hal;
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::volatile::volread;
use crate::{Error, Result};
use log::{debug, info, warn};
use zerocopy::AsBytes;

/// Raw driver for a VirtIO network device.
///
/// This is a raw version of the VirtIONet driver. It provides non-blocking
/// methods for transmitting and receiving raw slices, without the buffer
/// management. For higher-level functions such as receive buffer backing,
/// see [`VirtIONet`].
///
/// [`VirtIONet`]: super::VirtIONet
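///
/// # Example
///
/// A minimal usage sketch of the blocking helpers. `HalImpl` and `transport`
/// are placeholders for a concrete [`Hal`] implementation and an initialized
/// transport; the queue size of 16 is arbitrary:
///
/// ```ignore
/// let mut net = VirtIONetRaw::<HalImpl, _, 16>::new(transport)?;
/// println!("MAC: {:02x?}", net.mac_address());
///
/// // Blocking send; the driver prepends the virtio-net header itself.
/// net.send(packet)?;
///
/// // Blocking receive, returning (header length, packet length).
/// let mut rx_buf = [0u8; 2048];
/// let (hdr_len, pkt_len) = net.receive_wait(&mut rx_buf)?;
/// let received = &rx_buf[hdr_len..hdr_len + pkt_len];
/// ```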
pub struct VirtIONetRaw<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    transport: T,
    mac: EthernetAddress,
    recv_queue: VirtQueue<H, QUEUE_SIZE>,
    send_queue: VirtQueue<H, QUEUE_SIZE>,
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONetRaw<H, T, QUEUE_SIZE> {
    /// Create a new VirtIO-Net driver.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);
        info!("negotiated_features {:?}", negotiated_features);
        // read configuration space
        let config = transport.config_space::<Config>()?;
        let mac;
        // Safe because config points to a valid MMIO region for the config space.
        unsafe {
            mac = volread!(config, mac);
            debug!(
                "Got MAC={:02x?}, status={:?}",
                mac,
                volread!(config, status)
            );
        }
        let send_queue = VirtQueue::new(
            &mut transport,
            QUEUE_TRANSMIT,
            negotiated_features.contains(Features::RING_INDIRECT_DESC),
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;
        let recv_queue = VirtQueue::new(
            &mut transport,
            QUEUE_RECEIVE,
            negotiated_features.contains(Features::RING_INDIRECT_DESC),
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;

        transport.finish_init();

        Ok(VirtIONetRaw {
            transport,
            mac,
            recv_queue,
            send_queue,
        })
    }

    /// Acknowledge interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Disable interrupts.
    pub fn disable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(false);
        self.recv_queue.set_dev_notify(false);
    }

    /// Enable interrupts.
    pub fn enable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(true);
        self.recv_queue.set_dev_notify(true);
    }

    /// Get MAC address.
    pub fn mac_address(&self) -> EthernetAddress {
        self.mac
    }

    /// Returns whether the transmit queue has enough free descriptors to send a packet.
    pub fn can_send(&self) -> bool {
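        // `send` chains two descriptors (header + payload) for a non-empty
        // packet, so require at least two free descriptors.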
        self.send_queue.available_desc() >= 2
    }

    /// Whether the length of the receive buffer is valid.
    fn check_rx_buf_len(rx_buf: &[u8]) -> Result<()> {
        if rx_buf.len() < MIN_BUFFER_LEN {
            warn!("Receive buffer len {} is too small", rx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Whether the length of the transmit buffer is valid.
    fn check_tx_buf_len(tx_buf: &[u8]) -> Result<()> {
        if tx_buf.len() < NET_HDR_SIZE {
            warn!("Transmit buffer len {} is too small", tx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Fill the header of the `buffer` with [`VirtioNetHdr`].
    ///
    /// If the `buffer` is not large enough, it returns [`Error::InvalidParam`].
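    ///
    /// # Example
    ///
    /// A sketch of preparing a transmit buffer by hand, where `net` is an
    /// initialized `VirtIONetRaw` and `payload` is the frame to send:
    ///
    /// ```ignore
    /// let mut tx_buf = [0u8; 2048];
    /// let hdr_len = net.fill_buffer_header(&mut tx_buf)?;
    /// // The packet payload goes immediately after the header.
    /// tx_buf[hdr_len..hdr_len + payload.len()].copy_from_slice(payload);
    /// ```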
    pub fn fill_buffer_header(&self, buffer: &mut [u8]) -> Result<usize> {
        if buffer.len() < NET_HDR_SIZE {
            return Err(Error::InvalidParam);
        }
        let header = VirtioNetHdr::default();
        buffer[..NET_HDR_SIZE].copy_from_slice(header.as_bytes());
        Ok(NET_HDR_SIZE)
    }

    /// Submits a request to transmit a buffer immediately without waiting for
    /// the transmission to complete.
    ///
    /// It submits a request to the VirtIO net device and returns a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller needs to fill the `tx_buf` with a header by calling
    /// [`fill_buffer_header`] before transmission. Then it calls [`poll_transmit`]
    /// with the returned token to check whether the device has finished handling
    /// the request. Once it has, the caller must call [`transmit_complete`] with
    /// the same buffer before reading the result (transmitted length).
    ///
    /// # Safety
    ///
    /// `tx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`fill_buffer_header`]: Self::fill_buffer_header
    /// [`poll_transmit`]: Self::poll_transmit
    /// [`transmit_complete`]: Self::transmit_complete
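    ///
    /// # Example
    ///
    /// A sketch of the full asynchronous transmit flow, assuming a single
    /// outstanding request, with `net` an initialized `VirtIONetRaw` and
    /// `payload` the frame to send:
    ///
    /// ```ignore
    /// let mut tx_buf = [0u8; 2048];
    /// let hdr_len = net.fill_buffer_header(&mut tx_buf)?;
    /// tx_buf[hdr_len..hdr_len + payload.len()].copy_from_slice(payload);
    /// let len = hdr_len + payload.len();
    ///
    /// // SAFETY: `tx_buf` is not accessed again until the request completes.
    /// let token = unsafe { net.transmit_begin(&tx_buf[..len])? };
    /// while net.poll_transmit().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// // SAFETY: the same buffer is passed back together with its token.
    /// let transmitted = unsafe { net.transmit_complete(token, &tx_buf[..len])? };
    /// ```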
    pub unsafe fn transmit_begin(&mut self, tx_buf: &[u8]) -> Result<u16> {
        Self::check_tx_buf_len(tx_buf)?;
        let token = self.send_queue.add(&[tx_buf], &mut [])?;
        if self.send_queue.should_notify() {
            self.transport.notify(QUEUE_TRANSMIT);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed transmission request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_transmit(&mut self) -> Option<u16> {
        self.send_queue.peek_used()
    }

    /// Completes a transmission operation which was started by [`transmit_begin`].
    /// Returns the number of bytes transmitted.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`transmit_begin`] when it returned the token.
    ///
    /// [`transmit_begin`]: Self::transmit_begin
    pub unsafe fn transmit_complete(&mut self, token: u16, tx_buf: &[u8]) -> Result<usize> {
        let len = self.send_queue.pop_used(token, &[tx_buf], &mut [])?;
        Ok(len as usize)
    }

    /// Submits a request to receive a buffer immediately without waiting for
    /// the reception to complete.
    ///
    /// It submits a request to the VirtIO net device and returns a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller can then call [`poll_receive`] with the returned token to
    /// check whether the device has finished handling the request. Once it has,
    /// the caller must call [`receive_complete`] with the same buffer before
    /// reading the response.
    ///
    /// # Safety
    ///
    /// `rx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`poll_receive`]: Self::poll_receive
    /// [`receive_complete`]: Self::receive_complete
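    ///
    /// # Example
    ///
    /// A sketch of the full asynchronous receive flow for a single
    /// outstanding request (essentially what [`receive_wait`] does), with
    /// `net` an initialized `VirtIONetRaw`:
    ///
    /// ```ignore
    /// let mut rx_buf = [0u8; 2048];
    /// // SAFETY: `rx_buf` is not accessed again until the request completes.
    /// let token = unsafe { net.receive_begin(&mut rx_buf)? };
    /// while net.poll_receive().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// // SAFETY: the same buffer is passed back together with its token.
    /// let (hdr_len, pkt_len) = unsafe { net.receive_complete(token, &mut rx_buf)? };
    /// ```
    ///
    /// [`receive_wait`]: Self::receive_wait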
    pub unsafe fn receive_begin(&mut self, rx_buf: &mut [u8]) -> Result<u16> {
        Self::check_rx_buf_len(rx_buf)?;
        let token = self.recv_queue.add(&[], &mut [rx_buf])?;
        if self.recv_queue.should_notify() {
            self.transport.notify(QUEUE_RECEIVE);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed reception request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_receive(&self) -> Option<u16> {
        self.recv_queue.peek_used()
    }

    /// Completes a reception operation which was started by [`receive_begin`].
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`receive_begin`] when it returned the token.
    ///
    /// [`receive_begin`]: Self::receive_begin
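    ///
    /// # Example
    ///
    /// A sketch of extracting the received packet afterwards, with `net`,
    /// `token` and `rx_buf` as set up by [`receive_begin`]:
    ///
    /// ```ignore
    /// let (hdr_len, pkt_len) = unsafe { net.receive_complete(token, &mut rx_buf)? };
    /// let packet = &rx_buf[hdr_len..hdr_len + pkt_len];
    /// ```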
    pub unsafe fn receive_complete(
        &mut self,
        token: u16,
        rx_buf: &mut [u8],
    ) -> Result<(usize, usize)> {
        let len = self.recv_queue.pop_used(token, &[], &mut [rx_buf])? as usize;
        let packet_len = len.checked_sub(NET_HDR_SIZE).ok_or(Error::IoError)?;
        Ok((NET_HDR_SIZE, packet_len))
    }

    /// Sends a packet to the network, and blocks until the request has completed.
    pub fn send(&mut self, tx_buf: &[u8]) -> Result {
        let header = VirtioNetHdr::default();
        if tx_buf.is_empty() {
            // Special case sending an empty packet, to avoid adding an empty buffer to the
            // virtqueue.
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes()],
                &mut [],
                &mut self.transport,
            )?;
        } else {
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes(), tx_buf],
                &mut [],
                &mut self.transport,
            )?;
        }
        Ok(())
    }

    /// Blocks and waits for a packet to be received.
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
    pub fn receive_wait(&mut self, rx_buf: &mut [u8]) -> Result<(usize, usize)> {
        let token = unsafe { self.receive_begin(rx_buf)? };
        while self.poll_receive().is_none() {
            core::hint::spin_loop();
        }
        unsafe { self.receive_complete(token, rx_buf) }
    }
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> Drop for VirtIONetRaw<H, T, QUEUE_SIZE> {
    fn drop(&mut self) {
        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
        // after they have been freed.
        self.transport.queue_unset(QUEUE_RECEIVE);
        self.transport.queue_unset(QUEUE_TRANSMIT);
    }
}