// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! HAL for the virtio_drivers crate.

use super::pci::PCI_INFO;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use crate::util::RangeExt as _;
use core::alloc::Layout;
use core::mem::size_of;
use core::ptr::{copy_nonoverlapping, NonNull};
use log::trace;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

/// The alignment to use for the temporary buffers allocated by `HalImpl::share`. There doesn't seem
/// to be any particular alignment required by VirtIO for these, so 16 bytes should be enough to
/// allow appropriate alignment for whatever fields are accessed. `alloc_shared` will increase the
/// alignment to the memory sharing granule size anyway.
const SHARED_BUFFER_ALIGNMENT: usize = size_of::<u128>();

/// HAL implementation for the virtio_drivers crate.
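///
/// `HalImpl` is stateless; it is only ever used as the `Hal` type parameter when constructing
/// `virtio_drivers` device drivers, for example (illustrative sketch only; the concrete driver
/// and transport types depend on the device being set up):
///
/// ```ignore
/// let mut blk = VirtIOBlk::<HalImpl, PciTransport>::new(transport)?;
/// ```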
pub struct HalImpl;

/// SAFETY: See the 'Implementation Safety' comments on methods below for how they fulfill the
/// safety requirements of the unsafe `Hal` trait.
unsafe impl Hal for HalImpl {
    /// # Implementation Safety
    ///
    /// `dma_alloc` ensures that the returned DMA buffer is not aliased by any other allocation or
    /// reference in the program until it is deallocated by `dma_dealloc`: it allocates a unique
    /// block of memory using `alloc_shared`, which is guaranteed to return valid and unique
    /// memory. We request an alignment of at least `PAGE_SIZE` from `alloc_shared`, and we zero
    /// the buffer before returning it.
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        let layout = dma_layout(pages);
        let vaddr =
            alloc_shared(layout).expect("Failed to allocate and share VirtIO DMA range with host");
        // SAFETY: vaddr points to a region newly allocated for the caller, so it is valid,
        // unaliased and safe to write to.
        unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, layout.size()) };
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

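    /// Unshares and deallocates a DMA buffer previously returned by [`Self::dma_alloc`].
    ///
    /// Always returns 0: failure to unshare the memory results in a panic rather than an error
    /// code.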
    unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        // SAFETY: Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
        unsafe { dealloc_shared(vaddr, dma_layout(pages)) }
            .expect("Failed to unshare VirtIO DMA range with host");
        0
    }

    /// # Implementation Safety
    ///
    /// The returned pointer must be valid because the `paddr` describes a valid MMIO region, we
    /// check that it is within the PCI MMIO range, and we previously mapped the entire PCI MMIO
    /// range. It can't alias any other allocations because we previously validated in
    /// `map_mmio_range` that the PCI MMIO range didn't overlap with any other memory ranges.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialized");
        let bar_range = {
            let start = pci_info.bar_range.start.try_into().unwrap();
            let end = pci_info.bar_range.end.try_into().unwrap();

            start..end
        };
        let mmio_range = paddr..paddr.checked_add(size).expect("PCI MMIO region end overflowed");

        // Check that the region is within the PCI MMIO range that we read from the device tree. If
        // not, the host is probably trying to do something malicious.
        assert!(
            mmio_range.is_within(&bar_range),
            "PCI MMIO region was outside of expected BAR range.",
        );

        phys_to_virt(paddr)
    }

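    /// Shares `buffer` with the device via a bounce buffer allocated with `alloc_shared`, and
    /// returns the physical address of the bounce buffer.
    ///
    /// Unless the transfer is device-to-driver only, the current contents of `buffer` are copied
    /// into the bounce buffer so that the device can read them.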
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        let size = buffer.len();

        let bounce = alloc_shared(bb_layout(size))
            .expect("Failed to allocate and share VirtIO bounce buffer with host");
        let paddr = virt_to_phys(bounce);
        if direction != BufferDirection::DeviceToDriver {
            let src = buffer.cast::<u8>().as_ptr().cast_const();
            trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) initialized from {src:?}");
            // SAFETY: Both regions are valid, properly aligned, and don't overlap.
            unsafe { copy_nonoverlapping(src, bounce.as_ptr(), size) };
        }

        paddr
    }

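    /// Reverses a previous call to [`Self::share`]: unless the transfer was driver-to-device
    /// only, the contents of the bounce buffer are copied back into `buffer`, and the bounce
    /// buffer is then unshared and deallocated.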
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        let bounce = phys_to_virt(paddr);
        let size = buffer.len();
        if direction != BufferDirection::DriverToDevice {
            let dest = buffer.cast::<u8>().as_ptr();
            trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) copied back to {dest:?}");
            // SAFETY: Both regions are valid, properly aligned, and don't overlap.
            unsafe { copy_nonoverlapping(bounce.as_ptr(), dest, size) };
        }

        // SAFETY: Memory was allocated by `share` using `alloc_shared` with the same layout.
        unsafe { dealloc_shared(bounce, bb_layout(size)) }
            .expect("Failed to unshare and deallocate VirtIO bounce buffer");
    }
}

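/// Returns the layout for a DMA region of `pages` pages: `pages * PAGE_SIZE` bytes, aligned to
/// `PAGE_SIZE`. Panics if the size overflows a `usize`.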
fn dma_layout(pages: usize) -> Layout {
    let size = pages.checked_mul(PAGE_SIZE).unwrap();
    Layout::from_size_align(size, PAGE_SIZE).unwrap()
}

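/// Returns the layout for a bounce buffer of `size` bytes, aligned to `SHARED_BUFFER_ALIGNMENT`.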
fn bb_layout(size: usize) -> Layout {
    Layout::from_size_align(size, SHARED_BUFFER_ALIGNMENT).unwrap()
}