1 /*
2  * Copyright (c) 2024 Google Inc. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 use core::ops::Deref;
25 use core::ops::DerefMut;
26 use core::ptr::NonNull;
27 
28 use lazy_static::lazy_static;
29 
30 use rust_support::mmu::ARCH_MMU_FLAG_PERM_NO_EXECUTE;
31 use rust_support::mmu::ARCH_MMU_FLAG_UNCACHED_DEVICE;
32 use rust_support::mmu::PAGE_SIZE_SHIFT;
33 use rust_support::paddr_t;
34 use rust_support::sync::Mutex;
35 use rust_support::vaddr_t;
36 use rust_support::vmm::vaddr_to_paddr;
37 use rust_support::vmm::vmm_alloc_contiguous;
38 use rust_support::vmm::vmm_alloc_physical;
39 use rust_support::vmm::vmm_free_region;
40 use rust_support::vmm::vmm_get_kernel_aspace;
41 
42 use static_assertions::const_assert_eq;
43 
44 use virtio_drivers::transport::pci::bus::DeviceFunction;
45 use virtio_drivers::transport::pci::bus::PciRoot;
46 use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
47 
48 use crate::err::Error;
49 use crate::pci::arch;
50 
/// Record of a single PCI Base Address Register (BAR) region that has been
/// mapped into the kernel address space by `TrustyHal::mmio_alloc`.
#[derive(Copy, Clone)]
struct BarInfo {
    // Physical base address of the BAR region.
    paddr: paddr_t,
    // Size of the mapping in bytes (rounded up to a page multiple at creation).
    size: usize,
    // Kernel virtual address the region was mapped at.
    vaddr: vaddr_t,
}
57 
// A PCI type-0 configuration header exposes at most six base address registers.
const NUM_BARS: usize = 6;
lazy_static! {
    // Global table of BAR mappings, filled in by `TrustyHal::mmio_alloc` and
    // consulted by `Hal::mmio_phys_to_virt` to translate device physical
    // addresses into kernel virtual addresses.
    static ref BARS: Mutex<[Option<BarInfo>; NUM_BARS]> = Mutex::new([None; NUM_BARS]);
}

// virtio-drivers requires 4k pages, check that we meet that requirement.
const_assert_eq!(PAGE_SIZE, rust_support::mmu::PAGE_SIZE as usize);
65 
/// Trusty implementation of the `virtio_drivers` hardware abstraction layer
/// (`Hal`), backed by the LK kernel VMM. Stateless; the only bookkeeping is
/// the global `BARS` table.
pub struct TrustyHal;
67 
impl TrustyHal {
    /// Maps every memory BAR of `device_function` into the kernel address
    /// space (uncached, non-executable) and records each mapping in the
    /// global `BARS` table so `Hal::mmio_phys_to_virt` can translate device
    /// physical addresses later.
    ///
    /// Returns an error if the kernel VMM rejects a mapping. Panics if a BAR
    /// header cannot be read (the `bar_info(..).unwrap()` below).
    pub fn mmio_alloc(
        pci_root: &mut PciRoot,
        device_function: DeviceFunction,
    ) -> Result<(), Error> {
        for bar in 0..NUM_BARS {
            let bar_info = pci_root.bar_info(device_function, bar as u8).unwrap();
            // Skip BARs that report no memory address/size (e.g. unused or
            // I/O-space BARs).
            // NOTE(review): this loop does not explicitly skip the upper slot
            // of a 64-bit BAR pair — presumably `memory_address_size` returns
            // `None` for it; confirm against the virtio-drivers PCI bus code.
            if let Some((bar_paddr, bar_size)) = bar_info.memory_address_size() {
                // Out-parameter that receives the mapped virtual address.
                // NOTE(review): passed as `&bar_vaddr` (shared reference)
                // below, unlike the `&mut vaddr` used in `dma_alloc` —
                // confirm `vmm_alloc_physical` writes the address back
                // through this pointer.
                let bar_vaddr = core::ptr::null_mut();
                // Round the BAR size up to a whole number of pages.
                let bar_size_aligned = (bar_size as usize + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

                // Safety:
                // `aspace` is `vmm_get_kernel_aspace()`.
                // `name` is a `&'static CStr`.
                // `bar_paddr` and `bar_size_aligned` are safe by this function's safety requirements.
                let ret = unsafe {
                    vmm_alloc_physical(
                        vmm_get_kernel_aspace(),
                        c"pci_config_space".as_ptr(),
                        bar_size_aligned,
                        &bar_vaddr,
                        0,
                        bar_paddr as usize,
                        0,
                        // Device memory: non-executable and uncached.
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_UNCACHED_DEVICE,
                    )
                };
                // Convert the LK status code into a Rust `Result`.
                rust_support::Error::from_lk(ret)?;

                // Record the mapping for later physical-to-virtual lookups.
                BARS.lock().deref_mut()[bar] = Some(BarInfo {
                    paddr: bar_paddr as usize,
                    size: bar_size_aligned,
                    vaddr: bar_vaddr as usize,
                });
            }
        }
        Ok(())
    }
}
107 
// Safety: TrustyHal is a zero-sized, stateless type and is thus trivially
// safe to send to another thread.
unsafe impl Send for TrustyHal {}
110 
111 // Safety: See function specific comments
112 unsafe impl Hal for TrustyHal {
113     // Safety:
114     // Function either returns a non-null, properly aligned pointer or panics the kernel.
115     // The call to `vmm_alloc_contiguous` ensures that the pointed to memory is zeroed.
dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>)116     fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
117         let name = c"vsock-rust";
118         // dma_alloc requests num pages but vmm_alloc_contiguous expects bytes.
119         let size = pages * PAGE_SIZE;
120         let mut vaddr = core::ptr::null_mut(); // stores pointer to virtual memory
121         let align_pow2 = PAGE_SIZE_SHIFT as u8;
122         let vmm_flags = 0;
123         let arch_mmu_flags = ARCH_MMU_FLAG_PERM_NO_EXECUTE;
124         let aspace = vmm_get_kernel_aspace();
125 
126         // NOTE: the allocated memory will be zeroed since vmm_alloc_contiguous
127         // calls vmm_alloc_pmm which does not set the PMM_ALLOC_FLAG_NO_CLEAR
128         // flag.
129         //
130         // Safety:
131         // `aspace` is `vmm_get_kernel_aspace()`.
132         // `name` is a `&'static CStr`.
133         // `size` is validated by the callee
134         let rc = unsafe {
135             vmm_alloc_contiguous(
136                 aspace,
137                 name.as_ptr(),
138                 size,
139                 &mut vaddr,
140                 align_pow2,
141                 vmm_flags,
142                 arch_mmu_flags,
143             )
144         };
145         if rc != 0 {
146             panic!("error {} allocating physical memory", rc);
147         }
148         if vaddr as usize & (PAGE_SIZE - 1usize) != 0 {
149             panic!("error page-aligning allocation {:#x}", vaddr as usize);
150         }
151 
152         // Safety: `vaddr` is valid because the call to `vmm_alloc_continuous` succeeded
153         let paddr = unsafe { vaddr_to_paddr(vaddr) };
154 
155         arch::dma_alloc_share(paddr, size);
156 
157         (paddr, NonNull::<u8>::new(vaddr as *mut u8).unwrap())
158     }
159 
160     // Safety: `vaddr` was returned by `dma_alloc` and hasn't been deallocated.
dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32161     unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
162         let size = pages * PAGE_SIZE;
163         arch::dma_dealloc_unshare(paddr, size);
164 
165         let aspace = vmm_get_kernel_aspace();
166         let vaddr = vaddr.as_ptr();
167         // Safety:
168         // - function-level requirements
169         // - `aspace` points to the kernel address space object
170         // - `vaddr` is a region in `aspace`
171         unsafe { vmm_free_region(aspace, vaddr as usize) }
172     }
173 
174     // Only used for MMIO addresses within BARs read from the device,
175     // for the PCI transport.
176     //
177     // Safety: `paddr` and `size` are validated against allocations made in
178     // `Self::mmio_alloc`; panics on validation failure.
mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8>179     unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
180         for bar in BARS.lock().deref().iter().flatten() {
181             let bar_paddr_end = bar.paddr + bar.size;
182             if (bar.paddr..bar_paddr_end).contains(&paddr) {
183                 // check that the address range up to the given size is within
184                 // the region expected for MMIO.
185                 if paddr + size > bar_paddr_end {
186                     panic!("invalid arguments passed to mmio_phys_to_virt");
187                 }
188                 let offset = paddr - bar.paddr;
189 
190                 let bar_vaddr_ptr: *mut u8 = bar.vaddr as _;
191                 // Safety:
192                 // - `BARS` correctly maps from physical to virtual pages
193                 // - `offset` is less than or equal to bar.size because
194                 //   `bar.paddr` <= `paddr`` < `bar_paddr_end`
195                 let vaddr = unsafe { bar_vaddr_ptr.add(offset) };
196                 return NonNull::<u8>::new(vaddr).unwrap();
197             }
198         }
199 
200         panic!("error mapping physical memory to virtual for mmio");
201     }
202 
203     // Safety: delegated to callee
share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr204     unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
205         // Safety: delegated to arch::share
206         unsafe { arch::share(buffer, direction) }
207     }
208 
209     // Safety: delegated to callee
unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection)210     unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
211         // Safety: delegated to arch::unshare
212         unsafe {
213             arch::unshare(paddr, buffer, direction);
214         }
215     }
216 }
217