1 //! Fake HAL implementation for tests.
2 
3 #![deny(unsafe_op_in_unsafe_fn)]
4 
use crate::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
use alloc::{
    alloc::{alloc_zeroed, dealloc, handle_alloc_error},
    boxed::Box,
};
use core::{
    alloc::Layout,
    ptr::{self, NonNull},
};
use zerocopy::FromZeroes;
12 
/// Zero-sized marker type whose [`Hal`] implementation is used in unit tests.
#[derive(Debug)]
pub struct FakeHal;
15 
16 /// Fake HAL implementation for use in unit tests.
17 unsafe impl Hal for FakeHal {
dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>)18     fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
19         assert_ne!(pages, 0);
20         let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
21         // Safe because the size and alignment of the layout are non-zero.
22         let ptr = unsafe { alloc_zeroed(layout) };
23         if let Some(ptr) = NonNull::new(ptr) {
24             (ptr.as_ptr() as PhysAddr, ptr)
25         } else {
26             handle_alloc_error(layout);
27         }
28     }
29 
dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i3230     unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
31         assert_ne!(pages, 0);
32         let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
33         // Safe because the layout is the same as was used when the memory was allocated by
34         // `dma_alloc` above.
35         unsafe {
36             dealloc(vaddr.as_ptr(), layout);
37         }
38         0
39     }
40 
mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8>41     unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
42         NonNull::new(paddr as _).unwrap()
43     }
44 
share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr45     unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
46         assert_ne!(buffer.len(), 0);
47         // To ensure that the driver is handling and unsharing buffers properly, allocate a new
48         // buffer and copy to it if appropriate.
49         let mut shared_buffer = u8::new_box_slice_zeroed(buffer.len());
50         if let BufferDirection::DriverToDevice | BufferDirection::Both = direction {
51             unsafe {
52                 buffer
53                     .as_ptr()
54                     .cast::<u8>()
55                     .copy_to(shared_buffer.as_mut_ptr(), buffer.len());
56             }
57         }
58         let vaddr = Box::into_raw(shared_buffer) as *mut u8 as usize;
59         // Nothing to do, as the host already has access to all memory.
60         virt_to_phys(vaddr)
61     }
62 
unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection)63     unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
64         assert_ne!(buffer.len(), 0);
65         assert_ne!(paddr, 0);
66         let vaddr = phys_to_virt(paddr);
67         let shared_buffer = unsafe {
68             Box::from_raw(ptr::slice_from_raw_parts_mut(
69                 vaddr as *mut u8,
70                 buffer.len(),
71             ))
72         };
73         if let BufferDirection::DeviceToDriver | BufferDirection::Both = direction {
74             unsafe {
75                 buffer
76                     .as_ptr()
77                     .cast::<u8>()
78                     .copy_from(shared_buffer.as_ptr(), buffer.len());
79             }
80         }
81     }
82 }
83 
virt_to_phys(vaddr: usize) -> PhysAddr84 fn virt_to_phys(vaddr: usize) -> PhysAddr {
85     vaddr
86 }
87 
phys_to_virt(paddr: PhysAddr) -> usize88 fn phys_to_virt(paddr: PhysAddr) -> usize {
89     paddr
90 }
91