// external/mesa3d/src/gallium/frontends/rusticl/util/ptr.rs
use std::{
    alloc::Layout,
    collections::{btree_map::Entry, BTreeMap},
    hash::{Hash, Hasher},
    mem,
    ops::{Add, Deref},
    ptr::{self, NonNull},
};

/// A wrapper around pointers to C data types which are considered thread-safe.
#[derive(Eq)]
pub struct ThreadSafeCPtr<T>(NonNull<T>);

impl<T> ThreadSafeCPtr<T> {
    /// # Safety
    ///
    /// Only safe on `T` which are thread-safe C data types. That usually means the following:
    /// * Fields are accessed in a thread-safe manner, either through atomic operations or
    ///   thread-safe functions.
    /// * Data races caused by accessing the type from multiple threads are considered bugs in
    ///   the underlying C type, not in this wrapper.
    ///
    /// As none of this can actually be verified, this solely relies on contracts made on those
    /// types, either by a specification or by convention. In practical terms this means that a
    /// pointer to `T` meets all requirements expected by [Send] and [Sync].
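    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a hypothetical C type `c_event_t` whose API is
    /// specified to be thread-safe (`c_event_t` and `c_event_create` are
    /// illustrative, not part of this crate):
    ///
    /// ```ignore
    /// let raw: *mut c_event_t = unsafe { c_event_create() };
    /// // SAFETY: the spec guarantees c_event_t may be used from any thread.
    /// let event = unsafe { ThreadSafeCPtr::new(raw) }.expect("NULL pointer");
    /// // The wrapper may now cross thread boundaries.
    /// std::thread::spawn(move || {
    ///     let _ptr = event.as_ptr();
    /// });
    /// ```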
    pub unsafe fn new(ptr: *mut T) -> Option<Self> {
        Some(Self(NonNull::new(ptr)?))
    }
}

impl<T> Deref for ThreadSafeCPtr<T> {
    type Target = NonNull<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> Hash for ThreadSafeCPtr<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.as_ptr().hash(state)
    }
}

impl<T> PartialEq for ThreadSafeCPtr<T> {
    fn eq(&self, other: &Self) -> bool {
        self.0.as_ptr() == other.0.as_ptr()
    }
}

// SAFETY: safety requirements of Send fulfilled at [ThreadSafeCPtr::new] time
unsafe impl<T> Send for ThreadSafeCPtr<T> {}

// SAFETY: safety requirements of Sync fulfilled at [ThreadSafeCPtr::new] time
unsafe impl<T> Sync for ThreadSafeCPtr<T> {}

pub trait CheckedPtr<T> {
    /// # Safety
    ///
    /// Besides a NULL check, the function can't make sure the pointer is valid
    /// for the entire size.
    unsafe fn copy_checked(self, val: *const T, size: usize);
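    /// Writes `val` through `self` unless `self` is NULL.
    ///
    /// A minimal usage sketch (the surrounding function is illustrative only):
    /// an optional out-parameter in the style of many C APIs, where the caller
    /// may pass NULL when it is not interested in the value.
    ///
    /// ```ignore
    /// fn query_some_size(size_ret: *mut usize) {
    ///     // No-op when the caller passed NULL; writes one `usize` otherwise.
    ///     size_ret.write_checked(42);
    /// }
    /// ```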
    fn write_checked(self, val: T);
}

impl<T> CheckedPtr<T> for *mut T {
    /// # Safety
    ///
    /// This function follows the same safety rules as `std::ptr::copy` except that it already
    /// checks for a NULL pointer.
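    ///
    /// A minimal usage sketch (illustrative only): copying into an optional
    /// out-buffer, as is common for C query entry points.
    ///
    /// ```ignore
    /// fn fill_buffer(dst: *mut u8, src: &[u8]) {
    ///     // Copies nothing when `dst` is NULL; the caller guarantees that a
    ///     // non-NULL `dst` is valid for `src.len()` elements.
    ///     unsafe { dst.copy_checked(src.as_ptr(), src.len()) };
    /// }
    /// ```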
    unsafe fn copy_checked(self, val: *const T, size: usize) {
        if !self.is_null() {
            // SAFETY: we move the responsibilities up to the caller
            unsafe {
                ptr::copy(val, self, size);
            }
        }
    }

    fn write_checked(self, val: T) {
        if !self.is_null() {
            // SAFETY: the caller must ensure that a non-NULL pointer is valid
            // for a write of one `T`.
            unsafe {
                *self = val;
            }
        }
    }
}

// from https://internals.rust-lang.org/t/discussion-on-offset-of/7440/2
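/// Computes the byte offset of a (possibly nested) field within `$Struct`.
///
/// A minimal sketch of the intended use, on an illustrative `#[repr(C)]` struct
/// (not part of this crate):
///
/// ```ignore
/// #[repr(C)]
/// struct Example {
///     a: u8,
///     b: u32,
/// }
/// // For #[repr(C)], the first field sits at offset 0 and `b` is placed at
/// // the alignment of u32.
/// assert_eq!(offset_of!(Example, a), 0);
/// assert_eq!(offset_of!(Example, b), std::mem::align_of::<u32>());
/// ```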
#[macro_export]
macro_rules! offset_of {
    ($Struct:path, $($field:ident).+ $(,)?) => {{
        // Using a separate function to minimize unhygienic hazards
        // (e.g. unsafety of #[repr(packed)] field borrows).
        // Uncomment `const` when `const fn`s can juggle pointers.
        /*const*/
        fn offset() -> usize {
            let u = std::mem::MaybeUninit::<$Struct>::uninit();
            let f = unsafe { &(*u.as_ptr()).$($field).+ };
            let o = (f as *const _ as usize).wrapping_sub(&u as *const _ as usize);
            // Triple check that we are within `u` still.
            assert!((0..=std::mem::size_of_val(&u)).contains(&o));
            o
        }
        offset()
    }};
}

// Adapted from libstd since std::ptr::is_aligned is still unstable
// See https://github.com/rust-lang/rust/issues/96284
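/// Returns `true` if `ptr` is aligned to `mem::align_of::<T>()`.
///
/// A minimal sketch of the expected behavior (illustrative only):
///
/// ```ignore
/// let x: u64 = 0;
/// let p = &x as *const u64;
/// assert!(is_aligned(p));
/// // A pointer one byte past `p` cannot be aligned for u64.
/// assert!(!is_aligned((p as usize + 1) as *const u64));
/// ```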
#[must_use]
#[inline]
pub const fn is_aligned<T>(ptr: *const T) -> bool
where
    T: Sized,
{
    let align = mem::align_of::<T>();
    addr(ptr) & (align - 1) == 0
}

// Adapted from libstd since std::ptr::addr is still unstable
// See https://github.com/rust-lang/rust/issues/95228
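/// Returns the address of `ptr` as a `usize`, discarding its provenance.
///
/// A minimal sketch (illustrative only):
///
/// ```ignore
/// assert_eq!(addr(std::ptr::null::<u8>()), 0);
/// ```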
#[must_use]
#[inline(always)]
pub const fn addr<T>(ptr: *const T) -> usize {
    // The libcore implementations of `addr` and `expose_addr` suggest that, while both transmuting
    // and casting to usize will give you the address of a ptr in the end, they are not identical
    // in their side-effects.
    // A cast "exposes" a ptr, which can potentially cause the compiler to optimize less
    // aggressively around it.
    // Let's trust the libcore devs over clippy on whether a transmute also exposes a ptr.
    #[allow(clippy::transmutes_expressible_as_ptr_casts)]
    // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
    // provenance).
    unsafe {
        mem::transmute(ptr.cast::<()>())
    }
}

pub trait AllocSize<P> {
    fn size(&self) -> P;
}

impl AllocSize<usize> for Layout {
    fn size(&self) -> usize {
        Self::size(self)
    }
}

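/// Tracks allocations by their base pointer, so that a pointer into the middle
/// of an allocation can be mapped back to the allocation containing it.
///
/// A minimal usage sketch with plain `usize` addresses and [Layout] sizes
/// (the addresses are illustrative only):
///
/// ```ignore
/// use std::alloc::Layout;
///
/// let mut ptrs: TrackedPointers<usize, Layout> = TrackedPointers::new();
/// ptrs.insert(0x1000, Layout::from_size_align(0x100, 8).unwrap());
///
/// // Any pointer within [0x1000, 0x1100) resolves to its base allocation.
/// let (base, _layout) = ptrs.find_alloc(0x1050).unwrap();
/// assert_eq!(base, 0x1000);
/// // One past the end is no longer inside the allocation.
/// assert!(ptrs.find_alloc(0x1100).is_none());
/// ```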
pub struct TrackedPointers<P, T: AllocSize<P>> {
    ptrs: BTreeMap<P, T>,
}

impl<P, T: AllocSize<P>> TrackedPointers<P, T> {
    pub fn new() -> Self {
        Self {
            ptrs: BTreeMap::new(),
        }
    }
}

impl<P, T: AllocSize<P>> TrackedPointers<P, T>
where
    P: Ord + Add<Output = P> + Copy,
{
    pub fn contains_key(&self, ptr: P) -> bool {
        self.ptrs.contains_key(&ptr)
    }

    pub fn entry(&mut self, ptr: P) -> Entry<P, T> {
        self.ptrs.entry(ptr)
    }

    pub fn find_alloc(&self, ptr: P) -> Option<(P, &T)> {
        if let Some((&base, val)) = self.ptrs.range(..=ptr).next_back() {
            let size = val.size();
            // The range query guarantees base <= ptr, so ptr falls within
            // [base, base + size) iff ptr < base + size.
            if ptr < (base + size) {
                return Some((base, val));
            }
        }
        None
    }

    pub fn find_alloc_precise(&self, ptr: P) -> Option<&T> {
        self.ptrs.get(&ptr)
    }

    pub fn insert(&mut self, ptr: P, val: T) -> Option<T> {
        self.ptrs.insert(ptr, val)
    }

    pub fn remove(&mut self, ptr: P) -> Option<T> {
        self.ptrs.remove(&ptr)
    }
}

impl<P, T: AllocSize<P>> Default for TrackedPointers<P, T> {
    fn default() -> Self {
        Self::new()
    }
}