// SPDX-License-Identifier: GPL-2.0

//! Revocable objects.
//!
//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
//! of a [`RevocableGuard`] ensures that objects remain valid.

use crate::{bindings, prelude::*, sync::rcu, types::Opaque};
use core::{
    marker::PhantomData,
    ops::Deref,
    ptr::drop_in_place,
    sync::atomic::{AtomicBool, Ordering},
};

/// An object that can become inaccessible at runtime.
///
/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
///
/// # Examples
///
/// ```
/// # use kernel::revocable::Revocable;
///
/// struct Example {
///     a: u32,
///     b: u32,
/// }
///
/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
///     let guard = v.try_access()?;
///     Some(guard.a + guard.b)
/// }
///
/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
/// assert_eq!(add_two(&v), Some(30));
/// v.revoke();
/// assert_eq!(add_two(&v), None);
/// ```
///
/// Same example as above, but explicitly using the rcu read side lock.
///
/// ```
/// # use kernel::revocable::Revocable;
/// use kernel::sync::rcu;
///
/// struct Example {
///     a: u32,
///     b: u32,
/// }
///
/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
///     let guard = rcu::read_lock();
///     let e = v.try_access_with_guard(&guard)?;
///     Some(e.a + e.b)
/// }
///
/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
/// assert_eq!(add_two(&v), Some(30));
/// v.revoke();
/// assert_eq!(add_two(&v), None);
/// ```
#[pin_data(PinnedDrop)]
pub struct Revocable<T> {
    is_available: AtomicBool,
    #[pin]
    data: Opaque<T>,
}

// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
// this isn't supported by the wrapped object.
unsafe impl<T: Send> Send for Revocable<T> {}

// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
// implementation of the wrapped object from an arbitrary thread.
unsafe impl<T: Sync + Send> Sync for Revocable<T> {}

impl<T> Revocable<T> {
    /// Creates a new revocable instance of the given data.
    pub fn new(data: impl PinInit<T>) -> impl PinInit<Self> {
        pin_init!(Self {
            is_available: AtomicBool::new(true),
            data <- Opaque::pin_init(data),
        })
    }

    /// Tries to access the revocable wrapped object.
    ///
    /// Returns `None` if the object has been revoked and is therefore no longer accessible.
    ///
    /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
    /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
    /// because another CPU may be waiting to complete the revocation of this object.
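    ///
    /// # Examples
    ///
    /// A minimal sketch of guarded access; the `Example` struct below is illustrative only and
    /// mirrors the type-level examples above.
    ///
    /// ```
    /// # use kernel::revocable::Revocable;
    ///
    /// struct Example {
    ///     a: u32,
    /// }
    ///
    /// let v = KBox::pin_init(Revocable::new(Example { a: 42 }), GFP_KERNEL).unwrap();
    /// // The guard keeps the object alive; do not sleep while it is held.
    /// if let Some(guard) = v.try_access() {
    ///     assert_eq!(guard.a, 42);
    /// }
    /// v.revoke();
    /// assert!(v.try_access().is_none());
    /// ```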
    pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
        let guard = rcu::read_lock();
        if self.is_available.load(Ordering::Relaxed) {
            // Since `self.is_available` is true, data is initialised and has to remain valid
            // because the RCU read side lock prevents it from being dropped.
            Some(RevocableGuard::new(self.data.get(), guard))
        } else {
            None
        }
    }

    /// Tries to access the revocable wrapped object.
    ///
    /// Returns `None` if the object has been revoked and is therefore no longer accessible.
    ///
    /// Returns a shared reference to the object otherwise; the object is guaranteed to
    /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
    /// allowed to sleep because another CPU may be waiting to complete the revocation of this
    /// object.
    pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
        if self.is_available.load(Ordering::Relaxed) {
            // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
            // valid because the RCU read side lock prevents it from being dropped.
            Some(unsafe { &*self.data.get() })
        } else {
            None
        }
    }

    /// # Safety
    ///
    /// Callers must ensure that there are no more concurrent users of the revocable object.
    unsafe fn revoke_internal<const SYNC: bool>(&self) {
        if self.is_available.swap(false, Ordering::Relaxed) {
            if SYNC {
                // SAFETY: Just an FFI call, there are no further requirements.
                unsafe { bindings::synchronize_rcu() };
            }

            // SAFETY: We know `self.data` is valid because only one CPU can succeed the
            // `swap` above that takes `is_available` from `true` to `false`.
            unsafe { drop_in_place(self.data.get()) };
        }
    }

    /// Revokes access to and drops the wrapped object.
    ///
    /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
    /// expecting that there are no concurrent users of the object.
    ///
    /// # Safety
    ///
    /// Callers must ensure that there are no more concurrent users of the revocable object.
    pub unsafe fn revoke_nosync(&self) {
        // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
        // accessing the data anymore and hence we don't have to wait for the grace period to
        // finish.
        unsafe { self.revoke_internal::<false>() }
    }

    /// Revokes access to and drops the wrapped object.
    ///
    /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`].
    ///
    /// If there are concurrent users of the object (i.e., ones that called
    /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
    /// function waits for the concurrent access to complete before dropping the wrapped object.
    pub fn revoke(&self) {
        // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
        // finish.
        unsafe { self.revoke_internal::<true>() }
    }
}

#[pinned_drop]
impl<T> PinnedDrop for Revocable<T> {
    fn drop(self: Pin<&mut Self>) {
        // Drop only if the data hasn't been revoked yet (in which case it has already been
        // dropped).
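        //
        // No RCU grace period is needed here: `drop` has exclusive access to `self`, so no guard
        // returned by `try_access` (nor a reference from `try_access_with_guard`) can still be
        // alive at this point.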
        // SAFETY: We are not moving out of `p`, only dropping in place.
        let p = unsafe { self.get_unchecked_mut() };
        if *p.is_available.get_mut() {
            // SAFETY: We know `self.data` is valid because no other CPU has changed
            // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
            // holds the only reference (mutable) to `self` now.
            unsafe { drop_in_place(p.data.get()) };
        }
    }
}

/// A guard that allows access to a revocable object and keeps it alive.
///
/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
/// holding the RCU read-side lock.
///
/// # Invariants
///
/// The RCU read-side lock is held while the guard is alive.
pub struct RevocableGuard<'a, T> {
    data_ref: *const T,
    _rcu_guard: rcu::Guard,
    _p: PhantomData<&'a ()>,
}

impl<T> RevocableGuard<'_, T> {
    fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
        Self {
            data_ref,
            _rcu_guard: rcu_guard,
            _p: PhantomData,
        }
    }
}

impl<T> Deref for RevocableGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is
        // guaranteed to remain valid.
        unsafe { &*self.data_ref }
    }
}